Source-Changes-HG archive
[src/trunk]: src/sys/arch/sparc64/sparc64 now in common/lib/libc/arch/sparc64...
details: https://anonhg.NetBSD.org/src/rev/6730648c3306
branches: trunk
changeset: 785503:6730648c3306
user: christos <christos%NetBSD.org@localhost>
date: Sun Mar 17 00:45:03 2013 +0000
description:
now in common/lib/libc/arch/sparc64/string
diffstat:
sys/arch/sparc64/sparc64/memcpyset.s | 1900 ----------------------------------
1 files changed, 0 insertions(+), 1900 deletions(-)
diffs (truncated from 1904 to 300 lines):
diff -r db41c1bfb147 -r 6730648c3306 sys/arch/sparc64/sparc64/memcpyset.s
--- a/sys/arch/sparc64/sparc64/memcpyset.s Sun Mar 17 00:44:16 2013 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1900 +0,0 @@
-/* $NetBSD: memcpyset.s,v 1.3 2011/07/12 07:51:34 mrg Exp $ */
-
-/*
- * Copyright (c) 1996-2002 Eduardo Horvath
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- */
-
-#define USE_BLOCK_STORE_LOAD /* enable block load/store ops */
-
-#include "assym.h"
-#include <machine/param.h>
-#include <machine/ctlreg.h>
-#include <machine/psl.h>
-#include <machine/frame.h>
-#include <machine/intr.h>
-#include <machine/asm.h>
-#include <machine/locore.h>
-
-#ifdef USE_BLOCK_STORE_LOAD
-
-#define BLOCK_SIZE SPARC64_BLOCK_SIZE
-#define BLOCK_ALIGN SPARC64_BLOCK_ALIGN
-
-/*
- * The following routines allow fpu use in the kernel.
- *
- * They allocate a stack frame and use all local regs. Extra
- * local storage can be requested by setting the siz parameter,
- * and can be accessed at %sp+CC64FSZ.
- */
-
-#define ENABLE_FPU(siz) \
- save %sp, -(CC64FSZ), %sp; /* Allocate a stack frame */ \
- sethi %hi(FPLWP), %l1; \
- add %fp, STKB-FS_SIZE, %l0; /* Allocate a fpstate */ \
- LDPTR [%l1 + %lo(FPLWP)], %l2; /* Load fplwp */ \
- andn %l0, BLOCK_ALIGN, %l0; /* Align it */ \
- clr %l3; /* NULL fpstate */ \
- brz,pt %l2, 1f; /* fplwp == NULL? */ \
- add %l0, -STKB-CC64FSZ-(siz), %sp; /* Set proper %sp */ \
- LDPTR [%l2 + L_FPSTATE], %l3; \
- brz,pn %l3, 1f; /* Make sure we have an fpstate */ \
- mov %l3, %o0; \
- call _C_LABEL(savefpstate); /* Save the old fpstate */ \
-1: \
- set EINTSTACK-STKB, %l4; /* Are we on intr stack? */ \
- cmp %sp, %l4; \
- bgu,pt %xcc, 1f; \
- set INTSTACK-STKB, %l4; \
- cmp %sp, %l4; \
- blu %xcc, 1f; \
-0: \
- sethi %hi(_C_LABEL(lwp0)), %l4; /* Yes, use lwp0 */ \
- ba,pt %xcc, 2f; /* XXXX needs to change to CPUs idle proc */ \
- or %l4, %lo(_C_LABEL(lwp0)), %l5; \
-1: \
- sethi %hi(CURLWP), %l4; /* Use curlwp */ \
- LDPTR [%l4 + %lo(CURLWP)], %l5; \
- brz,pn %l5, 0b; nop; /* If curlwp is NULL need to use lwp0 */ \
-2: \
- LDPTR [%l5 + L_FPSTATE], %l6; /* Save old fpstate */ \
- STPTR %l0, [%l5 + L_FPSTATE]; /* Insert new fpstate */ \
- STPTR %l5, [%l1 + %lo(FPLWP)]; /* Set new fplwp */ \
- wr %g0, FPRS_FEF, %fprs /* Enable FPU */
-
-/*
- * We've saved our possible fpstate, now disable the fpu
- * and continue with life.
- */
-#ifdef DEBUG
-#define __CHECK_FPU \
- LDPTR [%l5 + L_FPSTATE], %l7; \
- cmp %l7, %l0; \
- tnz 1;
-#else
-#define __CHECK_FPU
-#endif
-
-#define RESTORE_FPU \
- __CHECK_FPU \
- STPTR %l2, [%l1 + %lo(FPLWP)]; /* Restore old fproc */ \
- wr %g0, 0, %fprs; /* Disable fpu */ \
- brz,pt %l3, 1f; /* Skip if no fpstate */ \
- STPTR %l6, [%l5 + L_FPSTATE]; /* Restore old fpstate */ \
- \
- mov %l3, %o0; \
- call _C_LABEL(loadfpstate); /* Re-load orig fpstate */ \
-1: \
- membar #Sync; /* Finish all FP ops */
-
-#endif /* USE_BLOCK_STORE_LOAD */
-
-#ifdef USE_BLOCK_STORE_LOAD
-/*
- * Use block_disable to turn off block insns for
- * memcpy/memset
- */
- .data
- .align 8
- .globl block_disable
-block_disable: .xword 1
- .text
-
-#if 0
-#define ASI_STORE ASI_BLK_COMMIT_P
-#else
-#define ASI_STORE ASI_BLK_P
-#endif
-#endif /* USE_BLOCK_STORE_LOAD */
-
-#if 1
-/*
- * kernel memcpy
- * Assumes regions do not overlap; has no useful return value.
- *
- * Must not use %g7 (see copyin/copyout above).
- */
-ENTRY(memcpy) /* dest, src, size */
- /*
- * Swap args for bcopy. Gcc generates calls to memcpy for
- * structure assignments.
- */
- mov %o0, %o3
- mov %o1, %o0
- mov %o3, %o1
-#endif
-! ENTRY(bcopy) /* src, dest, size */
-#ifdef DEBUG
- set pmapdebug, %o4
- ld [%o4], %o4
- btst 0x80, %o4 ! PDB_COPY
- bz,pt %icc, 3f
- nop
- save %sp, -CC64FSZ, %sp
- mov %i0, %o1
- set 2f, %o0
- mov %i1, %o2
- call printf
- mov %i2, %o3
-! ta 1; nop
- restore
- .data
-2: .asciz "memcpy(%p<-%p,%x)\n"
- _ALIGN
- .text
-3:
-#endif
-
- cmp %o2, BCOPY_SMALL
-
-Lmemcpy_start:
- bge,pt CCCR, 2f ! if >= this many, go be fancy.
- cmp %o2, 256
-
- mov %o1, %o5 ! Save memcpy return value
- /*
- * Not much to copy, just do it a byte at a time.
- */
- deccc %o2 ! while (--len >= 0)
- bl 1f
- .empty
-0:
- inc %o0
- ldsb [%o0 - 1], %o4 ! (++dst)[-1] = *src++;
- stb %o4, [%o1]
- deccc %o2
- bge 0b
- inc %o1
-1:
- retl
- mov %o5, %o0
- NOTREACHED
-
- /*
- * Plenty of data to copy, so try to do it optimally.
- */
-2:
-#ifdef USE_BLOCK_STORE_LOAD
- ! If it is big enough, use VIS instructions
- bge Lmemcpy_block
- nop
-#endif /* USE_BLOCK_STORE_LOAD */
-Lmemcpy_fancy:
-
- !!
- !! First align the output to an 8-byte entity
- !!
-
- save %sp, -CC64FSZ, %sp
-
- mov %i0, %l0
- mov %i1, %l1
-
- mov %i2, %l2
- btst 1, %l1
-
- bz,pt %icc, 4f
- btst 2, %l1
- ldub [%l0], %l4 ! Load 1st byte
-
- deccc 1, %l2
- ble,pn CCCR, Lmemcpy_finish ! XXXX
- inc 1, %l0
-
- stb %l4, [%l1] ! Store 1st byte
- inc 1, %l1 ! Update address
- btst 2, %l1
-4:
- bz,pt %icc, 4f
-
- btst 1, %l0
- bz,a 1f
- lduh [%l0], %l4 ! Load short
-
- ldub [%l0], %l4 ! Load bytes
-
- ldub [%l0+1], %l3
- sllx %l4, 8, %l4
- or %l3, %l4, %l4
-
-1:
- deccc 2, %l2
- ble,pn CCCR, Lmemcpy_finish ! XXXX
- inc 2, %l0
- sth %l4, [%l1] ! Store 1st short
-
- inc 2, %l1
-4:
- btst 4, %l1
- bz,pt CCCR, 4f
-
- btst 3, %l0
- bz,a,pt CCCR, 1f
- lduw [%l0], %l4 ! Load word -1
-
- btst 1, %l0
- bz,a,pt %icc, 2f
- lduh [%l0], %l4
-
- ldub [%l0], %l4
-
- lduh [%l0+1], %l3
- sllx %l4, 16, %l4
- or %l4, %l3, %l4
-
- ldub [%l0+3], %l3
- sllx %l4, 8, %l4
- ba,pt %icc, 1f
- or %l4, %l3, %l4
-
-2:
- lduh [%l0+2], %l3
- sllx %l4, 16, %l4
- or %l4, %l3, %l4
-
-1:
- deccc 4, %l2
- ble,pn CCCR, Lmemcpy_finish ! XXXX
- inc 4, %l0
-
- st %l4, [%l1] ! Store word
- inc 4, %l1
-4:
- !!
- !! We are now 32-bit aligned in the dest.
- !!
-Lmemcpy_common:
-
- and %l0, 7, %l4 ! Shift amount
- andn %l0, 7, %l0 ! Source addr
-
- brz,pt %l4, Lmemcpy_noshift8 ! No shift version...
-
- sllx %l4, 3, %l4 ! In bits
- mov 8<<3, %l3
-
- ldx [%l0], %o0 ! Load word -1
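
The dispatch at the top of the removed routine is easier to follow in C. The sketch below mirrors only the structure of that dispatch; the threshold constant, flag, and function names are placeholders for this illustration (BCOPY_SMALL's actual value and the block_disable check live elsewhere in the original), and every branch is reduced to a plain byte loop.

    #include <stddef.h>

    #define SKETCH_BCOPY_SMALL 32           /* assumed stand-in for BCOPY_SMALL */

    static long sketch_block_disable = 1;   /* mirrors the block_disable word */

    static void copy_bytes(unsigned char *d, const unsigned char *s, size_t len)
    {
        while (len-- > 0)
            *d++ = *s++;
    }

    void *sketch_memcpy(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        if (len < SKETCH_BCOPY_SMALL) {
            /* Small-copy path: byte at a time, as in the assembly. */
            copy_bytes(d, s, len);
        } else if (len >= 256 && !sketch_block_disable) {
            /* Lmemcpy_block in the original: 64-byte VIS block
             * loads/stores (ASI_BLK_P / ASI_STORE), bracketed by
             * ENABLE_FPU and RESTORE_FPU.  A byte loop stands in here. */
            copy_bytes(d, s, len);
        } else {
            /* Lmemcpy_fancy / Lmemcpy_common: align the destination to
             * 8 bytes, then copy doublewords, shifting when the source
             * alignment differs.  Also a byte loop in this sketch. */
            copy_bytes(d, s, len);
        }
        return dst;     /* the routine hands back its original destination */
    }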
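
The truncated output ends just as Lmemcpy_common computes a shift amount for a source that is not 8-byte aligned. After the 1-, 2- and 4-byte steps above have aligned the destination, the remaining trick is to keep every load aligned and splice neighbouring doublewords together with shifts. A minimal C sketch of that idea, assuming a big-endian machine (as SPARC is) and a source offset of 1..7 bytes (the aligned case takes the separate Lmemcpy_noshift8 path), with illustrative names only:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Copy ndoubles 64-bit words to an aligned dst from a misaligned src
     * using only aligned loads.  Like the assembly, it reads whole
     * doublewords, so it touches a few bytes before/after the source
     * buffer that lie within the same aligned words.
     */
    static void copy_shifted(uint64_t *dst, const unsigned char *src,
                             size_t ndoubles)
    {
        unsigned off = (unsigned)((uintptr_t)src & 7);    /* 1..7 assumed */
        const uint64_t *s = (const uint64_t *)(src - off);
        unsigned lshift = off * 8;       /* bits discarded from the first word */
        unsigned rshift = 64 - lshift;   /* bits taken from the following word */
        uint64_t prev = *s++;

        while (ndoubles-- > 0) {
            uint64_t next = *s++;
            *dst++ = (prev << lshift) | (next >> rshift);
            prev = next;
        }
    }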