[src/trunk]: src/sys/arch/sun2/sun2 Many changes to support ELF. Highlights ...
details: https://anonhg.NetBSD.org/src/rev/52487e86d1bc
branches: trunk
changeset: 509848:52487e86d1bc
user: fredette <fredette@NetBSD.org>
date: Mon May 14 14:44:11 2001 +0000
description:
Many changes to support ELF. Highlights include:
- Added % to all register names.
- The boot loader now loads us exactly where we're linked, so no
  PIC or remapping funny business.
- Removed more 68881 code that is only needed with the real
  silicon, which a 68010 will never have.
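As an illustration, here is a minimal before/after sketch of the two most
visible changes, excerpted from the hunks in the diff below (not a complete
listing, comments lightly edited): register names gain the % prefix that the
ELF-targeted GNU assembler expects, and the explicit long jumps needed while
the kernel was double-mapped become ordinary pc-relative branches.

    | Old (pre-ELF) syntax, position-independent entry code:
        movw    #PSL_HIGHIPL, sr        | no interrupts
        moveq   #FC_CONTROL, d0         | make movs access "control"
        jmp     L_high_code:l           | long jump to relocated code

    | New (ELF) syntax, kernel runs where it is linked:
        movw    #PSL_HIGHIPL, %sr       | no interrupts
        moveq   #FC_CONTROL, %d0        | make movs access "control"
        jra     L_high_code             | plain pc-relative branch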
diffstat:
sys/arch/sun2/sun2/locore.s | 698 ++++++++++++++++++++-----------------------
1 files changed, 321 insertions(+), 377 deletions(-)
diffs (truncated from 1076 to 300 lines):
diff -r f83eb6fb0775 -r 52487e86d1bc sys/arch/sun2/sun2/locore.s
--- a/sys/arch/sun2/sun2/locore.s Mon May 14 14:43:45 2001 +0000
+++ b/sys/arch/sun2/sun2/locore.s Mon May 14 14:44:11 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.4 2001/05/07 21:48:24 fredette Exp $ */
+/* $NetBSD: locore.s,v 1.5 2001/05/14 14:44:11 fredette Exp $ */
/*
* Copyright (c) 2001 Matthew Fredette
@@ -60,57 +60,36 @@
GLOBAL(kernel_text)
| This is the entry point, as well as the end of the temporary stack
-| used during process switch (four 2K pages ending at start)
+| used during process switch (two 2K pages ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)
-| First we need to set it up so we can access the sun MMU, and be otherwise
-| undisturbed. Until otherwise noted, all code must be position independent
-| as the boot loader put us low in memory, but we are linked high.
- movw #PSL_HIGHIPL, sr | no interrupts
- moveq #FC_CONTROL, d0 | make movs access "control"
- movc d0, sfc | space where the sun2 designers
- movc d0, dfc | put all the "useful" stuff
+| As opposed to the sun3, on the sun2 the kernel is linked low. The
+| boot loader loads us exactly where we are linked, so we don't have
+| to worry about writing position independent code or moving the
+| kernel around.
+ movw #PSL_HIGHIPL, %sr | no interrupts
+ moveq #FC_CONTROL, %d0 | make movs access "control"
+ movc %d0, %sfc | space where the sun2 designers
+ movc %d0, %dfc | put all the "useful" stuff
| Set context zero and stay there until pmap_bootstrap.
- moveq #0, d0
- movsb d0, CONTEXT_REG
- movsb d0, SCONTEXT_REG
-
-| In order to "move" the kernel to higher memory, we are going to copy the
-| first 4 Mb of pmegs such that we will be mapped at the linked address.
-| This is all done by copying in the segment map (top-level MMU table).
-| We will unscramble which PMEGs we actually need later.
-
- movl #(SEGMAP_BASE+0), a0 | src
- movl #(SEGMAP_BASE+KERNBASE), a1 | dst
- movl #(0x400000/NBSG), d0 | count
+ moveq #0, %d0
+ movsb %d0, CONTEXT_REG
+ movsb %d0, SCONTEXT_REG
-| We do the move backwards, in case the src and dst overlap.
- addl #0x400000, a0
- addl #0x400000, a1
-
-L_per_pmeg:
- subl #NBSG, a0 | decrement pointers
- subl #NBSG, a1
- movsb a0@, d1 | copy segmap entry
- movsb d1, a1@
- subql #1, d0 | decrement count
- bgt L_per_pmeg
-
-| Kernel is now double mapped at zero and KERNBASE.
-| Force a long jump to the relocated code (high VA).
- jmp L_high_code:l | long jump
+| Jump around the g0 and g4 entry points.
+ jra L_high_code
| These entry points are here in pretty low memory, so that they
| can be reached from virtual address zero using the classic,
-| old-school "g0" and "g4" commands from the monitor. Note that
-| on entry, we are in low memory again (i.e., not where we were
-| linked.) We do a long jump right away to the relocated code.
+| old-school "g0" and "g4" commands from the monitor. (I.e.,
+| they need to be reachable using 16-bit displacements from PCs
+| 0 and 4).
L_g0_entry:
- jmp _C_LABEL(g0_handler):l | long jump
+ jra _C_LABEL(g0_handler)
L_g4_entry:
- jmp _C_LABEL(g4_handler):l | long jump
+ jra _C_LABEL(g4_handler)
L_high_code:
| We are now running in the correctly relocated kernel, so
@@ -118,47 +97,41 @@
| Disable interrupts, and initialize the soft copy of the
| enable register.
- movsw SYSTEM_ENAB, d0 | read the enable register
- moveq #ENA_INTS, d1
- notw d1
- andw d1, d0
- movsw d0, SYSTEM_ENAB | disable all interrupts
- movw d0, _C_LABEL(enable_reg_soft)
+ movsw SYSTEM_ENAB, %d0 | read the enable register
+ moveq #ENA_INTS, %d1
+ notw %d1
+ andw %d1, %d0
+ movsw %d0, SYSTEM_ENAB | disable all interrupts
+ movw %d0, _C_LABEL(enable_reg_soft)
-| Set up our g0 and g4 handlers. Note that we reach ourselves
-| in low memory again (i.e., not where we were linked), which
-| is why we subtract KERNBASE. The 2 and the 6 further adjust
+| Set up our g0 and g4 handlers. The 2 and the 6 adjust
| the offsets to be PC-relative.
- movl #0, a0
- movw #0x6000, a0@+ | braw
- movl #(L_g0_entry-2-KERNBASE), d0
- movw d0, a0@+ | L_g0_entry
- movw #0x6000, a0@+ | braw
- movl #(L_g4_entry-6-KERNBASE), d0
- movw d0, a0@+ | L_g4_entry
+ movl #0, %a0
+ movw #0x6000, %a0@+ | braw
+ movl #(L_g0_entry-2), %d0
+ movw %d0, %a0@+ | L_g0_entry
+ movw #0x6000, %a0@+ | braw
+ movl #(L_g4_entry-6), %d0
+ movw %d0, %a0@+ | L_g4_entry
| Do bootstrap stuff needed before main() gets called.
-| Our boot loader leaves a copy of the kernel's exec header
-| just before the start of the kernel text segment, so the
-| kernel can sanity-check the DDB symbols at [end...esym].
-| Pass the struct exec at tmpstk-32 to _bootstrap().
-| Also, make sure the initial frame pointer is zero so that
+| Make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
- lea _ASM_LABEL(tmpstk)-32, sp
- movl #0,a6
+ lea _ASM_LABEL(tmpstk), %sp
+ movl #0,%a6
jsr _C_LABEL(_bootstrap) | See locore2.c
| Now that _bootstrap() is done using the PROM functions,
| we can safely set the sfc/dfc to something != FC_CONTROL
- moveq #FC_USERD, d0 | make movs access "user data"
- movc d0, sfc | space for copyin/copyout
- movc d0, dfc
+ moveq #FC_USERD, %d0 | make movs access "user data"
+ movc %d0, %sfc | space for copyin/copyout
+ movc %d0, %dfc
| Setup process zero user/kernel stacks.
- movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
- lea a1@(USPACE-4),sp | set SSP to last word
- movl #USRSTACK-4,a2
- movl a2,usp | init user SP
+ movl _C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
+ lea %a1@(USPACE-4),%sp | set SSP to last word
+ movl #USRSTACK-4,%a2
+ movl %a2,%usp | init user SP
| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
@@ -175,27 +148,27 @@
* cpu_set_kpc() to arrange for a call to a kernel function
* before the new process does its rte out to user mode.
*/
- clrw sp@- | tf_format,tf_vector
- clrl sp@- | tf_pc (filled in later)
- movw #PSL_USER,sp@- | tf_sr for user mode
- clrl sp@- | tf_stackadj
- lea sp@(-64),sp | tf_regs[16]
- movl sp,a1 | a1=trapframe
- lea _C_LABEL(proc0),a0 | proc0.p_md.md_regs =
- movl a1,a0@(P_MDREGS) | trapframe
- movl a2,a1@(FR_SP) | a2 == usp (from above)
- pea a1@ | push &trapframe
+ clrw %sp@- | tf_format,tf_vector
+ clrl %sp@- | tf_pc (filled in later)
+ movw #PSL_USER,%sp@- | tf_sr for user mode
+ clrl %sp@- | tf_stackadj
+ lea %sp@(-64),%sp | tf_regs[16]
+ movl %sp,%a1 | a1=trapframe
+ lea _C_LABEL(proc0),%a0 | proc0.p_md.md_regs =
+ movl %a1,%a0@(P_MDREGS) | trapframe
+ movl %a2,%a1@(FR_SP) | a2 == usp (from above)
+ pea %a1@ | push &trapframe
jbsr _C_LABEL(main) | main(&trapframe)
- addql #4,sp | help DDB backtrace
+ addql #4,%sp | help DDB backtrace
trap #15 | should not get here
| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
GLOBAL(proc_do_uret)
- movl sp@(FR_SP),a0 | grab and load
- movl a0,usp | user SP
- moveml sp@+,#0x7FFF | load most registers (all but SSP)
- addql #8,sp | pop SSP and stack adjust count
+ movl %sp@(FR_SP),%a0 | grab and load
+ movl %a0,%usp | user SP
+ moveml %sp@+,#0x7FFF | load most registers (all but SSP)
+ addql #8,%sp | pop SSP and stack adjust count
rte
/*
@@ -214,9 +187,9 @@
* return using the switchframe that remains on the stack.
*/
GLOBAL(proc_trampoline)
- movl sp@+,a0 | function pointer
- jbsr a0@ | (*func)(arg)
- addql #4,sp | toss the arg
+ movl %sp@+,%a0 | function pointer
+ jbsr %a0@ | (*func)(arg)
+ addql #4,%sp | toss the arg
rts | as cpu_switch would do
| That is all the assembly startup code we need on the sun3!
@@ -230,22 +203,22 @@
GLOBAL(buserr)
tstl _C_LABEL(nofault) | device probe?
jeq _C_LABEL(addrerr) | no, handle as usual
- movl _C_LABEL(nofault),sp@- | yes,
+ movl _C_LABEL(nofault),%sp@- | yes,
jbsr _C_LABEL(longjmp) | longjmp(nofault)
GLOBAL(addrerr)
- clrl sp@- | stack adjust count
- moveml #0xFFFF,sp@- | save user registers
- movl usp,a0 | save the user SP
- movl a0,sp@(FR_SP) | in the savearea
- lea sp@(FR_HW),a1 | grab base of HW berr frame
- moveq #0,d0
- movw a1@(8),d0 | grab SSW for fault processing
- movl a1@(10),d1 | fault address is as given in frame
- movl d1,sp@- | push fault VA
- movl d0,sp@- | and padded SSW
- movw a1@(6),d0 | get frame format/vector offset
- andw #0x0FFF,d0 | clear out frame format
- cmpw #12,d0 | address error vector?
+ clrl %sp@- | stack adjust count
+ moveml #0xFFFF,%sp@- | save user registers
+ movl %usp,%a0 | save the user SP
+ movl %a0,%sp@(FR_SP) | in the savearea
+ lea %sp@(FR_HW),%a1 | grab base of HW berr frame
+ moveq #0,%d0
+ movw %a1@(8),%d0 | grab SSW for fault processing
+ movl %a1@(10),%d1 | fault address is as given in frame
+ movl %d1,%sp@- | push fault VA
+ movl %d0,%sp@- | and padded SSW
+ movw %a1@(6),%d0 | get frame format/vector offset
+ andw #0x0FFF,%d0 | clear out frame format
+ cmpw #12,%d0 | address error vector?
jeq Lisaerr | yes, go to it
/*
@@ -258,96 +231,84 @@
* [this code replaces similarly mmu specific code in the hp300 code]
*/
sun2_mmu_specific:
- clrl d0 | make sure top bits are cleard too
- movl d1, sp@- | save d1
- movc sfc, d1 | save sfc to d1
- moveq #FC_CONTROL, d0 | sfc = FC_CONTROL
- movc d0, sfc
- movsb BUSERR_REG, d0 | get value of bus error register
- movc d1, sfc | restore sfc
- movl sp@+, d1 | restore d1
- andb #BUSERR_PROTERR, d0 | is this an MMU (protection *or* page unavailable) fault?
+ clrl %d0 | make sure top bits are cleard too
+ movl %d1, %sp@- | save d1
+ movc %sfc, %d1 | save sfc to d1
+ moveq #FC_CONTROL, %d0 | sfc = FC_CONTROL
+ movc %d0, %sfc
+ movsb BUSERR_REG, %d0 | get value of bus error register
+ movc %d1, %sfc | restore sfc
+ movl %sp@+, %d1 | restore d1
+ andb #BUSERR_PROTERR, %d0 | is this an MMU (protection *or* page unavailable) fault?
jeq Lisberr | non-MMU bus error
/* End of sun2 specific code. */
Lismerr:
- movl #T_MMUFLT,sp@- | show that we are an MMU fault
+ movl #T_MMUFLT,%sp@- | show that we are an MMU fault
jra _ASM_LABEL(faultstkadj) | and deal with it
Lisaerr:
- movl #T_ADDRERR,sp@- | mark address error
+ movl #T_ADDRERR,%sp@- | mark address error
jra _ASM_LABEL(faultstkadj) | and deal with it
Lisberr:
- movl #T_BUSERR,sp@- | mark bus error
+ movl #T_BUSERR,%sp@- | mark bus error
jra _ASM_LABEL(faultstkadj) | and deal with it
/*
* FP exceptions.
*/
GLOBAL(fpfline)
- clrl sp@- | stack adjust count
- moveml #0xFFFF,sp@- | save registers
- moveq #T_FPEMULI,d0 | denote as FP emulation trap
+ clrl %sp@- | stack adjust count
+ moveml #0xFFFF,%sp@- | save registers
+ moveq #T_FPEMULI,%d0 | denote as FP emulation trap
jra _ASM_LABEL(fault) | do it