/*
 * This file contains a 'gate_init' initialization table
 * to initialize the x86 processor trap vectors to default entrypoints.
 * These entrypoints simply push a standard trap_state frame
 * and jump to the 'trap_handler' routine.
 */

#include "config_tcbsize.h"
#include "config_gdt.h"
#include "globalconfig.h"
#include "idt_init.h"
#include "low_level.h"
#include "shortcut.h"
#include "tcboffset.h"
#include "asm.h"

/* We make the trap handler an interrupt gate, because for debugging
   purposes, we don't want any interrupts to occur until they're
   explicitly enabled in the base_trap_handler (usually
   Thread::handle_slow_trap). */

/*
 * No error code.  Clear error code and push trap number.
 * Stub layout on entry to _slowtraps: [err=0][trapno][pusha frame].
 */
#define EXCEPTION(n,name)					\
	GATE_ENTRY(n,entry_##name,ACC_PL_K | ACC_INTR_GATE)	;\
	.p2align 3						;\
entry_##name:							;\
	pushl	$(0)						;\
	pushl	$(n)						;\
	pusha							;\
	jmp	_slowtraps

/*
 * User-accessible exception (gate DPL 3, so "int n" from user mode
 * is allowed).  Otherwise, same as above.
 */
#define EXCEP_USR(n,name)					\
	GATE_ENTRY(n,entry_##name,ACC_PL_U | ACC_INTR_GATE)	;\
	.p2align 3						;\
entry_##name:							;\
	pushl	$(0)						;\
	pushl	$(n)						;\
	pusha							;\
	jmp	_slowtraps

/*
 * Error code has been pushed by the CPU.  Just push trap number.
 */
#define EXCEP_ERR(n,name)					\
	GATE_ENTRY(n,entry_##name,ACC_PL_K | ACC_INTR_GATE)	;\
	.p2align 3						;\
entry_##name:							;\
	pushl	$(n)						;\
	pusha							;\
	jmp	_slowtraps

/* Common prologue for syscall entries of "alien" threads: clear the
   Thread_dis_alien bit; if it was not set, reflect the syscall as a
   GP-style exception (trap 0xd) to the pager instead of executing it. */
.macro	PRE_ALIEN_IPC
	btrl	$17, KSEG OFS__THREAD__STATE (%ebx)	/* Thread_dis_alien */
	jc	1f
	RESTORE_STATE
	sub	$2, 4(%esp)		/* Correct EIP to point to insn */
	popl	%eax
	pushl	$(0x30 << 3 | 2)	/* error code: gate 0x30, pre-IPC flavor */
	pushl	$(0xd)			/* trap number: general protection */
	pusha
	jmp	_slowtraps
1:	/* do alien IPC and raise a trap afterwards */
	RESET_THREAD_CANCEL_AT %ebx
.endm

/* Common epilogue for the alien IPC path: raise the post-IPC trap. */
.macro	POST_ALIEN_IPC
	CHECK_SANITY $3			/* scratches ecx */
	RESET_USER_SEGMENTS $3,in_cli	/* scratches ecx */
	RESTORE_STATE_AFTER_IPC
	popl	%eax
	pushl	$(0x30 << 3 | 6)	/* error code: gate 0x30, post-IPC flavor */
	pushl	$(0xd)			/* trap number: general protection */
	pusha
	jmp	_slowtraps
.endm

#ifdef CONFIG_KERN_LIB_PAGE
/* If the faulting/interrupted EIP lies inside the kernel library page,
   roll it back to the start of its 64-byte slot so the (restartable)
   library operation is re-executed from the beginning. */
.macro	ROLLBACK_IP addr
	/* scratches flags
	 * requires address argument as macro parameter */
	cmpl	$VAL__MEM_LAYOUT__KERN_LIB_BASE,\addr
	jl	1f
	cmpl	$VAL__MEM_LAYOUT__KERN_LIB_END,\addr
	jge	1f
	andl	$0xFFFFFFC0,\addr	/* roll back eip (64 bytes) */
#if defined(CONFIG_JDB_ACCOUNTING)
	/* update counter Kern_cnt_lib_page_hit */
	incl	VAL__MEM_LAYOUT__TBUF_STATUS_PAGE + \
		OFS__TBUF_STATUS__KERNCNTS + 44
#endif
1:
.endm
#endif

/* fixme: we could merge the comparisons to speedup the common case */
#ifdef CONFIG_ROLLFORWARD
/* If the interrupted EIP lies inside the rollforward region, advance
   it by 4K (the region is assumed to be 8K aligned, so an OR works). */
.macro	ROLLFORWARD_IP addr
	/* scratches flags
	 * requires address argument as macro parameter */
	// fixme: opcode likely jump
	cmpl	$VAL__MEM_LAYOUT__ROLLFORWARD_BASE,\addr
	jl	1f
	// fixme: opcode likely jump
	cmpl	$VAL__MEM_LAYOUT__ROLLFORWARD_END,\addr
	jge	1f
	orl	$0x1000,\addr	/* add 4k to IP by OR, addr must be 8K aligned */
	//movl $0x01,$VAL__CONTEXT_RF_ACTIVE // fixme: real address
1:
.endm

/* Panic if a trap/fault hits inside the rollforward region;
   \reason is passed in %eax to rollforward_panic. */
.macro	ROLLFORWARD_PANIC addr reason
	// fixme: opcode likely jump
	cmpl	$VAL__MEM_LAYOUT__ROLLFORWARD_BASE,\addr
	jl	1f
	// fixme: opcode likely jump
	cmpl	$VAL__MEM_LAYOUT__ROLLFORWARD_END,\addr
	jge	1f
	movl	\reason,%eax
	call	rollforward_panic
1:
.endm
#endif

GATE_INITTAB_BEGIN(idt_init_table)

EXCEPTION(0x00,vec00_zero_div)
#ifdef CONFIG_PF_UX
EXCEPTION(0x01,vec01_debug)
#else
/* IA32 has to handle breakpoint exceptions if occurred exactly at
   entry_sys_fast_ipc -- see ia32/entry-ia32.S */
GATE_ENTRY(0x01,entry_vec01_debug,ACC_PL_K | ACC_INTR_GATE)
#endif
/* XXX IA32 has to handle NMI occurred exactly at entry_sys_fast_ipc */
EXCEP_USR(0x02,vec02_nmi)
EXCEP_USR(0x03,vec03_breakpoint)
EXCEP_USR(0x04,vec04_into)
EXCEP_USR(0x05,vec05_bounds)
EXCEPTION(0x06,vec06_invop)
/* EXCEPTION(0x07,nofpu) */
#ifdef CONFIG_PF_UX
EXCEP_ERR(0x08,vec08_dbl_fault)
#else
/* double fault is handled via a separate TSS (task gate) on real HW */
GATE_ENTRY(0x08, GDT_TSS_DBF, ACC_PL_K | ACC_TASK_GATE)
#endif
EXCEPTION(0x09,vec09_fpu_ovfl)
/* EXCEP_ERR(0x0a,vec0a_inv_tss) */
EXCEP_ERR(0x0b,vec0b_segnp)
EXCEP_ERR(0x0c,vec0c_stack_fault)
EXCEP_ERR(0x0d,vec0d_gen_prot)
/* EXCEP_ERR(0x0e,vec0e_page_fault) */
/* EXCEPTION(0x0f,vec0f_trap_0f) */
EXCEPTION(0x10,vec10_fpu_err)
EXCEP_ERR(0x11,vec11_align)
EXCEPTION(0x12,vec12_mcheck)
EXCEPTION(0x13,vec13_simd_err)

	.p2align 4
	.type	slowtraps,@function
	.globl	slowtraps
	/* We have to introduce the label _slowtraps besides the label
	   slowtraps to achieve that jmps from exception entry points
	   are optimized to two-byte jmps.  The label slowtraps is visible
	   from outside. */
_slowtraps:
slowtraps:
#ifdef CONFIG_PF_UX
# define REG_GS CPU_GS
#else
# define REG_GS %gs
#endif
	pushl	%fs		/* we save the segment regs in the trap   */
	pushl	REG_GS		/* state, but we do not restore them.  We */
	pushl	%ds		/* rather reload them using               */
	pushl	%es		/* RESET_{KERNEL,USER}_SEGMENTS           */

	/* Load the kernel's segment registers.  */
	RESET_KERNEL_SEGMENTS_FORCE_DS_ES /* scratches ecx, edx */

#ifdef CONFIG_KERN_LIB_PAGE
	/* 56(%esp) = saved EIP: 4 seg regs + 8 pusha regs + trapno + err */
	ROLLBACK_IP 56(%esp)	// fixme: address correct? (-> trap_state.cpp)
#endif
#ifdef CONFIG_ROLLFORWARD
	ROLLFORWARD_PANIC 56(%esp), $0
#endif

	/* Note: we do not use RESET_THREAD_CANCEL_* here as that is needed
	   only when an I/O-page-fault IPC is sent and when the thread is
	   killed.  Resetting Thread_cancel here could be harmful when using
	   this trap handler in debugging.
	   Instead, we clear this flag in Thread::handle_slow_trap() just
	   before sending the IPC message or before killing the thread.
	   That's OK, because it is still atomic -- we never enable IRQs
	   (sti) before that point. */

	movl	%esp,%eax		/* ARG1: address of trap_state */
#ifndef CONFIG_NO_FRAME_PTR
	pushl	56(%esp)		/* create artificial stack frame */
	pushl	%ebp
# ifndef CONFIG_PROFILE
	leal	(%esp),%ebp
# else
	xorl	%ebp,%ebp
# endif
#endif

	/* Call the C handler function if one has been installed.  */
	movl	BASE_TRAP_HANDLER, %ecx
	orl	%ecx,%ecx
	jz	unexpected_trap_pop
	call	*%ecx
in_slowtrap:
#ifndef CONFIG_NO_FRAME_PTR
	leal	8(%esp),%esp		/* drop artificial stack frame */
#endif

	/* If the handler function returned zero (success),
	   then resume execution as if the trap never happened.
	   Otherwise, just panic.  */
	orl	%eax,%eax
	jnz	unexpected_trap

	CHECK_SANITY 60(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 60(%esp),in_cli	/* scratches ecx */

	addl	$4*2,%esp	/* Pop ds, es segment registers */
	popl	REG_GS
	popl	%fs		/* Restore segment registers */
	popa
	addl	$4*2,%esp	/* Pop trap number and error code */
	iret

unexpected_trap_pop:
#ifndef CONFIG_NO_FRAME_PTR
	leal	8(%esp), %esp		/* drop artificial stack frame */
#endif
unexpected_trap:
	movw	%ss,%ax
	movw	%ax,%ds
	movw	%ax,%es
	movl	%esp,%eax		/* ARG1: pointer to trap state */
	call	trap_dump_panic

GATE_ENTRY(0x0e,entry_vec0e_page_fault,ACC_PL_K | ACC_INTR_GATE)

/* we must save %cr2 before we can be preempted -- therefore we're an
   interrupt gate (invoked with interrupts turned off).  Also, we
   don't turn them on again here, but only after checking for
   page-ins from the global page directory in thread_page_fault().
   XXX: If you make changes to stack layout here, fix thread_page_fault */

/* XXX slow version - sets up nice stack frame for debugger */

	.p2align 4
	.type	entry_vec0e_page_fault,@function
entry_vec0e_page_fault:
	pushl	%eax			/* save regs modifiable by C funcs */
	pushl	%ecx
	pushl	%edx
	RESET_KERNEL_SEGMENTS		/* scratches ecx, edx */
	movl	12(%esp),%edx		/* save error code in ARG2 ... */
	movl	PAGE_FAULT_ADDR,%eax	/* save page fault address in ARG1 */

/* We must reset the cancel flag here atomically
   if we are entering fresh from user mode and
   an IPC might occur.  NOTE: We cannot test the
   user-mode bit in the error code because it will
   flag "kernel" in case an I/O-bitmap page is not
   mapped during an I/O access. */

	movl	20(%esp),%ecx	/* get CS from stack */
	andb	$3,%cl		/* retrieve current privilege level (CPL) */
	jz	1f		/* CPL == 0 -> kernel, skip resetting state */
	ESP_TO_TCB_AT %ecx
	RESET_THREAD_CANCEL_AT %ecx
1:	movl	%ebp,12(%esp)	/* save frame pointer */
	leal	12(%esp),%ebp	/* load new frame pointer */
#ifdef CONFIG_PROFILE
	call	mcount
#endif
	pushl	%eax		/* save pf address */
	pushl	%edx		/* save error code */
	leal	24(%esp),%ecx	/* ARG5: ptr to Return_frame */
	pushl	%ecx
	pushl	36(%esp)	/* ARG4: eflags */
#ifdef CONFIG_KERN_LIB_PAGE
	ROLLBACK_IP 32(%esp)
#endif
#ifdef CONFIG_ROLLFORWARD
	ROLLFORWARD_PANIC 32(%esp), $1
#endif
	movl	32(%esp),%ecx	/* ARG3: eip */
	call	thread_page_fault
in_page_fault:

#ifdef CONFIG_SMALL_SPACES
	movl	32(%esp),%ecx	/* check if page fault was in */
	andl	$0xFFFFFF00,%ecx /* sysexit trampoline page */
	cmpl	$VAL__MEM_LAYOUT__SMAS_TRAMPOLINE,%ecx
	jz	page_fault_in_trampoline
before_iret_page_fault:
#endif
	orl	%eax,%eax
	jz	bad_page_fault
	lea	16(%esp),%esp			/* drop the 4 C arguments */
	CHECK_SANITY 20(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 20(%esp),no_cli	/* scratches ecx */
	popl	%edx
	popl	%ecx
	popl	%eax
	popl	%ebp
	iret

/* If code or stack from a small address space are not yet mapped in the
   current page directory we might get a page fault on return from the
   trampoline page.  In this case we cannot return to the trampoline page
   after handling the fault because we are already in user mode (with
   segment limits below kernel space) while the trampoline code is located
   in kernel data space.  So instead we change ESP and EIP to point to the
   address the trampoline wanted to return to and do the normal IRET. */
#ifdef CONFIG_SMALL_SPACES
page_fault_in_trampoline:

	/* don't interrupt us here */
	cli

	/* make sure that we use the user-level segment
	   for accessing the user-level eip */
	movw	$(GDT_DATA_USER|SEL_PL_U), %cx
	movl	%ecx,%ds
	movl	44(%esp),%ecx	/* user esp */
	movl	(%ecx),%edx	/* fetch the return address the trampoline */
	movl	%edx,32(%esp)	/* wanted to reach, make it the IRET eip */
	addl	$16,%ecx
	movl	%ecx,44(%esp)

	orl	%eax,%eax
	jz	bad_page_fault
	leal	16(%esp),%esp
	CHECK_SANITY 20(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 20(%esp),in_cli	/* scratches ecx */
	popl	%edx
	popl	%ecx
	popl	%eax
	popl	%ebp
	iret
#endif

/* recover from a bad page fault by invoking the slow_trap handler */
	.p2align 4
bad_page_fault:
	cli
	addl	$8,%esp		/* pop ARG4 and ARG5 */
	movl	(%ebp),%eax	/* old ebp */
	popl	(%ebp)		/* error code */
	popl	%edx		/* page fault address */
	movl	%eax,%ebp

	/* we have on stack: error code, eax, ecx, edx
	   move registers down to make room for trap number */
	subl	$4,%esp
	movl	4(%esp),%eax
	movl	%eax,(%esp)
	movl	8(%esp),%eax
	movl	%eax,4(%esp)
	movl	12(%esp),%eax
	movl	%eax,8(%esp)
	movl	$0x0e,12(%esp)	/* trap number (0x0e = page fault) */
	pushl	%ebx		/* rest of trap state */
	pushl	%edx		/* page fault address */
	pushl	%ebp
	pushl	%esi
	pushl	%edi
	jmp	slowtraps

/* FPU not available in this context. */
GATE_ENTRY(0x07,entry_vec07_fpu_unavail, ACC_PL_K | ACC_INTR_GATE)

/* do all of this with disabled interrupts */
	.p2align 4
	.type	entry_vec07_fpu_unavail,@function
entry_vec07_fpu_unavail:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	call	thread_handle_fputrap
in_handle_fputrap:
	CHECK_SANITY 16(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 16(%esp),in_cli	/* scratches ecx */
	test	%eax, %eax
	jz	real_fpu_exception	/* handler did not take it -> slowtrap */
	popl	%edx
	popl	%ecx
	popl	%eax
	iret
real_fpu_exception:
	popl	%edx
	popl	%ecx
	popl	%eax
	pushl	$(0)		/* fake error code */
	pushl	$(7)		/* trap number: device not available */
	pusha
	jmp	_slowtraps

/* timer interrupt */
#ifdef CONFIG_SCHED_PIT
GATE_ENTRY(0x20,entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
#endif
#ifdef CONFIG_SCHED_RTC
GATE_ENTRY(0x28,entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
#endif
#ifdef CONFIG_SCHED_APIC
GATE_ENTRY(0x3d,entry_int_timer,ACC_PL_K | ACC_INTR_GATE)
#endif

	.p2align 4
	.globl	entry_int_timer
entry_int_timer:
#ifdef CONFIG_KERN_LIB_PAGE
	ROLLBACK_IP 0(%esp)	/* 0(%esp) = interrupted EIP */
#endif
#ifdef CONFIG_ROLLFORWARD
	ROLLFORWARD_IP 0(%esp)
#endif
#ifndef CONFIG_NO_FRAME_PTR
	pushl	%ebp
#ifndef CONFIG_PROFILE
	movl	%esp,%ebp
#else
	xorl	%ebp,%ebp
#endif
#endif
	pushl	%eax
	pushl	%edx
	pushl	%ecx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
do_timer_interrupt:
#ifdef CONFIG_NO_FRAME_PTR
	movl	12(%esp), %eax	/* ARG1: eip for logging */
#else
	movl	16(%esp), %eax	/* ARG1: eip for logging */
#endif
	call	thread_timer_interrupt	/* enter with disabled irqs */
in_timer_interrupt:
#ifndef CONFIG_NO_FRAME_PTR
	CHECK_SANITY 20(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 20(%esp),in_cli	/* scratches ecx */
#else
	CHECK_SANITY 16(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 16(%esp),in_cli	/* scratches ecx */
#endif
	popl	%ecx
	popl	%edx
	popl	%eax
#ifndef CONFIG_NO_FRAME_PTR
	popl	%ebp
#endif
	iret

	.p2align 4
	.globl	entry_int_timer_slow
entry_int_timer_slow:
#ifdef CONFIG_KERN_LIB_PAGE
	ROLLBACK_IP 0(%esp)
#endif
#ifdef CONFIG_ROLLFORWARD
	ROLLFORWARD_IP 0(%esp)
#endif
#ifndef CONFIG_NO_FRAME_PTR
	pushl	%ebp
#ifndef CONFIG_PROFILE
	movl	%esp,%ebp
#else
	xorl	%ebp,%ebp
#endif
#endif
	pushl	%eax
	pushl	%edx
	pushl	%ecx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	call	thread_timer_interrupt_slow	/* enter with disabled irqs */
in_timer_interrupt_slow:
	/* tail into the common timer epilogue above */
	jmp	do_timer_interrupt

	.p2align 4
	.globl	entry_int_timer_stop
entry_int_timer_stop:
#ifndef CONFIG_NO_FRAME_PTR
	pushl	%ebp
#ifndef CONFIG_PROFILE
	movl	%esp,%ebp
#else
	xorl	%ebp,%ebp
#endif
#endif
	pushl	%eax
	pushl	%edx
	pushl	%ecx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	call	thread_timer_interrupt_stop
#ifndef CONFIG_NO_FRAME_PTR
	CHECK_SANITY 20(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 20(%esp),in_cli	/* scratches ecx */
#else
	CHECK_SANITY 16(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 16(%esp),in_cli	/* scratches ecx */
#endif
	popl	%ecx
	popl	%edx
	popl	%eax
#ifndef CONFIG_NO_FRAME_PTR
	popl	%ebp
#endif
	iret

/* profiling timer interrupt entry point */
#ifdef CONFIG_PROFILE

	.p2align 4
	.globl	profile_interrupt_entry
	.type	profile_interrupt_entry,@function
profile_interrupt_entry:
#ifndef CONFIG_NO_FRAME_PTR
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%eax
	movl	8(%esp),%eax	/* %eax = return address */
#else
	pushl	%eax
	movl	4(%esp),%eax	/* %eax = return address */
#endif
	pushl	%edx
	pushl	%ecx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	call	profile_interrupt	/* enter with disabled irqs */
#ifndef CONFIG_NO_FRAME_PTR
	CHECK_SANITY 20(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 20(%esp),in_cli	/* scratches ecx */
#else
	CHECK_SANITY 16(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 16(%esp),in_cli	/* scratches ecx */
#endif
	popl	%ecx
	popl	%edx
	popl	%eax
#ifndef CONFIG_NO_FRAME_PTR
	popl	%ebp
#endif
	iret

#endif /* CONFIG_PROFILE */

/* other interrupts: push irq number and join the common all_irqs path */
#define INTERRUPT(int,name)					\
	GATE_ENTRY(int,entry_##name,ACC_PL_K | ACC_INTR_GATE)	;\
	.p2align 3						;\
entry_##name:							;\
	pushl	%eax						;\
	movl	$ (int - 0x20), %eax	/* ARG1: irqnum */	;\
	jmp	all_irqs

#ifndef CONFIG_SCHED_PIT
INTERRUPT(0x20,int0)
#endif
INTERRUPT(0x21,int1)
INTERRUPT(0x22,int2)
INTERRUPT(0x23,int3)
INTERRUPT(0x24,int4)
INTERRUPT(0x25,int5)
INTERRUPT(0x26,int6)
INTERRUPT(0x27,int7)
#ifndef CONFIG_SCHED_RTC
INTERRUPT(0x28,int8)
#endif
INTERRUPT(0x29,int9)
INTERRUPT(0x2a,inta)
INTERRUPT(0x2b,intb)
INTERRUPT(0x2c,intc)
INTERRUPT(0x2d,intd)
INTERRUPT(0x2e,inte)
INTERRUPT(0x2f,intf)

/* Common hardware-interrupt path.  On entry: %eax = irq number (ARG1),
   saved %eax from the stub already on the stack. */
	.p2align 4
	.type	all_irqs,@function
all_irqs:
	pushl	%edx
	pushl	%ecx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
#ifdef CONFIG_KERN_LIB_PAGE
	ROLLBACK_IP 12(%esp)	/* 12(%esp) = interrupted EIP */
#endif
#ifdef CONFIG_ROLLFORWARD
	ROLLFORWARD_IP 12(%esp)
#endif
	movl	12(%esp), %edx	/* ARG2: eip */
	call	irq_interrupt	/* enter with disabled irqs */
in_interrupt:
	CHECK_SANITY 16(%esp)			/* scratches ecx */
	RESET_USER_SEGMENTS 16(%esp),in_cli	/* scratches ecx */
	popl	%ecx
	popl	%edx
	popl	%eax
entry_int_pic_ignore:
	iret

	.global	entry_int_pic_ignore
	.global	entry_int7
	.global	entry_intf

/****************************************************************************/
/* system calls                                                             */
/****************************************************************************/

#ifdef CONFIG_ASSEMBLER_IPC_SHORTCUT
GATE_ENTRY(0x30,entry_sys_ipc,ACC_PL_U | ACC_INTR_GATE);
#else
GATE_ENTRY(0x30,entry_sys_ipc_c,ACC_PL_U | ACC_INTR_GATE);
#endif

#if defined (CONFIG_JDB_LOGGING) || !defined(CONFIG_ASSEMBLER_IPC_SHORTCUT)
/* C-path IPC entry: save state, divert alien threads, then call the
   C++ shortcut wrapper. */
	.p2align 4
	.globl	entry_sys_ipc_c
entry_sys_ipc_c:
	pushl	%eax
	SAVE_STATE
	ESP_TO_TCB_AT %ebx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	testl	$Thread_alien, KSEG OFS__THREAD__STATE (%ebx)
	jnz	alien_sys_ipc_c
	RESET_THREAD_CANCEL_AT %ebx
	call	ipc_short_cut_wrapper
in_sc_ipc1:
	CHECK_SANITY $3			/* scratches ecx */
	RESET_USER_SEGMENTS $3,in_cli	/* scratches ecx */
	RESTORE_STATE_AFTER_IPC
	popl	%eax
	iret

	.globl	in_sc_ipc1
#endif

#ifdef CONFIG_JDB
/* The slow variant of sys_ipc_entry is used when logging IPC */
	.p2align 4
	.globl	entry_sys_ipc_log
entry_sys_ipc_log:
	pushl	%eax
	SAVE_STATE
	ESP_TO_TCB_AT %ebx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	testl	$Thread_alien, KSEG OFS__THREAD__STATE (%ebx)
	jnz	alien_sys_ipc_log
	RESET_THREAD_CANCEL_AT %ebx
	call	*syscall_table	/* slot 0 = (possibly logging) sys_ipc */
in_slow_ipc4:
	CHECK_SANITY $3			/* scratches ecx */
	RESET_USER_SEGMENTS $3,in_cli	/* scratches ecx */
	RESTORE_STATE_AFTER_IPC
	popl	%eax
	iret

	.globl	in_slow_ipc4
#endif // CONFIG_JDB

// these labels help show_tcb to guess the thread state
	.globl	in_syscall
	.globl	in_slowtrap
	.globl	in_page_fault
	.globl	in_handle_fputrap
	.globl	in_interrupt
	.globl	in_timer_interrupt
	.globl	in_timer_interrupt_slow

/* Generic syscall stub: %eax = address of the syscall-table slot. */
#define SYSTEM_CALL(int,name)					\
	GATE_ENTRY(int,entry_##name,ACC_PL_U | ACC_INTR_GATE)	;\
	.p2align 3						;\
entry_##name:							;\
	pushl	%eax						;\
	movl	$(syscall_table+4*(int-0x30)), %eax		;\
	jmp	all_syscalls

	.p2align 4
	.type	all_syscalls,@function
all_syscalls:
	/* NOTE(review): the trailing ";\" below looks like a leftover
	   macro-continuation from a copy/paste -- confirm it is harmless
	   with the assembler in use before removing. */
	SAVE_STATE ;\
	ESP_TO_TCB_AT %ebx
	RESET_KERNEL_SEGMENTS	/* scratches ecx, edx */
	testl	$Thread_alien, KSEG OFS__THREAD__STATE (%ebx)
	jnz	alien_sys_call
	RESET_THREAD_CANCEL_AT %ebx
	call	*(%eax)		/* interrupts enabled in wrappers */
in_syscall:
ret_from_syscall:
	CHECK_SANITY $3			/* scratches ecx */
	RESET_USER_SEGMENTS $3,no_cli	/* scratches ecx */
	RESTORE_STATE
	popl	%eax
	iret

/* SYSTEM_CALL(0x30,sys_ipc) */
SYSTEM_CALL(0x31,sys_id_nearest)
SYSTEM_CALL(0x32,sys_fpage_unmap)
SYSTEM_CALL(0x33,sys_thread_switch)
SYSTEM_CALL(0x34,sys_thread_schedule)
SYSTEM_CALL(0x35,sys_thread_ex_regs)
SYSTEM_CALL(0x36,sys_task_new)
SYSTEM_CALL(0x39,sys_u_lock)

#ifdef CONFIG_PL0_HACK
SYSTEM_CALL(0x37,sys_priv_control)

/* Privileged-entry gate: on success the wrapper leaves a target address
   below the popped stack area; we jump there with user registers
   restored. */
GATE_ENTRY(0x38,entry_sys_priv_entry,ACC_PL_U | ACC_INTR_GATE)
	.p2align 3
entry_sys_priv_entry:
	pushl	$0
	pushf
	pushl	%eax
	pushl	%edx
	pushl	%ecx
	lea	16(%esp), %eax
	call	sys_priv_entry_wrapper
	orl	%eax, %eax
	jz	1f
	/* restore registers, diverge to user function */
	popl	%ecx
	popl	%edx
	popl	%eax
	popfl
	addl	$4, %esp
	/* Accessing stack beneath the stack pointer is allowed here since we
	 * are executed with interrupts off and we don't expect any exception
	 * as well. */
	jmp	*-4(%esp)
1:	/* no privileges so just return to the user */
	addl	$20, %esp
	iret
#endif

#ifndef CONFIG_PF_UX
/* these functions are implemented in entry-ia32.S */
GATE_ENTRY(0x0a,entry_vec0a_invalid_tss,ACC_PL_K | ACC_INTR_GATE)
GATE_ENTRY(0x0f,entry_vec0f_apic_spurious_interrupt_bug,ACC_PL_K | ACC_INTR_GATE)
GATE_ENTRY(0x3e,entry_vec3e_apic_error_interrupt,ACC_PL_K | ACC_INTR_GATE)
GATE_ENTRY(0x3f,entry_vec3f_apic_spurious_interrupt,ACC_PL_K | ACC_INTR_GATE)
#endif

GATE_INITTAB_END

	.globl	alien_sys_ipc_c	/* Also used in shortcut */
alien_sys_ipc_c:
	PRE_ALIEN_IPC
	call	ipc_short_cut_wrapper
	POST_ALIEN_IPC

#if defined (CONFIG_JDB)
alien_sys_ipc_log:
	PRE_ALIEN_IPC
	call	*syscall_table
	POST_ALIEN_IPC
#endif

/*
 * input:  eax: address to syscall function
 * output: eax: error code
 */
.macro	SC_ADDR_TO_ERR val
	sub	$syscall_table , %eax	/* eax = byte offset to syscall */
	shr	$2, %eax		/* convert eax to syscall nr */
	add	$0x30, %eax		/* convert eax to syscall int nr */
	shl	$3, %eax		/* construct error code */
	orl	$\val, %eax		/* -"- */
.endm

/* Alien thread attempted a non-IPC syscall: either reflect it as a
   GP-style trap (0xd) or execute it once and trap afterwards. */
alien_sys_call:
	btrl	$17, KSEG OFS__THREAD__STATE (%ebx)	/* Thread_dis_alien */
	jc	1f
	RESTORE_STATE
	sub	$2, 4(%esp)	/* Correct EIP to point to insn */
	SC_ADDR_TO_ERR 2
2:	pushl	$0xd		/* trap number: general protection */
	xchgl	4(%esp), %eax	/* store error code, recover user eax */
	pusha
	jmp	_slowtraps

1:	/* do alien syscall and trap afterwards */
	RESET_THREAD_CANCEL_AT %ebx
	pushl	%eax		/* remember syscall-table slot address */
	call	*(%eax)		/* call with ENABLED interrupts */
	CHECK_SANITY $3			/* scratches ecx */
	RESET_USER_SEGMENTS $3,no_cli	/* scratches ecx */
	popl	%eax
	RESTORE_STATE
	SC_ADDR_TO_ERR 6
	jmp	2b

/* Force a thread into the slowtrap path with an artificial trap 0xff
   (used to deliver a deferred exception on kernel exit). */
	.p2align
	.globl	leave_by_trigger_exception
leave_by_trigger_exception:
	cli
	subl	$12,%esp	/* clean up stack from previous
				 * CPL0-CPL0 iret */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	call	thread_restore_exc_state
	popl	%edx
	popl	%ecx
	popl	%eax
	pushl	$0x00		/* fake error code */
	pushl	$0xff		/* artificial trap number */
	pusha
	jmp	_slowtraps

.section ".text.debug.stack_profiling"

/* gcc -finstrument-functions hooks: trap (int3) when the kernel stack
   inside the TCB area runs low.  NOTE(review): the cmp/jl/jg pairs use
   signed comparisons on addresses -- works here because both bounds are
   above 0x80000000, but verify if the bounds ever change. */
	.global	__cyg_profile_func_enter
	.global	__cyg_profile_func_exit

__ret_from_cyg:
	ret
__cyg_profile_func_enter:
__cyg_profile_func_exit:
	cmp	$0xc0000000, %esp	/* below the TCB area -> ignore */
	jl	__ret_from_cyg
	cmp	$0xd0000000,%esp	/* above the TCB area -> ignore */
	jg	__ret_from_cyg
	mov	%esp, %ecx
//	and	$0x7ff, %ecx	/* 2K TCBs */
	and	$0xfff, %ecx	/* 4K TCBs */
	cmp	$0x200, %ecx	/* more than 512 bytes left -> ok */
	jg	__ret_from_cyg
	int3			/* break into the debugger */
	jmp	1f
	.ascii	"STACK"		/* marker for the debugger */
1:	ret