51007004f4
In general we want to avoid ever touching memory while within an interrupt critical section, since page faults taken within an interrupt critical section go through a different path in the hypervisor, and we deliberately decided with tilegx that we didn't need to support that path in the kernel.  (On tilepro we did implement that path as part of supporting atomic instructions in software.)

In practice we always need to touch the kernel stack, since that's where we store the interrupt state before releasing the critical section, but this change cleans up a few things.  The IRQ_ENABLE macro is split up so that when we want to enable interrupts in a deferred way (e.g. for cpu_idle or for interrupt return) we can read the per-cpu enable mask before entering the critical section.  The cache-migration code is changed to use interrupt masking instead of interrupt critical sections.  And the interrupt-entry code is changed so that we defer loading "tp" from per-cpu data until after we have released the interrupt critical section.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
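The listing below contains one user of the new split, in _cpu_idle: IRQ_ENABLE_LOAD runs before the critical section is raised and IRQ_ENABLE_APPLY runs with it already set.  As a rough sketch of what the split itself might look like in <asm/irqflags.h> on tilegx (only the two macro names and the load-before/apply-inside-ICS division are taken from this change; the GET_INTERRUPTS_ENABLED_MASK_PTR helper, the SPR name, and the exact bodies are illustrative assumptions):

	/* Read the per-cpu interrupt enable mask into a register.  This is
	 * the half that touches memory, so it must run before the caller
	 * raises INTERRUPT_CRITICAL_SECTION.  (Illustrative sketch only.) */
	#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
		GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
		ld	tmp0, tmp0

	/* Apply the mask loaded above.  No memory access here, so this half
	 * is safe to run with ICS already set. */
	#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
		mtspr	SPR_INTERRUPT_MASK_RESET_K, tmp0

	/* The ordinary, non-deferred form is just the two halves composed. */
	#define IRQ_ENABLE(tmp0, tmp1) \
		IRQ_ENABLE_LOAD(tmp0, tmp1); \
		IRQ_ENABLE_APPLY(tmp0, tmp1)

In _cpu_idle below, the two halves straddle the mtspr that sets INTERRUPT_CRITICAL_SECTION, which is exactly the deferred enable described above.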
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <arch/abi.h>
#include <arch/spr_def.h>

#ifdef __tilegx__
#define bnzt bnezt
#endif

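/* Return our return address (lr); callers use it as "the current text address". */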
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)

/*
 * Implement execve(). The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be
 * careful not to write to the stack here.
 */
STD_ENTRY(kernel_execve)
	moveli TREG_SYSCALL_NR_NAME, __NR_execve
	swint1
	jrp lr
	STD_ENDPROC(kernel_execve)

/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process. See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
	.pushsection .data
ENTRY(__rt_sigreturn)
	moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
	swint1
	ENDPROC(__rt_sigreturn)
	ENTRY(__rt_sigreturn_end)
	.popsection

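/*
 * The two trampolines below hand off to C code without touching the
 * stack: each captures the caller's lr, sp, and frame pointer (r52) in
 * fixed registers, and uses "lnk" plus a PC-relative offset to put its
 * own entry address in r1, before jumping to the real implementation.
 */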
STD_ENTRY(dump_stack)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, dump_stack - . }
	{ move r3, sp; j _dump_stack }
	jrp lr		/* keep backtracer happy */
	STD_ENDPROC(dump_stack)

STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr		/* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)

/*
 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
 * free the old stack (passed in r0) and re-invoke cpu_idle().
 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
 */
STD_ENTRY(cpu_idle_on_new_stack)
	{
	 move sp, r1
	 mtspr SPR_SYSTEM_SAVE_K_0, r2
	}
	jal free_thread_info
	j cpu_idle
	STD_ENDPROC(cpu_idle_on_new_stack)

/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	nop		/* avoid provoking the icache prefetch with a jump */
	j smp_nap	/* we are not architecturally guaranteed not to exit nap */
	jrp lr		/* clue in the backtracer */
	STD_ENDPROC(smp_nap)

/*
 * Enable interrupts racelessly and then nap until interrupted.
 * Architecturally, we are guaranteed that enabling interrupts via
 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
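/*
 * Sketch of the fixup the interrupt-entry code makes (the real code is
 * in intvec.S; the SPR name here is an assumption and this is pseudocode,
 * not the actual instruction sequence):
 *
 *	if (EX_CONTEXT_K_0 == &_cpu_idle_nap)
 *		EX_CONTEXT_K_0 += 8;		(skip only the nap bundle)
 *
 * so the interrupt return resumes just past the nap and falls through
 * to the "jrp lr", returning to _cpu_idle's caller with interrupts on.
 */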
STD_ENTRY(_cpu_idle)
	movei r1, 1
	IRQ_ENABLE_LOAD(r2, r3)
	mtspr INTERRUPT_CRITICAL_SECTION, r1
	IRQ_ENABLE_APPLY(r2, r3)	/* unmask, but still with ICS set */
	mtspr INTERRUPT_CRITICAL_SECTION, zero
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	nop		/* avoid provoking the icache prefetch with a jump */
	jrp lr
	STD_ENDPROC(_cpu_idle)