[PATCH] ARM: Remove the domain switching on ARMv6k/v7 CPUs - update

Tim Gardner tim.gardner at canonical.com
Tue Jul 6 12:51:20 UTC 2010


On 07/06/2010 03:09 AM, Sebastien Jan wrote:
> From: Catalin Marinas <catalin.marinas at arm.com>
>
> This is an update of the patch from Catalin Marinas:
> Remove the domain switching on ARMv6k/v7 CPUs
>
> The first version of this patch has already been integrated;
> this one brings it up to Catalin's v4 revision:
> http://www.spinics.net/lists/arm-kernel/msg91260.html
>
> This patch fixes spurious segmentation faults seen with a Maverick
> filesystem on OMAP4.
>
> Signed-off-by: Sebastien Jan <s-jan at ti.com>
>
> ---
> This patch replaces the previous patch sent for review, titled:
> Fix spurious segmentation faults
> ---
>   arch/arm/include/asm/assembler.h |   12 ++++++------
>   arch/arm/include/asm/traps.h     |    2 ++
>   arch/arm/kernel/fiq.c            |    5 +++++
>   arch/arm/kernel/traps.c          |   14 ++++++++++----
>   arch/arm/mm/Kconfig              |    3 +--
>   arch/arm/mm/mmu.c                |    6 +++---
>   arch/arm/mm/proc-macros.S        |    9 ++++++++-
>   arch/arm/mm/proc-v7.S            |    3 +++
>   8 files changed, 38 insertions(+), 16 deletions(-)
>
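
A note for anyone else reviewing this: as I read Catalin's v4, the idea
is that ARMv6k/v7 kernels stop flipping the DACR into "manager" mode for
uaccess and instead rely on the unprivileged load/store instructions.
A minimal sketch of the historical behaviour being removed (hedged; the
real definition lives in arch/arm/include/asm/uaccess.h):

    static inline void set_fs(mm_segment_t fs)
    {
            current_thread_info()->addr_limit = fs;
            /* KERNEL_DS (0) put the kernel domain into manager mode,
             * which bypasses the page permission bits entirely for
             * every subsequent access. */
            modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
    }

Since manager mode ignores access permissions altogether, removing it
means the page tables themselves must express exactly what the kernel
may touch, hence the rest of the series.
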
> diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
> index eb50c6a..66db132 100644
> --- a/arch/arm/include/asm/assembler.h
> +++ b/arch/arm/include/asm/assembler.h
> @@ -184,12 +184,12 @@
>    */
>   #ifdef CONFIG_THUMB2_KERNEL
>
> -	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
> +	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
>   9999:
>   	.if	\inc == 1
> -	T(\instr\cond\()b) \reg, [\ptr, #\off]
> +	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
>   	.elseif	\inc == 4
> -	T(\instr\cond\()) \reg, [\ptr, #\off]
> +	\instr\cond\()\t\().w \reg, [\ptr, #\off]
>   	.else
>   	.error	"Unsupported inc macro argument"
>   	.endif
> @@ -224,13 +224,13 @@
>
>   #else	/* !CONFIG_THUMB2_KERNEL */
>
> -	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
> +	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
>   	.rept	\rept
>   9999:
>   	.if	\inc == 1
> -	T(\instr\cond\()b) \reg, [\ptr], #\inc
> +	\instr\cond\()b\()\t \reg, [\ptr], #\inc
>   	.elseif	\inc == 4
> -	T(\instr\cond\()) \reg, [\ptr], #\inc
> +	\instr\cond\()\t \reg, [\ptr], #\inc
>   	.else
>   	.error	"Unsupported inc macro argument"
>   	.endif
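
The new 't' macro argument is what lets the uaccess macros emit the
unprivileged variants (ldrt/strt and friends) when domains are compiled
out: those encodings make the MMU check the *user* access permissions
even though the CPU is running privileged. The .w suffixes in the
Thumb-2 path force the 32-bit encodings, since there are no 16-bit
ldrt/strt forms. A hedged C-level sketch of the mechanism (hypothetical
helper, not the kernel's real __get_user, and the exception-table fixup
for faulting addresses is omitted):

    static inline int get_user_word(unsigned long *val,
                                    const unsigned long __user *ptr)
    {
            unsigned long tmp;
            /* ldrt issues the load with user-mode permissions, so a
             * read from an unmapped or kernel-only address faults
             * just as it would from userspace. */
            asm volatile("ldrt %0, [%1]" : "=r" (tmp) : "r" (ptr));
            *val = tmp;
            return 0;
    }
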
> diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
> index 491960b..af5d5d1 100644
> --- a/arch/arm/include/asm/traps.h
> +++ b/arch/arm/include/asm/traps.h
> @@ -27,4 +27,6 @@ static inline int in_exception_text(unsigned long ptr)
>   extern void __init early_trap_init(void);
>   extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
>
> +extern void *vectors_page;
> +
>   #endif
> diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
> index 6ff7919..d601ef2 100644
> --- a/arch/arm/kernel/fiq.c
> +++ b/arch/arm/kernel/fiq.c
> @@ -45,6 +45,7 @@
>   #include <asm/fiq.h>
>   #include <asm/irq.h>
>   #include <asm/system.h>
> +#include <asm/traps.h>
>
>   static unsigned long no_fiq_insn;
>
> @@ -77,7 +78,11 @@ int show_fiq_list(struct seq_file *p, void *v)
>
>   void set_fiq_handler(void *start, unsigned int length)
>   {
> +#if defined(CONFIG_CPU_USE_DOMAINS)
>   	memcpy((void *)0xffff001c, start, length);
> +#else
> +	memcpy(vectors_page + 0x1c, start, length);
> +#endif
>   	flush_icache_range(0xffff001c, 0xffff001c + length);
>   	if (!vectors_high())
>   		flush_icache_range(0x1c, 0x1c + length);
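
As I read it, with domains disabled the high vectors mapping at
0xffff0000 is no longer writable by the kernel, so the FIQ handler
bytes have to go in through the kernel r/w alias of the same physical
page (the vectors_page pointer this series exports from traps.c).
Roughly:

    /* two views of one physical page */
    memcpy(vectors_page + 0x1c, start, length);     /* kernel r/w alias */
    flush_icache_range(0xffff001c,                  /* CPU-visible view */
                       0xffff001c + length);
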
> diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
> index 0330a40..6394d43 100644
> --- a/arch/arm/kernel/traps.c
> +++ b/arch/arm/kernel/traps.c
> @@ -37,6 +37,8 @@
>
>   static const char *handler[] = { "prefetch abort", "data abort", "address exception", "interrupt" };
>
> +void *vectors_page;
> +
>   #ifdef CONFIG_DEBUG_USER
>   unsigned int user_debug;
>
> @@ -762,7 +764,11 @@ void __init trap_init(void)
>
>   void __init early_trap_init(void)
>   {
> +#if defined(CONFIG_CPU_USE_DOMAINS)
>   	unsigned long vectors = CONFIG_VECTORS_BASE;
> +#else
> +	unsigned long vectors = (unsigned long)vectors_page;
> +#endif
>   	extern char __stubs_start[], __stubs_end[];
>   	extern char __vectors_start[], __vectors_end[];
>   	extern char __kuser_helper_start[], __kuser_helper_end[];
> @@ -791,10 +797,10 @@ void __init early_trap_init(void)
>   	 * Copy signal return handlers into the vector page, and
>   	 * set sigreturn to be a pointer to these.
>   	 */
> -	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
> -	       sizeof(sigreturn_codes));
> -	memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
> -	       sizeof(syscall_restart_code));
> +	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
> +	       sigreturn_codes, sizeof(sigreturn_codes));
> +	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
> +	       syscall_restart_code, sizeof(syscall_restart_code));
>
>   #ifndef CONFIG_CPU_USE_DOMAINS
>   	/* restore the vectors page permissions */
> diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
> index 3b07461..8f4dee8 100644
> --- a/arch/arm/mm/Kconfig
> +++ b/arch/arm/mm/Kconfig
> @@ -600,8 +600,7 @@ config CPU_CP15_MPU
>   config CPU_USE_DOMAINS
>   	bool
>   	depends on MMU
> -	default n if HAS_TLS_REG
> -	default y
> +	default y if !HAS_TLS_REG
>   	help
>   	  This option enables or disables the use of domain switching
>   	  via the set_fs() function.
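
The Kconfig change is behaviour-preserving, just tighter:

    /* old: default n if HAS_TLS_REG; default y
     * new: default y if !HAS_TLS_REG
     * both give: HAS_TLS_REG=y -> n,  HAS_TLS_REG=n -> y */

Which lines up with the subject: the CPUs with a hardware TLS register
are, as I understand it, exactly the ARMv6k/v7 parts this series
targets.
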
> diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
> index 241c24a..4b0f4b0 100644
> --- a/arch/arm/mm/mmu.c
> +++ b/arch/arm/mm/mmu.c
> @@ -24,6 +24,7 @@
>   #include <asm/smp_plat.h>
>   #include <asm/tlb.h>
>   #include <asm/highmem.h>
> +#include <asm/traps.h>
>
>   #include <asm/mach/arch.h>
>   #include <asm/mach/map.h>
> @@ -933,12 +934,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
>   {
>   	struct map_desc map;
>   	unsigned long addr;
> -	void *vectors;
>
>   	/*
>   	 * Allocate the vector page early.
>   	 */
> -	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
> +	vectors_page = alloc_bootmem_low_pages(PAGE_SIZE);
>
>   	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
>   		pmd_clear(pmd_off_k(addr));
> @@ -978,7 +978,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
>   	 * location (0xffff0000).  If we aren't using high-vectors, also
>   	 * create a mapping at the low-vectors virtual address.
>   	 */
> -	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
> +	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
>   	map.virtual = 0xffff0000;
>   	map.length = PAGE_SIZE;
>   	map.type = MT_HIGH_VECTORS;
> diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
> index 7d63bea..1ccf579 100644
> --- a/arch/arm/mm/proc-macros.S
> +++ b/arch/arm/mm/proc-macros.S
> @@ -99,7 +99,11 @@
>    *  110x   0   1   0	r/w	r/o
>    *  11x0   0   1   0	r/w	r/o
>    *  1111   0   1   1	r/w	r/w
> - */
> + *
> + * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
> + *  110x   1   1   1	r/o	r/o
> + *  11x0   1   1   1	r/o	r/o
> + */
>   	.macro	armv6_mt_table pfx
>   \pfx\()_mt_table:
>   	.long	0x00						@ L_PTE_MT_UNCACHED
> @@ -138,8 +142,11 @@
>
>   	tst	r1, #L_PTE_USER
>   	orrne	r3, r3, #PTE_EXT_AP1
> +#ifdef CONFIG_CPU_USE_DOMAINS
> +	@ allow kernel read/write access to read-only user pages
>   	tstne	r3, #PTE_EXT_APX
>   	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
> +#endif
>
>   	tst	r1, #L_PTE_EXEC
>   	orreq	r3, r3, #PTE_EXT_XN
> diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
> index df74916..c1c3fe0 100644
> --- a/arch/arm/mm/proc-v7.S
> +++ b/arch/arm/mm/proc-v7.S
> @@ -152,8 +152,11 @@ ENTRY(cpu_v7_set_pte_ext)
>
>   	tst	r1, #L_PTE_USER
>   	orrne	r3, r3, #PTE_EXT_AP1
> +#ifdef CONFIG_CPU_USE_DOMAINS
> +	@ allow kernel read/write access to read-only user pages
>   	tstne	r3, #PTE_EXT_APX
>   	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
> +#endif
>
>   	tst	r1, #L_PTE_EXEC
>   	orreq	r3, r3, #PTE_EXT_XN
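
These two set_pte_ext hunks are the heart of the series. The bic that
is now conditional was the fixup that mapped read-only user pages as
kernel read/write (per the old "allow kernel read/write access" comment
above it); with strt-based uaccess the kernel's stores to user memory
are permission-checked anyway, so the fixup can go and user r/o really
means r/o, as the new rows in the proc-macros.S comment table show.
A hedged sketch of the encoding being toggled (APX:AP1:AP0 bits;
double-check against PTE_EXT_APX/PTE_EXT_AP* in the pgtable headers):

    /* user r/o page, with domains:    APX=0 AP=10 -> kernel r/w, user r/o
     * user r/o page, without domains: APX=1 AP=10 -> kernel r/o, user r/o */
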

applied

-- 
Tim Gardner tim.gardner at canonical.com