Re: [PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co [try #2]

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Mon, 2006-10-16 at 14:10 +0100, David Howells wrote:
> From: David Howells <[email protected]>
> 
> Use inc/dec_preempt_count() rather than preempt_enable/disable() and manually
> add in the compiler barriers that were provided by the latter.  This makes FRV
> consistent with other archs.
> 
> Furthermore, the compiler barrier effects are now there unconditionally - at
> least as far as preemption is concerned - because we don't want the compiler
> moving memory accesses out of the section of code in which the mapping is in
> force - in effect the kmap_atomic() must imply a LOCK-class barrier and the
> kunmap_atomic() must imply an UNLOCK-class barrier to the compiler.
> 
> Signed-off-by: David Howells <[email protected]>

Acked-by: Peter Zijlstra <[email protected]>

> ---
> 
>  include/asm-frv/highmem.h |   27 ++++++++++++++-------------
>  1 files changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
> index e2247c2..0f390f4 100644
> --- a/include/asm-frv/highmem.h
> +++ b/include/asm-frv/highmem.h
> @@ -82,11 +82,11 @@ ({												\
>  	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;		\
>  												\
>  	if (type != __KM_CACHE)									\
> -		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));				\
> +		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");			\
>  	else											\
>  		asm volatile("movgs %0,iampr"#ampr"\n"						\
>  			     "movgs %0,dampr"#ampr"\n"						\
> -			     :: "r"(dampr)							\
> +			     :: "r"(dampr) : "memory"						\
>  			     );									\
>  												\
>  	asm("movsg damlr"#ampr",%0" : "=r"(damlr));						\
> @@ -104,7 +104,7 @@ ({												  \
>  	asm volatile("movgs %0,tplr \n"								  \
>  		     "movgs %1,tppr \n"								  \
>  		     "tlbpr %0,gr0,#2,#1"							  \
> -		     : : "r"(damlr), "r"(dampr));						  \
> +		     : : "r"(damlr), "r"(dampr) : "memory");					  \
>  												  \
>  	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/			  \
>  												  \
> @@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct p
>  {
>  	unsigned long paddr;
>  
> -	preempt_disable();
> +	inc_preempt_count();
>  	paddr = page_to_phys(page);
>  
>  	switch (type) {
> @@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct p
>  	}
>  }
>  
> -#define __kunmap_atomic_primary(type, ampr)			\
> -do {								\
> -	asm volatile("movgs gr0,dampr"#ampr"\n");		\
> -	if (type == __KM_CACHE)					\
> -		asm volatile("movgs gr0,iampr"#ampr"\n");	\
> +#define __kunmap_atomic_primary(type, ampr)				\
> +do {									\
> +	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
> +	if (type == __KM_CACHE)						\
> +		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
>  } while(0)
>  
> -#define __kunmap_atomic_secondary(slot, vaddr)			\
> -do {								\
> -	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));	\
> +#define __kunmap_atomic_secondary(slot, vaddr)				\
> +do {									\
> +	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
>  } while(0)
>  
>  static inline void kunmap_atomic(void *kvaddr, enum km_type type)
> @@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *k
>  	default:
>  		BUG();
>  	}
> -	preempt_enable();
> +	dec_preempt_count();
> +	preempt_check_resched();
>  }
>  
>  #endif /* !__ASSEMBLY__ */

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

[Index of Archives]     [Kernel Newbies]     [Netfilter]     [Bugtraq]     [Photo]     [Stuff]     [Gimp]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Video 4 Linux]     [Linux for the blind]     [Linux Resources]
  Powered by Linux