vm changes from linux-2.6.14 to linux-2.6.15
Mark Fortescue
mark at mtfhpc.demon.co.uk
Tue May 1 23:58:07 EST 2007
On Tue, 1 May 2007, Benjamin Herrenschmidt wrote:
>
>> At present, update_mmu_cache() and lazy_mmu_prot_update() are always
>> called when ptep_set_access_flags() is called, so why not move them into
>> ptep_set_access_flags() and change ptep_set_access_flags() to take an
>> additional boolean parameter (__update) that would, when set, cause
>> update_mmu_cache() and lazy_mmu_prot_update() to be called.
>
> Well, ptep_set_access_flags() is a low-level arch hook; I'd rather not
> start hiding update_mmu_cache() calls in it ...
>
> Ben.
>
>
I have attached a patch (so pine does not mangle it) for linux-2.6.20.9.
Is this what you had in mind?
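
To make the intended behaviour easier to read than the macro form, this is
roughly what the new asm-generic version does, written here as a function
(a sketch only, not part of the patch; the _sketch name is just for
illustration):

/*
 * Sketch of the new asm-generic ptep_set_access_flags() semantics,
 * written as a function for readability.  Returns non-zero when the
 * caller should go on to call update_mmu_cache()/lazy_mmu_prot_update().
 */
static inline int ptep_set_access_flags_sketch(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, pte_t entry,
		int dirty, int update)
{
	if (update) {
		/* the pte really changed: install it and flush */
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	} else if (dirty) {
		/* write fault on an unchanged pte (likely a protection
		 * fault): flush only */
		flush_tlb_page(vma, address);
	}
	return update;
}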
For linux-2.6.21, more work will be needed, as it has more code calling
ptep_set_access_flags().
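
Those callers would get converted the same way as the mm/memory.c hunks
below, roughly like this (illustrative only; old_entry and write_access
stand for whatever state the caller already has):

	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, ptep, entry, write_access,
				  !pte_same(old_entry, entry))) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}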
Regards
Mark Fortescue.
-------------- next part --------------
diff -ruNpd linux-2.6.20.9/include/asm-generic/pgtable.h linux-test/include/asm-generic/pgtable.h
--- linux-2.6.20.9/include/asm-generic/pgtable.h 2007-05-01 12:57:56.000000000 +0100
+++ linux-test/include/asm-generic/pgtable.h 2007-05-01 14:20:05.000000000 +0100
@@ -29,11 +29,16 @@ do { \
* to a "more permissive" setting, which allows most architectures
* to optimize this.
*/
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__update) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } else if (__dirty) { \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __update; \
+})
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
diff -ruNpd linux-2.6.20.9/include/asm-i386/pgtable.h linux-test/include/asm-i386/pgtable.h
--- linux-2.6.20.9/include/asm-i386/pgtable.h 2007-05-01 12:57:56.000000000 +0100
+++ linux-test/include/asm-i386/pgtable.h 2007-05-01 13:43:38.000000000 +0100
@@ -273,14 +273,17 @@ static inline pte_t pte_mkhuge(pte_t pte
* bit at the same time.
*/
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-do { \
- if (dirty) { \
- (ptep)->pte_low = (entry).pte_low; \
- pte_update_defer((vma)->vm_mm, (address), (ptep)); \
- flush_tlb_page(vma, address); \
- } \
-} while (0)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__dirty) { \
+ if (__update) { \
+ (__ptep)->pte_low = (__entry).pte_low; \
+ pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
+ } \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __update; \
+})
/*
* We don't actually have these, but we want to advertise them so that
diff -ruNpd linux-2.6.20.9/include/asm-ia64/pgtable.h linux-test/include/asm-ia64/pgtable.h
--- linux-2.6.20.9/include/asm-ia64/pgtable.h 2007-04-30 19:04:59.000000000 +0100
+++ linux-test/include/asm-ia64/pgtable.h 2007-05-01 13:48:07.000000000 +0100
@@ -537,16 +537,24 @@ extern void lazy_mmu_prot_update (pte_t
* daccess_bit in ivt.S).
*/
#ifdef CONFIG_SMP
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
- if (__safely_writable) { \
- set_pte(__ptep, __entry); \
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__dirty) { \
+ if (__update) \
+ set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __addr); \
} \
-} while (0)
+ __update; \
+})
#else
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
- ptep_establish(__vma, __addr, __ptep, __entry)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__update) \
+ ptep_establish(__vma, __addr, __ptep, __entry); \
+ else if (__dirty) \
+ flush_tlb_page(__vma, __addr); \
+ __update; \
+})
#endif
# ifdef CONFIG_VIRTUAL_MEM_MAP
diff -ruNpd linux-2.6.20.9/include/asm-powerpc/pgtable.h linux-test/include/asm-powerpc/pgtable.h
--- linux-2.6.20.9/include/asm-powerpc/pgtable.h 2007-05-01 12:57:56.000000000 +0100
+++ linux-test/include/asm-powerpc/pgtable.h 2007-05-01 13:58:56.000000000 +0100
@@ -437,11 +437,15 @@ static inline void __ptep_set_access_fla
:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
:"cc");
}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__update) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } else if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __update; \
+})
/*
* Macro to mark a page protection value as "uncacheable".
diff -ruNpd linux-2.6.20.9/include/asm-ppc/pgtable.h linux-test/include/asm-ppc/pgtable.h
--- linux-2.6.20.9/include/asm-ppc/pgtable.h 2007-04-30 19:05:00.000000000 +0100
+++ linux-test/include/asm-ppc/pgtable.h 2007-05-01 13:50:30.000000000 +0100
@@ -693,11 +693,15 @@ static inline void __ptep_set_access_fla
pte_update(ptep, 0, bits);
}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+({ \
+ if (__update) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } else if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __update; \
+})
/*
* Macro to mark a page protection value as "uncacheable".
diff -ruNpd linux-2.6.20.9/include/asm-s390/pgtable.h linux-test/include/asm-s390/pgtable.h
--- linux-2.6.20.9/include/asm-s390/pgtable.h 2007-05-01 12:57:56.000000000 +0100
+++ linux-test/include/asm-s390/pgtable.h 2007-05-01 14:23:24.000000000 +0100
@@ -628,8 +628,14 @@ ptep_establish(struct vm_area_struct *vm
set_pte(ptep, entry);
}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- ptep_establish(__vma, __address, __ptep, __entry)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+ ({ \
+ if (__update) \
+ ptep_establish(__vma, __address, __ptep, __entry); \
+ else if (__dirty) \
+ flush_tlb_page(__vma, __address); \
+ __update; \
+ })
/*
* Test and clear dirty bit in storage key.
diff -ruNpd linux-2.6.20.9/include/asm-sparc/pgtable.h linux-test/include/asm-sparc/pgtable.h
--- linux-2.6.20.9/include/asm-sparc/pgtable.h 2007-04-30 19:05:01.000000000 +0100
+++ linux-test/include/asm-sparc/pgtable.h 2007-05-01 14:32:35.000000000 +0100
@@ -446,6 +446,27 @@ extern int io_remap_pfn_range(struct vm_
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+({ \
+ int __ret; \
+ \
+ if (sparc_cpu_model == sun4c) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ __ret = 1; \
+ } else { \
+ if (__update) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } else if (__dirty) { \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __ret = __update; \
+ } \
+ __ret; \
+})
+
#include <asm-generic/pgtable.h>
#endif /* !(__ASSEMBLY__) */
diff -ruNpd linux-2.6.20.9/include/asm-x86_64/pgtable.h linux-test/include/asm-x86_64/pgtable.h
--- linux-2.6.20.9/include/asm-x86_64/pgtable.h 2007-05-01 12:57:57.000000000 +0100
+++ linux-test/include/asm-x86_64/pgtable.h 2007-05-01 13:37:08.000000000 +0100
@@ -396,13 +396,15 @@ static inline pte_t pte_modify(pte_t pte
* race with other CPU's that might be updating the dirty
* bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- if (__dirty) { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
- } \
- } while (0)
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty, __update) \
+ ({ \
+ if (__dirty) { \
+ if (__update) \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __update; \
+ })
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
diff -ruNpd linux-2.6.20.9/mm/hugetlb.c linux-test/mm/hugetlb.c
--- linux-2.6.20.9/mm/hugetlb.c 2007-05-01 13:01:10.000000000 +0100
+++ linux-test/mm/hugetlb.c 2007-05-01 13:17:26.000000000 +0100
@@ -313,9 +313,10 @@ static void set_huge_ptep_writable(struc
pte_t entry;
entry = pte_mkwrite(pte_mkdirty(*ptep));
- ptep_set_access_flags(vma, address, ptep, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
+ if (ptep_set_access_flags(vma, address, ptep, entry, 1, 1)) {
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ }
}
diff -ruNpd linux-2.6.20.9/mm/memory.c linux-test/mm/memory.c
--- linux-2.6.20.9/mm/memory.c 2007-05-01 12:57:57.000000000 +0100
+++ linux-test/mm/memory.c 2007-05-01 14:26:38.000000000 +0100
@@ -1553,9 +1553,11 @@ static int do_wp_page(struct mm_struct *
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
+ if (ptep_set_access_flags(vma, address, page_table,
+ entry, 1, 1)) {
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ }
ret |= VM_FAULT_WRITE;
goto unlock;
}
@@ -2423,19 +2425,10 @@ static inline int handle_pte_fault(struc
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (!pte_same(old_entry, entry)) {
- ptep_set_access_flags(vma, address, pte, entry, write_access);
+ if (ptep_set_access_flags(vma, address, pte, entry, write_access,
+ !pte_same(old_entry, entry))) {
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
- } else {
- /*
- * This is needed only for protection faults but the arch code
- * is not yet telling us if this is a protection fault or not.
- * This still avoids useless tlb flushes for .text page faults
- * with threads.
- */
- if (write_access)
- flush_tlb_page(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);