Merge branch 'akpm' (patches from Andrew)
[muen/linux.git] mm/memory.c
index 5d8c2afb07307d03fa5b9e8c55ff122884f11f50..7206a634270be3641e2255aa4c9d9eee68daed51 100644
@@ -817,17 +817,12 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-#ifdef __HAVE_ARCH_PTE_SPECIAL
-# define HAVE_PTE_SPECIAL 1
-#else
-# define HAVE_PTE_SPECIAL 0
-#endif
 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte, bool with_public_device)
 {
        unsigned long pfn = pte_pfn(pte);
 
-       if (HAVE_PTE_SPECIAL) {
+       if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
                if (vma->vm_ops && vma->vm_ops->find_special_page)
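The hunk above replaces the hand-rolled HAVE_PTE_SPECIAL shim with IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL). The point of the IS_ENABLED() idiom is that both branches stay visible to the compiler: the disabled path is still parsed and type-checked, then constant-folded away. A minimal userspace sketch of the preprocessor trick behind it, simplified from include/linux/kconfig.h (the real macro also handles the =m module case):

    #include <stdio.h>

    /* Kconfig defines enabled options as 1; the placeholder/paste
     * trick maps "defined as 1" to 1 and anything else, including an
     * undefined symbol, to 0, yielding a plain compile-time constant. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_FOO 1            /* pretend Kconfig enabled this */
    /* CONFIG_BAR deliberately left undefined */

    int main(void)
    {
            /* Both bodies are compiled; the dead one is folded away. */
            if (IS_ENABLED(CONFIG_FOO))
                    printf("FOO is built in\n");
            if (IS_ENABLED(CONFIG_BAR))
                    printf("BAR is built in\n"); /* never printed */
            return 0;
    }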
@@ -862,7 +857,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                return NULL;
        }
 
-       /* !HAVE_PTE_SPECIAL case follows: */
+       /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
@@ -881,6 +876,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
        if (is_zero_pfn(pfn))
                return NULL;
+
 check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
@@ -904,7 +900,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
        /*
         * There is no pmd_special(), but there may be special pmds, e.g.
         * in a direct-access (dax) mapping, so let's just replicate the
-        * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+        * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
         */
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
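Both this hunk and the _vm_normal_page() one above lean on the same flag-based fallback: with no pte_special()/pmd_special() bit to consult, "does this pfn have a struct page?" must be answered from the vma alone. A hypothetical helper (not in the kernel; the name is invented for illustration) that captures the classification both functions open-code:

    /* Hypothetical sketch, not kernel code: the flag-only test that
     * vm_normal_page() and vm_normal_page_pmd() each replicate.
     * Returns true when the pfn has no struct page to hand back. */
    static bool pfn_has_no_page(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long pfn)
    {
            if (vma->vm_flags & VM_MIXEDMAP)
                    return !pfn_valid(pfn); /* no memmap entry at all */

            if (vma->vm_flags & VM_PFNMAP) {
                    /* A linear PFNMAP maps pfn vm_pgoff + N at page N;
                     * a pfn still obeying that rule is the raw remap... */
                    unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

                    if (pfn == vma->vm_pgoff + off)
                            return true;
                    /* ...while one that stopped obeying it can only be
                     * a CoWed page, which only COWable mappings have. */
                    return !is_cow_mapping(vma->vm_flags);
            }
            return false;
    }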
@@ -1932,7 +1928,8 @@ static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would then be refcounted as a normal page.
         */
-       if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
+           !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
                struct page *page;
 
                /*
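For context, the comment above guards a dispatch of roughly this shape in __vm_insert_mixed() (reconstructed as a sketch, not quoted verbatim): on a !pte_special architecture, a valid, non-devmap pfn must take the insert_page() route so its struct page is refcounted, because vm_normal_page() would later treat the resulting pte as a normal page; everything else is installed as a special pte via insert_pfn().

    if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
        !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
            struct page *page;

            /* vm_normal_page() will see a normal page here, so take
             * the refcounted insertion path. */
            page = pfn_t_to_page(pfn);
            return insert_page(vma, addr, page, pgprot);
    }
    /* Special (or devmap) pfn: install it without a struct page. */
    return insert_pfn(vma, addr, pfn, pgprot, mkwrite);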
@@ -1954,12 +1951,25 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_mixed);
 
-int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
-                       pfn_t pfn)
+/*
+ *  If the insertion of the PTE failed because someone else already added a
+ *  different entry in the meantime, we treat that as success, as we
+ *  assume the same entry was actually inserted.
+ */
+
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+               unsigned long addr, pfn_t pfn)
 {
-       return __vm_insert_mixed(vma, addr, pfn, true);
+       int err;
+
+       err = __vm_insert_mixed(vma, addr, pfn, true);
+       if (err == -ENOMEM)
+               return VM_FAULT_OOM;
+       if (err < 0 && err != -EBUSY)
+               return VM_FAULT_SIGBUS;
+       return VM_FAULT_NOPAGE;
 }
-EXPORT_SYMBOL(vm_insert_mixed_mkwrite);
+EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
 
 /*
  * maps a range of physical memory into the requested pages. the old
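The last hunk belongs to the tree-wide errno-to-vm_fault_t conversion: the insertion helper now performs the -ENOMEM/-EBUSY/errno translation once, so fault handlers can return its result directly. A hypothetical caller fragment (the function name is invented; the real user of this interface is the DAX fault path):

    /* Hypothetical sketch: with the vm_fault_t variant, the handler
     * returns the result as-is instead of switch()ing on an errno at
     * every call site, as callers of vm_insert_mixed_mkwrite() did. */
    static vm_fault_t example_mkwrite_fault(struct vm_fault *vmf, pfn_t pfn)
    {
            return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
    }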