Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 May 2018 16:35:11 +0000 (09:35 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 25 May 2018 16:35:11 +0000 (09:35 -0700)
Pull more arm64 fixes from Will Deacon:

 - fix application of read-only permissions to kernel section mappings

 - sanitise reported ESR values for signals delivered on a kernel
   address

 - ensure tishift GCC helpers are exported to modules

 - fix inline asm constraints for some LSE atomics

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Make sure permission updates happen for pmd/pud
  arm64: fault: Don't leak data in ESR context for user fault on kernel VA
  arm64: export tishift functions to modules
  arm64: lse: Add early clobbers to some input/output asm operands

arch/arm64/include/asm/atomic_lse.h
arch/arm64/kernel/arm64ksyms.c
arch/arm64/lib/tishift.S
arch/arm64/mm/fault.c
arch/arm64/mm/mmu.c

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 9ef0797380cbbdf182a86e934c2eec5aa97d889d..f9b0b09153e0eaa3b15728fd42471c77c2d1955a 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
        /* LSE atomics */
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
+       : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
 }
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v)                \
        /* LSE atomics */                                               \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]")                    \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
        /* LSE atomics */
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
+       : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
 }
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
        "       add     %w[i], %w[i], w30")                             \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS , ##cl);                                     \
                                                                        \
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v)                \
        /* LSE atomics */                                               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]")                    \
-       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
        /* LSE atomics */
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
+       : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
 }
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v)  \
        /* LSE atomics */                                               \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]")                      \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
        /* LSE atomics */
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
+       : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
 }
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
        "       add     %[i], %[i], x30")                               \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)  \
        /* LSE atomics */                                               \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]")                      \
-       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        "       sub     x30, x30, %[ret]\n"
        "       cbnz    x30, 1b\n"
        "2:")
-       : [ret] "+r" (x0), [v] "+Q" (v->counter)
+       : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");
 
@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1,               \
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]")                     \
-       : [old1] "+r" (x0), [old2] "+r" (x1),                           \
+       : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 66be504edb6cf5be422afa59d82aa2db4fd3ed7f..d894a20b70b28f709f776d0330edb598283aecad 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
        /* arm-smccc */
 EXPORT_SYMBOL(__arm_smccc_smc);
 EXPORT_SYMBOL(__arm_smccc_hvc);
+
+       /* tishift.S */
+extern long long __ashlti3(long long a, int b);
+EXPORT_SYMBOL(__ashlti3);
+extern long long __ashrti3(long long a, int b);
+EXPORT_SYMBOL(__ashrti3);
+extern long long __lshrti3(long long a, int b);
+EXPORT_SYMBOL(__lshrti3);
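
tishift.S provides the 128-bit shift helpers that would normally come from
libgcc, which the kernel does not link against; the exports above make them
visible to loadable modules as well. A hedged sketch of module code that can
end up needing them (function name hypothetical): a variable shift of an
__int128 value may be lowered by the compiler to a call to __ashlti3(), which
previously failed to resolve at module load time.

  /* Hypothetical module helper: the variable shift below may be emitted
   * by the compiler as a call to __ashlti3() rather than inline shifts. */
  static __int128 shl128(__int128 v, unsigned int n)
  {
          return v << n;
  }
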
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index d3db9b2cd479bebb64e0dd0b3c18edfef3d552b0..0fdff97794debbdfaae4a146a99b4550e670ab5e 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -1,17 +1,6 @@
-/*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  */
 
 #include <linux/linkage.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4165485e8b6ecbc60f161d98c20139992877c416..2af3dd89bcdbed669238b10defa7fc7deb1e2640 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 static void __do_user_fault(struct siginfo *info, unsigned int esr)
 {
        current->thread.fault_address = (unsigned long)info->si_addr;
+
+       /*
+        * If the faulting address is in the kernel, we must sanitize the ESR.
+        * From userspace's point of view, kernel-only mappings don't exist
+        * at all, so we report them as level 0 translation faults.
+        * (This is not quite the way that "no mapping there at all" behaves:
+        * an alignment fault not caused by the memory type would take
+        * precedence over translation fault for a real access to empty
+        * space. Unfortunately we can't easily distinguish "alignment fault
+        * not caused by memory type" from "alignment fault caused by memory
+        * type", so we ignore this wrinkle and just return the translation
+        * fault.)
+        */
+       if (current->thread.fault_address >= TASK_SIZE) {
+               switch (ESR_ELx_EC(esr)) {
+               case ESR_ELx_EC_DABT_LOW:
+                       /*
+                        * These bits provide only information about the
+                        * faulting instruction, which userspace knows already.
+                        * We explicitly clear bits which are architecturally
+                        * RES0 in case they are given meanings in future.
+                        * We always report the ESR as if the fault was taken
+                        * to EL1 and so ISV and the bits in ISS[23:14] are
+                        * clear. (In fact it always will be a fault to EL1.)
+                        */
+                       esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
+                               ESR_ELx_CM | ESR_ELx_WNR;
+                       esr |= ESR_ELx_FSC_FAULT;
+                       break;
+               case ESR_ELx_EC_IABT_LOW:
+                       /*
+                        * Claim a level 0 translation fault.
+                        * All other bits are architecturally RES0 for faults
+                        * reported with that DFSC value, so we clear them.
+                        */
+                       esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
+                       esr |= ESR_ELx_FSC_FAULT;
+                       break;
+               default:
+                       /*
+                        * This should never happen (entry.S only brings us
+                        * into this code for insn and data aborts from a lower
+                        * exception level). Fail safe by not providing an ESR
+                        * context record at all.
+                        */
+                       WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+                       esr = 0;
+                       break;
+               }
+       }
+
        current->thread.fault_code = esr;
        arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
 }
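
From userspace, the effect of the sanitisation above can be observed in the
ESR_MAGIC record that arm64 appends to the signal frame: for a fault on a
kernel address it now describes a plain translation fault instead of leaking
the raw syndrome. A hedged userspace sketch (record layout mirrored from the
kernel's asm/sigcontext.h, __reserved field as in the glibc AArch64 ucontext;
sketch only, printf is not async-signal-safe):

  #include <signal.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <ucontext.h>
  #include <unistd.h>

  #define ESR_MAGIC 0x45535201u                    /* as in asm/sigcontext.h */

  struct aarch64_ctx { uint32_t magic; uint32_t size; };

  static void handler(int sig, siginfo_t *si, void *ucv)
  {
          ucontext_t *uc = ucv;
          unsigned char *p = uc->uc_mcontext.__reserved;
          struct aarch64_ctx head;

          for (;;) {
                  memcpy(&head, p, sizeof(head));
                  if (!head.magic || !head.size)
                          break;                    /* terminating record */
                  if (head.magic == ESR_MAGIC) {
                          uint64_t esr;

                          memcpy(&esr, p + sizeof(head), sizeof(esr));
                          printf("si_addr=%p esr=0x%llx\n", si->si_addr,
                                 (unsigned long long)esr);
                          break;
                  }
                  p += head.size;                   /* size includes header */
          }
          _exit(0);
  }

  int main(void)
  {
          struct sigaction sa = { .sa_sigaction = handler,
                                  .sa_flags = SA_SIGINFO };
          volatile uint64_t *kaddr = (void *)0xffff000000000000ull;

          sigaction(SIGSEGV, &sa, NULL);
          return (int)*kaddr;                       /* faults on a kernel VA */
  }
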
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9f1ec1770e7f9f5aca7176eac2cc153d32..493ff75670ffd98a1dc344a133f0f31a634f93ff 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
+       pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
 
-       /* ioremap_page_range doesn't honour BBM */
-       if (pud_present(READ_ONCE(*pudp)))
+       /* Only allow permission changes for now */
+       if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
+                                  pud_val(new_pud)))
                return 0;
 
        BUG_ON(phys & ~PUD_MASK);
-       set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
+       set_pud(pudp, new_pud);
        return 1;
 }
 
@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
+       pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
 
-       /* ioremap_page_range doesn't honour BBM */
-       if (pmd_present(READ_ONCE(*pmdp)))
+       /* Only allow permission changes for now */
+       if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
+                                  pmd_val(new_pmd)))
                return 0;
 
        BUG_ON(phys & ~PMD_MASK);
-       set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+       set_pmd(pmdp, new_pmd);
        return 1;
 }
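
pud_set_huge() and pmd_set_huge() previously refused to touch any entry that
was already present (to avoid break-before-make violations when called for
ioremap mappings), which also blocked the permission-only updates the kernel
performs when it remaps its own section mappings read-only. The new code
instead asks pgattr_change_is_safe() whether the update can be done in place.
A simplified standalone sketch of the shape of that test, not the kernel
helper, with hypothetical bit masks:

  #include <stdbool.h>
  #include <stdint.h>

  /* Hypothetical permission bits for illustration only. */
  #define ATTR_RDONLY      (UINT64_C(1) << 7)
  #define ATTR_PXN         (UINT64_C(1) << 53)
  #define ATTR_UXN         (UINT64_C(1) << 54)
  #define SAFE_CHANGE_MASK (ATTR_RDONLY | ATTR_PXN | ATTR_UXN)

  static bool attr_change_is_safe(uint64_t old, uint64_t new)
  {
          /* Creating or clearing an entry never needs break-before-make. */
          if (old == 0 || new == 0)
                  return true;

          /* A live entry may only change within the permission bits. */
          return ((old ^ new) & ~SAFE_CHANGE_MASK) == 0;
  }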