hugetlb: introduce generic version of huge_ptep_get_and_clear()
arch/powerpc/include/asm/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/* We have only four bits to encode the MMU page size. */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}
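
/*
 * Note: with the hash MMU this is effectively a no-op; hash
 * translations are invalidated when the PTE itself is updated (via
 * pte_update() and the hash flush batching), so only radix needs an
 * explicit per-page flush here.
 */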

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
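
/*
 * Worked example of the 8xx case above, assuming the usual encodings
 * _PMD_PAGE_512K = 0x4 and _PMD_PAGE_8M = 0xc (defined elsewhere, not
 * in this header): (0x4 >> 1) + 17 = 19, a 512K page shift, and
 * (0xc >> 1) + 17 = 23, an 8M page shift; these are the two hugepage
 * sizes the 8xx supports.
 */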

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned int pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they
	 * are all identical, so in that case idx stays 0.
	 */
	unsigned long idx = 0;
	pte_t *dir = hugepd_page(hpd);

#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
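
/*
 * Worked example of the index computation above, with hypothetical
 * numbers: for pdshift = 30 (each directory entry spans 1G) and
 * hugepd_shift(hpd) = 24 (16M huge pages),
 * idx = (addr & (1G - 1)) >> 24 selects one of 64 hugeptes.  When
 * pdshift equals hugepd_shift(hpd), the directory maps a single
 * hugepte and idx stays 0.
 */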

void flush_dcache_icache_hugepage(struct page *page);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * If the arch doesn't supply something else, assume that hugepage-size
 * aligned regions are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
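
/*
 * Example of the alignment check above: for a (hypothetical) 16M
 * hstate, huge_page_mask(h) is ~(16M - 1), so both addr and len must
 * be multiples of 16M for the range to be accepted.
 */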

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
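
/*
 * For reference, the generic fallback that this patch introduces in
 * <asm-generic/hugetlb.h>, used by architectures that do not define
 * __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR, looks roughly like this:
 *
 *	static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 *			unsigned long addr, pte_t *ptep)
 *	{
 *		return ptep_get_and_clear(mm, addr, ptep);
 *	}
 *
 * powerpc keeps its own version (above) because clearing a PTE here
 * must go through pte_update(), which performs the MMU-specific
 * bookkeeping such as hash page table flushing.
 */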

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}
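
/*
 * The value returned by huge_ptep_get_and_clear() is deliberately
 * discarded above: huge_ptep_clear_flush() is called only for its side
 * effects, clearing the PTE and flushing the stale translation.
 */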

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty);

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#include <asm-generic/hugetlb.h>
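
/*
 * The include above supplies default implementations of the remaining
 * huge_* helpers.  Each generic definition is guarded by the matching
 * __HAVE_ARCH_* macro, roughly following this pattern:
 *
 *	#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 *	... generic huge_ptep_get_and_clear() ...
 *	#endif
 *
 * which is why __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE and
 * __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR are defined earlier in this
 * file, before the #include.
 */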

#else /* !CONFIG_HUGETLB_PAGE */

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned int pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */