F: arch/arm/boot/dts/owl-*
F: arch/arm64/boot/dts/actions/
F: drivers/clocksource/owl-*
+F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
F: include/linux/soc/actions/
F: Documentation/devicetree/bindings/arm/actions.txt
+F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864b.c
F: include/linux/cfag12864b.h
CFAG12864BFB LCD FRAMEBUFFER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: drivers/auxdisplay/cfag12864bfb.c
F: include/linux/cfag12864b.h
S: Maintained
F: drivers/media/radio/radio-keene*
- KERNEL AUTOMOUNTER v4 (AUTOFS4)
+ KERNEL AUTOMOUNTER
M: Ian Kent <raven@themaw.net>
L: autofs@vger.kernel.org
S: Maintained
- F: fs/autofs4/
+ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <yamada.masahiro@socionext.com>
KS0108 LCD CONTROLLER DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
-W: http://miguelojeda.es/auxdisplay.htm
-W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
S: Maintained
F: Documentation/auxdisplay/ks0108
F: drivers/auxdisplay/ks0108.c
S: Maintained
F: drivers/media/dvb-frontends/mn88473*
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/host/pcie-mobiveil.c
+
MODULE SUPPORT
M: Jessica Yu <jeyu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
OPEN FIRMWARE AND DEVICE TREE OVERLAYS
M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
F: Documentation/devicetree/dynamic-resolution-notes.txt
F: Documentation/devicetree/overlay-notes.txt
F: drivers/of/overlay.c
F: drivers/of/resolver.c
+K: of_overlay_notifier_
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
F: drivers/pci/cadence/pcie-cadence*
PCI DRIVER FOR FREESCALE LAYERSCAPE
-M: Minghuan Lian <minghuan.Lian@freescale.com>
-M: Mingkai Hu <mingkai.hu@freescale.com>
-M: Roy Zang <tie-fei.zang@freescale.com>
+M: Minghuan Lian <minghuan.Lian@nxp.com>
+M: Mingkai Hu <mingkai.hu@nxp.com>
+M: Roy Zang <roy.zang@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
L: linux-pci@vger.kernel.org
L: linux-rockchip@lists.infradead.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
-F: drivers/pci/host/pcie-rockchip.c
+F: Documentation/devicetree/bindings/pci/rockchip-pcie*
+F: drivers/pci/host/pcie-rockchip*
PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
+F: drivers/pinctrl/mediatek/mtk-eint.*
F: drivers/pinctrl/mediatek/pinctrl-mtk-common.*
F: drivers/pinctrl/mediatek/pinctrl-mt2701.c
F: drivers/pinctrl/mediatek/pinctrl-mt7622.c
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
* to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
{
if (!ptlock_init(page))
return false;
+ __SetPageTable(page);
inc_zone_page_state(page, NR_PAGETABLE);
return true;
}
static inline void pgtable_page_dtor(struct page *page)
{
pte_lock_deinit(page);
+ __ClearPageTable(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
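Editorial sketch, not part of the hunk above: pgtable_page_ctor()/pgtable_page_dtor() are called from an architecture's PTE-page allocation and free paths, so the new __SetPageTable()/__ClearPageTable() calls tag exactly the pages that back page tables. The helper names below are hypothetical and only illustrate the pairing.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_pte_alloc_one(struct mm_struct *mm)	/* hypothetical */
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {	/* now also sets PageTable */
		__free_page(page);
		return NULL;
	}
	return page;
}

static void example_pte_free(struct mm_struct *mm, struct page *page)	/* hypothetical */
{
	pgtable_page_dtor(page);	/* now also clears PageTable */
	__free_page(page);
}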
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
- extern int filemap_fault(struct vm_fault *vmf);
+ extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
- extern int filemap_page_mkwrite(struct vm_fault *vmf);
+ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
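Editorial note, not part of the hunk above: filemap_fault() and filemap_page_mkwrite() are exported so filesystems can wire them straight into their vm_operations_struct; with this change their return type matches the vm_fault_t-based ->fault/->page_mkwrite prototypes without any cast. A minimal, hypothetical user:

#include <linux/mm.h>

static const struct vm_operations_struct example_file_vm_ops = {	/* hypothetical */
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};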
/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
- int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
- extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
- static inline bool page_is_poisoned(struct page *page) { return false; }
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
* PFNMAP mappings in order to support COWable mappings.
*
*/
- #ifdef __HAVE_ARCH_PTE_SPECIAL
- # define HAVE_PTE_SPECIAL 1
- #else
- # define HAVE_PTE_SPECIAL 0
- #endif
struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte, bool with_public_device)
{
unsigned long pfn = pte_pfn(pte);
- if (HAVE_PTE_SPECIAL) {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
if (likely(!pte_special(pte)))
goto check_pfn;
if (vma->vm_ops && vma->vm_ops->find_special_page)
return NULL;
}
- /* !HAVE_PTE_SPECIAL case follows: */
+ /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
if (is_zero_pfn(pfn))
return NULL;
+
check_pfn:
if (unlikely(pfn > highest_memmap_pfn)) {
print_bad_pte(vma, addr, pte, NULL);
/*
* There is no pmd_special() but there may be special pmds, e.g.
* in a direct-access (dax) mapping, so let's just replicate the
- * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
*/
if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
if (vma->vm_flags & VM_MIXEDMAP) {
*
* The entire address range must be fully contained within the vma.
*
- * Returns 0 if successful.
*/
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size)
{
if (address < vma->vm_start || address + size > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
- return -1;
+ return;
+
zap_page_range_single(vma, address, size, NULL);
- return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
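Editorial sketch, not part of the hunk above: with the int return gone, a typical caller, for instance a driver that set up PTEs with remap_pfn_range() on a VM_PFNMAP mapping and tears them down on reset, simply calls zap_vma_ptes() and no longer has a return value to check. The function name is hypothetical.

#include <linux/mm.h>

static void example_drv_teardown_mapping(struct vm_area_struct *vma)	/* hypothetical */
{
	/* Unmap the whole VMA range; failures are no longer reported. */
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}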
* than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
* without pte special, it would there be refcounted as a normal page.
*/
- if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
+ !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
struct page *page;
/*
}
EXPORT_SYMBOL(vm_insert_mixed);
- int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn)
+ /*
+ * If insertion of the PTE failed because someone else already added a
+ * different entry in the meantime, we treat that as success, as we assume
+ * the same entry was actually inserted.
+ */
+
+ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn)
{
- return __vm_insert_mixed(vma, addr, pfn, true);
+ int err;
+
+ err = __vm_insert_mixed(vma, addr, pfn, true);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+ return VM_FAULT_NOPAGE;
}
- EXPORT_SYMBOL(vm_insert_mixed_mkwrite);
+ EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
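Editorial sketch, not part of the hunk above: because vmf_insert_mixed_mkwrite() now does the errno to VM_FAULT_* translation itself, a page-fault handler can return its result directly instead of open-coding the -ENOMEM/-EBUSY handling. The handler below is hypothetical.

#include <linux/mm.h>
#include <linux/pfn_t.h>

static vm_fault_t example_mkwrite_fault(struct vm_fault *vmf, pfn_t pfn)	/* hypothetical */
{
	return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
}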
/*
* maps a range of physical memory into the requested pages. the old