Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 26 Jul 2017 17:46:48 +0000 (10:46 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 26 Jul 2017 17:46:48 +0000 (10:46 -0700)
Pull virtio fixes and cleanups from Michael Tsirkin:
 "Fixes some minor issues all over the codebase"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio-net: fix module unloading
  virtio-balloon: coding format cleanup
  virtio-balloon: deflate via a page list
  virtio_blk: Use sysfs_match_string() helper
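
For context on the last commit in the list: sysfs_match_string() compares a value written to a sysfs attribute (typically carrying a trailing newline) against an array of option strings and returns the matching index or a negative errno. A minimal userspace model of that matching rule, assuming the two cache-mode names virtio_blk uses:

#include <stdio.h>
#include <string.h>

/* Userspace model of the kernel's sysfs_match_string()/sysfs_streq()
 * semantics: a trailing '\n' in the written buffer is ignored, and the
 * index of the matching entry (or -1, standing in for -EINVAL) is
 * returned. */
static int match_string(const char * const *arr, size_t n, const char *buf)
{
        size_t len = strcspn(buf, "\n");
        size_t i;

        for (i = 0; i < n; i++)
                if (strlen(arr[i]) == len && !strncmp(arr[i], buf, len))
                        return (int)i;
        return -1;
}

int main(void)
{
        static const char * const cache_types[] = { "write through", "write back" };

        printf("%d\n", match_string(cache_types, 2, "write back\n")); /* 1 */
        printf("%d\n", match_string(cache_types, 2, "bogus\n"));      /* -1 */
        return 0;
}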

26 files changed:
arch/arc/mm/dma.c
arch/arm/mm/dma-mapping-nommu.c
arch/arm/mm/dma-mapping.c
arch/arm64/mm/dma-mapping.c
arch/mips/mm/dma-default.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/mm/pgtable.c
drivers/base/dma-coherent.c
drivers/base/dma-mapping.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-ortek.c
drivers/hid/usbhid/hid-core.c
drivers/s390/cio/chp.c
drivers/scsi/hpsa.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/smartpqi.h
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.h
drivers/thunderbolt/tb_msgs.h
fs/jfs/acl.c
fs/jfs/resize.c
fs/jfs/super.c
include/linux/dma-mapping.h
include/linux/uuid.h
lib/test_uuid.c

diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a07e6ecafbd768bcdca7c09fa4cc29bc1a6ff65..71d3efff99d35c56c6f30bea8a6ab1cf1702d753 100644
@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        if (off < count && user_count <= (count - off)) {
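
The surrounding context (repeated in the arm, arm64, mips and dma-mapping hunks below) guards partial mmaps of the buffer: vm_pgoff selects a page offset into the allocation and the request must fit inside it. A small self-contained illustration of that bounds check:

#include <stdio.h>

/* Model of the "off < count && user_count <= (count - off)" check: a
 * userspace mmap may map a sub-range of the DMA buffer starting at
 * page offset vm_pgoff; it is valid only if it starts inside the
 * buffer and does not run past its end. All values are in pages. */
static int range_ok(unsigned long off, unsigned long user_count,
                    unsigned long count)
{
        return off < count && user_count <= count - off;
}

int main(void)
{
        printf("%d\n", range_ok(2, 4, 8)); /* 4 pages at offset 2 of 8: ok */
        printf("%d\n", range_ok(6, 4, 8)); /* would run past the end: rejected */
        return 0;
}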
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 90ee354d803e6ce6a9f06e6bf3513f439716ac35..6db5fc26d154c6febf6d61c0bdf66aae9c563f9e 100644
@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 
 {
        const struct dma_map_ops *ops = &dma_noop_ops;
+       void *ret;
 
        /*
-        * We are here because:
+        * Try generic allocator first if we are advertised that
+        * consistency is not required.
+        */
+
+       if (attrs & DMA_ATTR_NON_CONSISTENT)
+               return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+       ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+       /*
+        * dma_alloc_from_global_coherent() may fail because:
+        *
         * - no consistent DMA region has been defined, so we can't
         *   continue.
         * - there is no space left in consistent DMA region, so we
@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
         *   advertised that consistency is not required.
         */
 
-       if (attrs & DMA_ATTR_NON_CONSISTENT)
-               return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-       WARN_ON_ONCE(1);
-       return NULL;
+       WARN_ON_ONCE(ret == NULL);
+       return ret;
 }
 
 static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 {
        const struct dma_map_ops *ops = &dma_noop_ops;
 
-       if (attrs & DMA_ATTR_NON_CONSISTENT)
+       if (attrs & DMA_ATTR_NON_CONSISTENT) {
                ops->free(dev, size, cpu_addr, dma_addr, attrs);
-       else
-               WARN_ON_ONCE(1);
+       } else {
+               int ret = dma_release_from_global_coherent(get_order(size),
+                                                          cpu_addr);
+
+               WARN_ON_ONCE(ret == 0);
+       }
 
        return;
 }
 
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                             void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                             unsigned long attrs)
+{
+       int ret;
+
+       if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+               return ret;
+
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
 {
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
 const struct dma_map_ops arm_nommu_dma_ops = {
        .alloc                  = arm_nommu_dma_alloc,
        .free                   = arm_nommu_dma_free,
+       .mmap                   = arm_nommu_dma_mmap,
        .map_page               = arm_nommu_dma_map_page,
        .unmap_page             = arm_nommu_dma_unmap_page,
        .map_sg                 = arm_nommu_dma_map_sg,
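
The new free path above passes get_order(size) down to dma_release_from_global_coherent(), which releases a whole power-of-two region. A runnable re-implementation of that rounding, assuming the usual 4 KiB pages:

#include <stdio.h>

/* Userspace re-implementation of the kernel's get_order() for
 * illustration: the smallest order such that (PAGE_SIZE << order)
 * covers size. Assumes PAGE_SHIFT == 12 (4 KiB pages). */
#define PAGE_SHIFT 12

static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        /* 4 KiB -> order 0, 8 KiB -> order 1, 20 KiB rounds up to order 3 */
        printf("%d %d %d\n", get_order(4096), get_order(8192), get_order(20480));
        return 0;
}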
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7380bafbfa6d37ef928fecf38ec26c56df31f90..fcf1473d6fed5398be1f9474c2543958120e22a5 100644
@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
        unsigned long off = vma->vm_pgoff;
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e90cd1db42a809ebbb978cc9ae4674c0944d265f..f27d4dd043848dc8775a52588cffa7e27bd006a6 100644
@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e08598c70b3e72d3c579462d9b82cbe340256ac9..8e78251eccc25e0a981ede0b89d1df907f9e896a 100644
@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        if (off < count && user_count <= (count - off)) {
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0c82f7903fc7a7c0bfc9c7d5b02ab79ab4a2be89..c1bf75ffb8756549b1c6a5c97908ff49dd2e79aa 100644
@@ -998,7 +998,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
        psw_bits(regs.psw).ia   = sfr->basic.ia;
        psw_bits(regs.psw).dat  = sfr->basic.T;
        psw_bits(regs.psw).wait = sfr->basic.W;
-       psw_bits(regs.psw).per  = sfr->basic.P;
+       psw_bits(regs.psw).pstate = sfr->basic.P;
        psw_bits(regs.psw).as   = sfr->basic.AS;
 
        /*
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d4d409ba206b2e0f4ed0b88cc4f3a3ba125b3597..4a1f7366b17aeffacb6c766ab891227e2609f1b9 100644
@@ -591,11 +591,11 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
        unsigned long ptev;
        pgste_t pgste;
 
-       /* Clear storage key */
+       /* Clear storage key ACC and F, but set R/C */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
-       pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
-                             PGSTE_GR_BIT | PGSTE_GC_BIT);
+       pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+       pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
        ptev = pte_val(*ptep);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
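
The behavioural change here is easy to miss: the guest referenced/changed bits (GR/GC) used to be cleared along with the access-control and fetch-protection bits, and are now forced on instead, so a subsequent key read reports R=C=1. A toy model of the mask update (bit positions are illustrative, not the real s390 PGSTE layout):

#include <stdio.h>

/* Illustrative bit positions only -- not s390's actual PGSTE layout. */
#define PGSTE_ACC_BITS 0xf0UL
#define PGSTE_FP_BIT   0x08UL
#define PGSTE_GR_BIT   0x04UL
#define PGSTE_GC_BIT   0x02UL

int main(void)
{
        unsigned long pgste = 0xffUL;                   /* everything set */

        pgste &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);      /* clear ACC + F */
        pgste |= PGSTE_GR_BIT | PGSTE_GC_BIT;           /* force R + C on */
        printf("%#lx\n", pgste);                        /* 0x7: R, C survive */
        return 0;
}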
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c87901185de6d558545ebe58d8d0b..1c152aed6b8265409a0ab2a677ce4bd0a99d62a9 100644
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
        if (dev && dev->dma_mem)
                return dev->dma_mem;
-       return dma_coherent_default_memory;
+       return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:       device from which we allocate memory
- * @size:      size of requested memory area
- * @dma_handle:        This will be filled with the correct dma handle
- * @ret:       This pointer will be filled with the virtual address
- *             to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-                                      dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+               ssize_t size, dma_addr_t *dma_handle)
 {
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
        int order = get_order(size);
        unsigned long flags;
        int pageno;
        int dma_memory_map;
+       void *ret;
 
-       if (!mem)
-               return 0;
-
-       *ret = NULL;
        spin_lock_irqsave(&mem->spinlock, flags);
 
        if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                goto err;
 
        /*
-        * Memory was found in the per-device area.
+        * Memory was found in the coherent area.
         */
-       *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-       *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+       ret = mem->virt_base + (pageno << PAGE_SHIFT);
        dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        if (dma_memory_map)
-               memset(*ret, 0, size);
+               memset(ret, 0, size);
        else
-               memset_io(*ret, 0, size);
+               memset_io(ret, 0, size);
 
-       return 1;
+       return ret;
 
 err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
+       return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:       device from which we allocate memory
+ * @size:      size of requested memory area
+ * @dma_handle:        This will be filled with the correct dma handle
+ * @ret:       This pointer will be filled with the virtual address
+ *             to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+               dma_addr_t *dma_handle, void **ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       if (!mem)
+               return 0;
+
+       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+       if (*ret)
+               return 1;
+
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:       device from which the memory was allocated
- * @order:     the order of pages allocated
- * @vaddr:     virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+       if (!dma_coherent_default_memory)
+               return NULL;
+
+       return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+                       dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+                                      int order, void *vaddr)
+{
        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
        }
        return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:       device from which the memory was allocated
- * @vma:       vm_area for the userspace memory
- * @vaddr:     cpu address returned by dma_alloc_from_coherent
- * @size:      size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:       result from remap_pfn_range()
+ * @order:     the order of pages allocated
+ * @vaddr:     virtual address of allocated pages
  *
  * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
  */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-                          void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
        struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+       return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_release_from_coherent(dma_coherent_default_memory, order,
+                       vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+               struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
        }
        return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:       device from which the memory was allocated
+ * @vma:       vm_area for the userspace memory
+ * @vaddr:     cpu address returned by dma_alloc_from_dev_coherent
+ * @size:      size of the memory buffer allocated
+ * @ret:       result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+                          void *vaddr, size_t size, int *ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+                                  size_t size, int *ret)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+                                       vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
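
The split into __dma_alloc_from_coherent() plus per-device and global wrappers preserves the old tri-state contract toward dma_alloc_attrs(): 0 means "no pool, use the generic allocator", nonzero with *ret set means the pool satisfied the request, and nonzero with *ret == NULL means an exclusive pool ran out and the caller must fail rather than fall back. A compact model of that contract (struct and names are illustrative):

#include <stdio.h>

#define DMA_MEMORY_EXCLUSIVE 0x1        /* illustrative flag value */

struct pool { int flags; int exhausted; };

/* Model of dma_alloc_from_dev_coherent()'s return convention:
 * 0            -> no per-device pool, caller uses the generic path
 * 1, *ret set  -> allocation satisfied from the pool
 * 1, *ret NULL -> exclusive pool is out of space; allocation fails */
static int alloc_from_pool(struct pool *mem, void **ret)
{
        static char pool_mem[64];       /* stands in for mem->virt_base */

        if (!mem)
                return 0;
        *ret = mem->exhausted ? NULL : (void *)pool_mem;
        if (*ret)
                return 1;
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

int main(void)
{
        struct pool excl = { DMA_MEMORY_EXCLUSIVE, 1 };
        void *p = NULL;

        printf("no pool -> %d\n", alloc_from_pool(NULL, &p));
        printf("exhausted exclusive pool -> %d, ret=%p\n",
               alloc_from_pool(&excl, &p), p);
        return 0;
}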
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e0c1a597d25189547ab797b568f36..b555ff9dd8fceb176af2f567157165726e9ab314 100644
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
        if (off < count && user_count <= (count - off)) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197af4e70c9e1bb4bfd0bd8ffb4f348..9017dcc14502d7236ff48a03457cb67a7f9d0a5e 100644
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #if IS_ENABLED(CONFIG_HID_ORTEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
 #endif
 #if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf1a8ea67c504f344ecba32acc603e..c9ba4c6db74ca076d0e26426656d238ac878088e 100644
 #define USB_VENDOR_ID_ORTEK            0x05a4
 #define USB_DEVICE_ID_ORTEK_PKB1700    0x1700
 #define USB_DEVICE_ID_ORTEK_WKB2000    0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S   0x8003
 
 #define USB_VENDOR_ID_PLANTRONICS      0x047f
 
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec228a21ccf6f1b12a5e7dfd7b73046d..8783a064cdcf43b0752973a4693d72ddca4fe4fa 100644
@@ -5,6 +5,7 @@
  *
  *    Ortek PKB-1700
  *    Ortek WKB-2000
+ *    iHome IMAC-A210S
  *    Skycable wireless presenter
  *
  *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
        if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
-               hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+               hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
                rdesc[55] = 0x92;
        } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
-               hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+               hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
                rdesc[53] = 0x65;
        }
        return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 static const struct hid_device_id ortek_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
        { }
 };
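
For readers of the fixup above: 0x25 is the HID short-item prefix for a Global "Logical Maximum" item with one data byte, so the pair 0x25 0x01 declares Logical Maximum (1) — which is why the corrected log messages now say "maximum" — and the driver rewrites the data byte to the value the device actually reports. A tiny decoder for that prefix byte, per the HID 1.11 item encoding:

#include <stdio.h>

/* Decode a HID short-item prefix byte: bits 7..4 tag, 3..2 type,
 * 1..0 data size. 0x25 = tag 2 (Logical Maximum), type 1 (Global),
 * size 1 -- hence the test for rdesc[54] == 0x25 followed by a patch
 * of the data byte. */
int main(void)
{
        unsigned char prefix = 0x25;
        unsigned char item[] = { 0x25, 0x01 };  /* Logical Maximum (1) */

        printf("tag=%u type=%u size=%u\n",
               prefix >> 4, (prefix >> 2) & 0x3, prefix & 0x3);

        if (item[0] == 0x25 && item[1] == 0x01)
                item[1] = 0x92;                 /* Logical Maximum (146) */
        printf("patched: %02x %02x\n", item[0], item[1]);
        return 0;
}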
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7faedbf5ec6ca62dca6181afc5919b..c008847e0b20a2accb00451b10fb1c648f67925b 100644
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
        struct usbhid_device *usbhid = hid->driver_data;
        int res;
 
+       set_bit(HID_OPENED, &usbhid->iofl);
+
        if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
                return 0;
 
        res = usb_autopm_get_interface(usbhid->intf);
        /* the device must be awake to reliably request remote wakeup */
-       if (res < 0)
+       if (res < 0) {
+               clear_bit(HID_OPENED, &usbhid->iofl);
                return -EIO;
+       }
 
        usbhid->intf->needs_remote_wakeup = 1;
 
        set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
-       set_bit(HID_OPENED, &usbhid->iofl);
        set_bit(HID_IN_POLLING, &usbhid->iofl);
 
        res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
 
-       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-               return;
-
        /*
         * Make sure we don't restart data acquisition due to
         * a resumption we no longer care about by avoiding racing
         * with hid_start_in().
         */
        spin_lock_irq(&usbhid->lock);
-       clear_bit(HID_IN_POLLING, &usbhid->iofl);
        clear_bit(HID_OPENED, &usbhid->iofl);
+       if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+               clear_bit(HID_IN_POLLING, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
 
+       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+               return;
+
        hid_cancel_delayed_stuff(usbhid);
        usb_kill_urb(usbhid->urbin);
        usbhid->intf->needs_remote_wakeup = 0;
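
The reordering above matters for HID_QUIRK_ALWAYS_POLL devices: the interrupt URB keeps polling across open/close, so HID_OPENED alone now decides whether polled reports are delivered, and it is set before anything in usbhid_open() can fail (and cleared again on failure). A userspace model of that flag protocol — the names mirror the iofl bits, but the logic is illustrative, not the actual usbhid code:

#include <stdio.h>

enum { OPENED = 1 << 0, IN_POLLING = 1 << 1 };

/* Deliver a polled report only while the device is opened; with
 * ALWAYS_POLL, IN_POLLING stays set from probe to remove. */
static void poll_event(unsigned int flags)
{
        if (!(flags & IN_POLLING))
                return;                         /* URB not running */
        puts(flags & OPENED ? "deliver report" : "drop report");
}

int main(void)
{
        unsigned int flags = IN_POLLING;        /* ALWAYS_POLL device at probe */

        poll_event(flags);                      /* drop: nobody listening yet */
        flags |= OPENED;                        /* usbhid_open() */
        poll_event(flags);                      /* deliver */
        flags &= ~OPENED;                       /* usbhid_close(): keep polling */
        poll_event(flags);                      /* drop again */
        return 0;
}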
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724ddae73c71f698a9e4f7775328ef4d93..432fc40990bd26ad2e0dd9e39897ba5c348b197f 100644
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
        chpid.id = crw0->rsid;
        switch (crw0->erc) {
        case CRW_ERC_IPARM: /* Path has come. */
+       case CRW_ERC_INIT:
                if (!chp_is_registered(chpid))
                        chp_new(chpid);
                chsc_chp_online(chpid);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab843372fe3dde09e97b0d2c45aa65bbf64..4f7cdb28bd38fbadc6105906f56fe813346c299c 100644
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
-       .max_sectors = 8192,
+       .max_sectors = 1024,
        .no_write_same = 1,
 };
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 1e82d4128a84800d3f96011c49e0b3fc12c55d8e..4fe606b000b4461c05e441cbb787edcfec51e72b 100644
@@ -759,8 +759,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
                        return false;
                return true;
        case SG_DXFER_FROM_DEV:
-               if (hp->dxfer_len < 0)
-                       return false;
+               /*
+                * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
+                * can either be NULL or != NULL so there's no point in checking
+                * it either. So just return true.
+                */
                return true;
        case SG_DXFER_TO_DEV:
        case SG_DXFER_TO_FROM_DEV:
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877dec04b6ac3ea35aef2ac1a6c6c3e01..e164ffade38a7166690a9ecb206be1165e62278c 100644
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
 
 #define PQI_MAX_OUTSTANDING_REQUESTS           ((u32)~0)
 #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP     32
-#define PQI_MAX_TRANSFER_SIZE                  (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE                  (1024U * 1024U)
 #define PQI_MAX_TRANSFER_SIZE_KDUMP            (512 * 1024U)
 
 #define RAID_MAP_MAX_ENTRIES           1024
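
The hpsa and smartpqi limits just above are in different units: scsi_host_template.max_sectors counts 512-byte sectors, while PQI_MAX_TRANSFER_SIZE is plain bytes. A quick arithmetic check of what the two reductions mean per request:

#include <stdio.h>

int main(void)
{
        /* hpsa: max_sectors is in 512-byte sectors */
        printf("hpsa: %u KiB -> %u KiB\n",
               8192u * 512u / 1024u, 1024u * 512u / 1024u);        /* 4096 -> 512 */

        /* smartpqi: PQI_MAX_TRANSFER_SIZE is in bytes */
        printf("smartpqi: %u KiB -> %u KiB\n",
               4u * 1024u * 1024u / 1024u, 1024u * 1024u / 1024u); /* 4096 -> 1024 */
        return 0;
}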
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 40219a7063099674f1e7144cfb32d5753138f7ce..e9391bbd4036023c63d8b7ec90185f56f9523ade 100644
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
 
 struct nvm_auth_status {
        struct list_head list;
-       uuid_be uuid;
+       uuid_t uuid;
        u32 status;
 };
 
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
        struct nvm_auth_status *st;
 
        list_for_each_entry(st, &nvm_auth_status_cache, list) {
-               if (!uuid_be_cmp(st->uuid, *sw->uuid))
+               if (uuid_equal(&st->uuid, sw->uuid))
                        return st;
        }
 
@@ -1461,7 +1461,7 @@ struct tb_sw_lookup {
        struct tb *tb;
        u8 link;
        u8 depth;
-       const uuid_be *uuid;
+       const uuid_t *uuid;
 };
 
 static int tb_switch_match(struct device *dev, void *data)
@@ -1518,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
  * Returned switch has reference count increased so the caller needs to
  * call tb_switch_put() when done with the switch.
  */
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
 {
        struct tb_sw_lookup lookup;
        struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e5839d35b5b3a7edb56a9bbcc00c211..e0deee4f1eb095233789ca7ff8fa3de377d770c1 100644
@@ -101,7 +101,7 @@ struct tb_switch {
        struct tb_dma_port *dma_port;
        struct tb *tb;
        u64 uid;
-       uuid_be *uuid;
+       uuid_t *uuid;
        u16 vendor;
        u16 device;
        const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
                                               u8 depth);
-struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
 
 static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
 {
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c09193a4fdf255d3c809ecfb84e7f8177..de6441e4a060283f6e5abdd43672f34103577e93 100644
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
 
 struct icm_fr_event_device_connected {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
 
 struct icm_fr_pkg_approve_device {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
 
 struct icm_fr_pkg_add_device_key {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
 
 struct icm_fr_pkg_add_device_key_response {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
 
 struct icm_fr_pkg_challenge_device {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
 
 struct icm_fr_pkg_challenge_device_response {
        struct icm_pkg_header hdr;
-       uuid_be ep_uuid;
+       uuid_t ep_uuid;
        u8 connection_key;
        u8 connection_id;
        u16 reserved;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 7bc186f4ed4de6837b94ab8670cca7043122b28c..2e71b6e7e646a489e35bad17c23cc2140c5bc64b 100644
@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
        switch (type) {
        case ACL_TYPE_ACCESS:
                ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
-               if (acl) {
-                       rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-                       if (rc)
-                               return rc;
-                       inode->i_ctime = current_time(inode);
-                       mark_inode_dirty(inode);
-               }
                break;
        case ACL_TYPE_DEFAULT:
                ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
@@ -115,12 +108,27 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
        int rc;
        tid_t tid;
+       int update_mode = 0;
+       umode_t mode = inode->i_mode;
 
        tid = txBegin(inode->i_sb, 0);
        mutex_lock(&JFS_IP(inode)->commit_mutex);
+       if (type == ACL_TYPE_ACCESS && acl) {
+               rc = posix_acl_update_mode(inode, &mode, &acl);
+               if (rc)
+                       goto end_tx;
+               update_mode = 1;
+       }
        rc = __jfs_set_acl(tid, inode, type, acl);
-       if (!rc)
+       if (!rc) {
+               if (update_mode) {
+                       inode->i_mode = mode;
+                       inode->i_ctime = current_time(inode);
+                       mark_inode_dirty(inode);
+               }
                rc = txCommit(tid, 1, &inode, 0);
+       }
+end_tx:
        txEnd(tid);
        mutex_unlock(&JFS_IP(inode)->commit_mutex);
        return rc;
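
The point of this jfs change is ordering: posix_acl_update_mode() now runs inside the transaction and under commit_mutex, and the computed mode is applied to the inode only once __jfs_set_acl() has succeeded, so mode and ACL change together or not at all. A stripped-down model of that stage-then-commit pattern (names are illustrative):

#include <stdio.h>

struct inode { unsigned int mode; };

static int set_acl_xattr(int fail) { return fail ? -1 : 0; }

int main(void)
{
        struct inode inode = { 0755 };
        unsigned int staged = 0750;     /* as posix_acl_update_mode() would compute */

        /* Failure path: the staged mode is never applied. */
        if (!set_acl_xattr(1))
                inode.mode = staged;
        printf("after failed set: %o\n", inode.mode);   /* still 755 */

        /* Success path: mode and ACL commit together. */
        if (!set_acl_xattr(0))
                inode.mode = staged;
        printf("after good set:   %o\n", inode.mode);   /* 750 */
        return 0;
}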
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index bd9b641ada2c5e9ba507c4efaf346a244559f4fb..7ddcb445a3d9f65c61a4c423e2e969bf14649394 100644
@@ -98,7 +98,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
                goto out;
        }
 
-       VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
+       VolumeSize = i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits;
 
        if (VolumeSize) {
                if (newLVSize > VolumeSize) {
@@ -211,7 +211,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
        txQuiesce(sb);
 
        /* Reset size of direct inode */
-       sbi->direct_inode->i_size =  sb->s_bdev->bd_inode->i_size;
+       sbi->direct_inode->i_size =  i_size_read(sb->s_bdev->bd_inode);
 
        if (sbi->mntflag & JFS_INLINELOG) {
                /*
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index e8aad7d87b8c938aad4a51db500684950df73944..78b41e1d5c67151744fb1c98bf1af54560593457 100644
@@ -313,7 +313,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                }
                case Opt_resize_nosize:
                {
-                       *newLVSize = sb->s_bdev->bd_inode->i_size >>
+                       *newLVSize = i_size_read(sb->s_bdev->bd_inode) >>
                                sb->s_blocksize_bits;
                        if (*newLVSize == 0)
                                pr_err("JFS: Cannot determine volume size\n");
@@ -579,7 +579,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
                goto out_unload;
        }
        inode->i_ino = 0;
-       inode->i_size = sb->s_bdev->bd_inode->i_size;
+       inode->i_size = i_size_read(sb->s_bdev->bd_inode);
        inode->i_mapping->a_ops = &jfs_metapage_aops;
        hlist_add_fake(&inode->i_hash);
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
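
These jfs hunks replace bare reads of bd_inode->i_size with i_size_read(). The reason is torn reads: on 32-bit SMP kernels loff_t is 64 bits and cannot be loaded atomically, so i_size_read() wraps the load in a seqcount retry loop (on 64-bit it compiles down to a plain load). A sketch of the reader side of that loop, modelled in userspace:

#include <stdio.h>

/* The writer bumps seq to odd before updating and to even after, so
 * a reader retries if it raced with an update (seq changed or is
 * odd). Single-threaded here, so the loop exits immediately; the
 * point is the protocol, not a full seqlock implementation. */
static unsigned int seq;
static long long isize;

static long long i_size_read_model(void)
{
        unsigned int s;
        long long v;

        do {
                s = seq;
                v = isize;
        } while (s != seq || (s & 1));
        return v;
}

int main(void)
{
        isize = 1LL << 40;      /* a size that doesn't fit in 32 bits */
        printf("%lld\n", i_size_read_model());
        return 0;
}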
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f487c2000e0974592ee98544293758..03c0196a6f2474ea4e34e9840638ff80a45370ec 100644
@@ -157,16 +157,40 @@ static inline int is_device_dma_capable(struct device *dev)
  * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size, int *ret);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+                                 size_t size, int *ret);
+
 #else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+
+static inline void *dma_alloc_from_global_coherent(ssize_t size,
+                                                  dma_addr_t *dma_handle)
+{
+       return NULL;
+}
+
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+       return 0;
+}
+
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+                                               void *cpu_addr, size_t size,
+                                               int *ret)
+{
+       return 0;
+}
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 #ifdef CONFIG_HAS_DMA
@@ -481,7 +505,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 
        BUG_ON(!ops);
 
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+       if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;
 
        if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +527,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
        BUG_ON(!ops);
        WARN_ON(irqs_disabled());
 
-       if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+       if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
 
        if (!ops->free || !cpu_addr)
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2251e1925ea4f34b491b615288c19ca6aba921fb..33b0bdbb613c6b63c9d8c16ca8ee155e981f52b7 100644
@@ -84,26 +84,12 @@ int guid_parse(const char *uuid, guid_t *u);
 int uuid_parse(const char *uuid, uuid_t *u);
 
 /* backwards compatibility, don't use in new code */
-typedef uuid_t uuid_be;
-#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
-       UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
-#define NULL_UUID_BE                                                   \
-       UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
-            0x00, 0x00, 0x00, 0x00)
-
 #define uuid_le_gen(u)         guid_gen(u)
-#define uuid_be_gen(u)         uuid_gen(u)
 #define uuid_le_to_bin(guid, u)        guid_parse(guid, u)
-#define uuid_be_to_bin(uuid, u)        uuid_parse(uuid, u)
 
 static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
 {
        return memcmp(&u1, &u2, sizeof(guid_t));
 }
 
-static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
-{
-       return memcmp(&u1, &u2, sizeof(uuid_t));
-}
-
 #endif
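
The uuid_be_cmp() removal is why the thunderbolt hunks above convert "!uuid_be_cmp(a, b)" into "uuid_equal(&a, &b)": the old helper had memcmp() semantics (0 on equality) while uuid_equal() is a boolean. The same inversion is what the lib/test_uuid.c hunk below fixes, where a converted "if (uuid_equal(...))" flagged equal UUIDs as failures. A self-contained demonstration of the two conventions (local stand-in types, not the kernel headers):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

typedef struct { unsigned char b[16]; } uuid_t;         /* local stand-in */

static int uuid_be_cmp(uuid_t u1, uuid_t u2)            /* old: memcmp semantics */
{
        return memcmp(&u1, &u2, sizeof(uuid_t));
}

static bool uuid_equal(const uuid_t *u1, const uuid_t *u2) /* new: boolean */
{
        return memcmp(u1, u2, sizeof(uuid_t)) == 0;
}

int main(void)
{
        uuid_t a = { { 0xde, 0xad, 0xbe, 0xef } }, b = a;

        /* equal UUIDs: cmp yields 0, equal yields true (1) */
        printf("uuid_be_cmp=%d uuid_equal=%d\n",
               uuid_be_cmp(a, b), uuid_equal(&a, &b));
        return 0;
}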
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index 478c049630b5cb8e09a57c5ccd012d51b5c4be97..cd819c397dc7ebed8be5bc6167254c543d0f26c8 100644
@@ -82,7 +82,7 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
                test_uuid_failed("conversion", false, true, data->uuid, NULL);
 
        total_tests++;
-       if (uuid_equal(&data->be, &be)) {
+       if (!uuid_equal(&data->be, &be)) {
                sprintf(buf, "%pUb", &be);
                test_uuid_failed("cmp", false, true, data->uuid, buf);
        }