Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 20 Oct 2018 13:03:45 +0000 (15:03 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 20 Oct 2018 13:03:45 +0000 (15:03 +0200)
Ingo writes:
  "scheduler fixes:

   Two fixes: a CFS-throttling bug fix, and an interactivity fix."

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix the min_vruntime update logic in dequeue_entity()
  sched/fair: Fix throttle_list starvation with low CFS quota
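
For context on the throttle_list commit above: with a very low CFS quota, distribute_cfs_runtime() can keep draining the throttled list while newly throttled runqueues are re-added to it, so runqueues further down the list may never be handed runtime. The sketch below is an illustrative reconstruction of that approach, not the literal upstream patch: track whether a distribution pass is in flight and, if so, add a newly throttled cfs_rq to the head of the list so the running pass skips it, otherwise add it to the tail so earlier-throttled runqueues are served first. The cfs_b->distribute_running flag and the exact placement follow my reading of the fix and should be treated as assumptions.

	/*
	 * Illustrative sketch (assumed shape of the fix, not the verbatim
	 * kernel patch): park newly throttled runqueues where an in-flight
	 * distribute_cfs_runtime() walk will not see them.
	 */
	static void throttle_cfs_rq_sketch(struct cfs_rq *cfs_rq)
	{
		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);

		raw_spin_lock(&cfs_b->lock);
		/*
		 * If a distribution pass is running, add to the head so that
		 * pass does not hand runtime straight back to us and spin on a
		 * tiny quota; otherwise add to the tail to keep FIFO fairness
		 * for runqueues throttled earlier.
		 */
		if (cfs_b->distribute_running)
			list_add_rcu(&cfs_rq->throttled_list,
				     &cfs_b->throttled_cfs_rq);
		else
			list_add_tail_rcu(&cfs_rq->throttled_list,
					  &cfs_b->throttled_cfs_rq);
		raw_spin_unlock(&cfs_b->lock);
	}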

255 files changed:
Documentation/core-api/idr.rst
LICENSES/other/CC-BY-SA-4.0 [deleted file]
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/kernel/process.c
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/kernel/vmlinux.lds.h
arch/arm/kvm/coproc.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/setup.c
arch/mips/include/asm/processor.h
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/vdso.c
arch/mips/lib/memset.S
arch/parisc/kernel/unwind.c
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/s390/include/asm/sclp.h
arch/s390/kernel/early_printk.c
arch/s390/kernel/swsusp.S
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/kgdb_32.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/viohs.c
arch/sparc/vdso/Makefile
arch/sparc/vdso/vclock_gettime.c
arch/sparc/vdso/vma.c
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/cpu/intel_rdt.h
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/mm/pgtable.c
block/blk-lib.c
block/blk-wbt.c
drivers/block/sunvdc.c
drivers/bluetooth/hci_qca.c
drivers/clk/sunxi-ng/ccu-sun4i-a10.c
drivers/crypto/inside-secure/safexcel.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/hwmon/npcm750-pwm-fan.c
drivers/i2c/i2c-core-base.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/mlx5/mr.c
drivers/input/evdev.c
drivers/input/joystick/xpad.c
drivers/input/misc/uinput.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mousedev.c
drivers/input/serio/i8042.c
drivers/md/dm-cache-target.c
drivers/md/dm-flakey.c
drivers/md/dm-integrity.c
drivers/md/dm-linear.c
drivers/md/dm.c
drivers/mmc/core/block.c
drivers/mux/adgs1408.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/geneve.c
drivers/net/phy/sfp.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/marvell/libertas/if_sdio.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/nvme/host/core.c
drivers/of/unittest.c
drivers/pci/controller/pcie-cadence.c
drivers/perf/arm_pmu.c
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/platform/chrome/cros_ec_proto.c
drivers/ptp/ptp_chardev.c
drivers/s390/char/sclp_early_core.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/s390/cio/vfio_ccw_private.h
drivers/sbus/char/openprom.c
drivers/sbus/char/oradax.c
drivers/soc/fsl/qbman/bman_ccsr.c
drivers/soc/fsl/qbman/qman_ccsr.c
drivers/soc/fsl/qbman/qman_portal.c
drivers/tty/serial/qcom_geni_serial.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/devio.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/host/xhci-pci.c
drivers/usb/roles/intel-xhci-usb-role-switch.c
drivers/usb/usbip/vhci_hcd.c
drivers/video/fbdev/aty/atyfb.h
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/mach64_ct.c
fs/afs/cell.c
fs/afs/dynroot.c
fs/afs/internal.h
fs/afs/main.c
fs/afs/proc.c
fs/afs/rxrpc.c
fs/cachefiles/namei.c
fs/dax.c
fs/fat/fatent.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/main.c
fs/gfs2/bmap.c
fs/ocfs2/dlmglue.c
fs/ubifs/super.c
fs/xfs/xfs_reflink.c
include/asm-generic/vmlinux.lds.h
include/drm/drm_atomic.h
include/drm/drm_edid.h
include/linux/cgroup-defs.h
include/linux/gpio/driver.h
include/linux/huge_mm.h
include/linux/mlx5/driver.h
include/linux/module.h
include/linux/netdevice.h
include/linux/perf/arm_pmu.h
include/linux/suspend.h
include/linux/tracepoint-defs.h
include/linux/tracepoint.h
include/net/devlink.h
include/net/dst.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/soc/fsl/bman.h
include/soc/fsl/qman.h
include/trace/events/rxrpc.h
include/uapi/linux/sctp.h
include/uapi/linux/smc_diag.h
include/uapi/linux/udp.h
kernel/bpf/xskmap.c
kernel/cgroup/cgroup.c
kernel/power/suspend.c
kernel/trace/preemptirq_delay_test.c
kernel/trace/trace_events_hist.c
kernel/tracepoint.c
lib/Makefile
lib/bch.c
lib/test_ida.c
lib/vsprintf.c
mm/huge_memory.c
mm/mmap.c
mm/mremap.c
mm/percpu.c
net/bpfilter/bpfilter_kern.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/skbuff.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/ipmr_base.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/llc/llc_conn.c
net/rds/send.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/sched/cls_api.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_cake.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/socket.c
net/socket.c
net/tipc/group.c
net/tipc/link.c
net/tipc/name_distr.c
net/tipc/socket.c
net/xdp/xsk.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
samples/Kconfig
scripts/Makefile.build
tools/arch/x86/include/uapi/asm/kvm.h
tools/include/uapi/linux/kvm.h
tools/lib/api/fs/tracing_path.c
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/builtin-report.c
tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/util/event.c
tools/perf/util/evsel.c
tools/perf/util/machine.c
tools/perf/util/pmu.c
tools/perf/util/setup.py
tools/perf/util/srcline.c
tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc [new file with mode: 0644]
tools/testing/selftests/net/reuseport_bpf.c
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/net/udpgso_bench.sh

index d351e880a2f6cf156f41a5ee4a3e330dfb7a6f0f..a2738050c4f00834283789416cf1b1b0921bc88e 100644 (file)
@@ -1,4 +1,4 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. SPDX-License-Identifier: GPL-2.0+
 
 =============
 ID Allocation
diff --git a/LICENSES/other/CC-BY-SA-4.0 b/LICENSES/other/CC-BY-SA-4.0
deleted file mode 100644 (file)
index f9158e8..0000000
+++ /dev/null
@@ -1,397 +0,0 @@
-Valid-License-Identifier: CC-BY-SA-4.0
-SPDX-URL: https://spdx.org/licenses/CC-BY-SA-4.0
-Usage-Guide:
-  To use the Creative Commons Attribution Share Alike 4.0 International
-  license put the following SPDX tag/value pair into a comment according to
-  the placement guidelines in the licensing rules documentation:
-    SPDX-License-Identifier: CC-BY-SA-4.0
-License-Text:
-
-Creative Commons Attribution-ShareAlike 4.0 International
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of Creative
-Commons public licenses does not create a lawyer-client or other
-relationship. Creative Commons makes its licenses and related information
-available on an "as-is" basis. Creative Commons gives no warranties
-regarding its licenses, any material licensed under their terms and
-conditions, or any related information. Creative Commons disclaims all
-liability for damages resulting from their use to the fullest extent
-possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share original
-works of authorship and other material subject to copyright and certain
-other rights specified in the public license below. The following
-considerations are for informational purposes only, are not exhaustive, and
-do not form part of our licenses.
-
-Considerations for licensors: Our public licenses are intended for use by
-those authorized to give the public permission to use material in ways
-otherwise restricted by copyright and certain other rights. Our licenses
-are irrevocable. Licensors should read and understand the terms and
-conditions of the license they choose before applying it. Licensors should
-also secure all rights necessary before applying our licenses so that the
-public can reuse the material as expected. Licensors should clearly mark
-any material not subject to the license. This includes other CC-licensed
-material, or material used under an exception or limitation to
-copyright. More considerations for licensors :
-wiki.creativecommons.org/Considerations_for_licensors
-
-Considerations for the public: By using one of our public licenses, a
-licensor grants the public permission to use the licensed material under
-specified terms and conditions. If the licensor's permission is not
-necessary for any reason - for example, because of any applicable exception
-or limitation to copyright - then that use is not regulated by the
-license. Our licenses grant only permissions under copyright and certain
-other rights that a licensor has authority to grant. Use of the licensed
-material may still be restricted for other reasons, including because
-others have copyright or other rights in the material. A licensor may make
-special requests, such as asking that all changes be marked or described.
-
-Although not required by our licenses, you are encouraged to respect those
-requests where reasonable. More considerations for the public :
-wiki.creativecommons.org/Considerations_for_licensees
-
-Creative Commons Attribution-ShareAlike 4.0 International Public License
-
-By exercising the Licensed Rights (defined below), You accept and agree to
-be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You such
-rights in consideration of benefits the Licensor receives from making the
-Licensed Material available under these terms and conditions.
-
-Section 1 - Definitions.
-
-    a. Adapted Material means material subject to Copyright and Similar
-       Rights that is derived from or based upon the Licensed Material and
-       in which the Licensed Material is translated, altered, arranged,
-       transformed, or otherwise modified in a manner requiring permission
-       under the Copyright and Similar Rights held by the Licensor. For
-       purposes of this Public License, where the Licensed Material is a
-       musical work, performance, or sound recording, Adapted Material is
-       always produced where the Licensed Material is synched in timed
-       relation with a moving image.
-
-    b. Adapter's License means the license You apply to Your Copyright and
-       Similar Rights in Your contributions to Adapted Material in
-       accordance with the terms and conditions of this Public License.
-
-    c. BY-SA Compatible License means a license listed at
-       creativecommons.org/compatiblelicenses, approved by Creative Commons
-       as essentially the equivalent of this Public License.
-
-    d. Copyright and Similar Rights means copyright and/or similar rights
-       closely related to copyright including, without limitation,
-       performance, broadcast, sound recording, and Sui Generis Database
-       Rights, without regard to how the rights are labeled or
-       categorized. For purposes of this Public License, the rights
-       specified in Section 2(b)(1)-(2) are not Copyright and Similar
-       Rights.
-
-    e. Effective Technological Measures means those measures that, in the
-       absence of proper authority, may not be circumvented under laws
-       fulfilling obligations under Article 11 of the WIPO Copyright Treaty
-       adopted on December 20, 1996, and/or similar international
-       agreements.
-
-    f. Exceptions and Limitations means fair use, fair dealing, and/or any
-       other exception or limitation to Copyright and Similar Rights that
-       applies to Your use of the Licensed Material.
-
-    g. License Elements means the license attributes listed in the name of
-       a Creative Commons Public License. The License Elements of this
-       Public License are Attribution and ShareAlike.
-
-    h. Licensed Material means the artistic or literary work, database, or
-       other material to which the Licensor applied this Public License.
-
-    i. Licensed Rights means the rights granted to You subject to the terms
-       and conditions of this Public License, which are limited to all
-       Copyright and Similar Rights that apply to Your use of the Licensed
-       Material and that the Licensor has authority to license.
-
-    j. Licensor means the individual(s) or entity(ies) granting rights
-       under this Public License.
-
-    k. Share means to provide material to the public by any means or
-       process that requires permission under the Licensed Rights, such as
-       reproduction, public display, public performance, distribution,
-       dissemination, communication, or importation, and to make material
-       available to the public including in ways that members of the public
-       may access the material from a place and at a time individually
-       chosen by them.
-
-    l. Sui Generis Database Rights means rights other than copyright
-       resulting from Directive 96/9/EC of the European Parliament and of
-       the Council of 11 March 1996 on the legal protection of databases,
-       as amended and/or succeeded, as well as other essentially equivalent
-       rights anywhere in the world.  m. You means the individual or entity
-       exercising the Licensed Rights under this Public License. Your has a
-       corresponding meaning.
-
-Section 2 - Scope.
-
-    a. License grant.
-
-        1. Subject to the terms and conditions of this Public License, the
-           Licensor hereby grants You a worldwide, royalty-free,
-           non-sublicensable, non-exclusive, irrevocable license to
-           exercise the Licensed Rights in the Licensed Material to:
-
-            A. reproduce and Share the Licensed Material, in whole or in part; and
-
-            B. produce, reproduce, and Share Adapted Material.
-
-        2. Exceptions and Limitations. For the avoidance of doubt, where
-           Exceptions and Limitations apply to Your use, this Public
-           License does not apply, and You do not need to comply with its
-           terms and conditions.
-
-        3. Term. The term of this Public License is specified in Section 6(a).
-
-        4. Media and formats; technical modifications allowed. The Licensor
-           authorizes You to exercise the Licensed Rights in all media and
-           formats whether now known or hereafter created, and to make
-           technical modifications necessary to do so. The Licensor waives
-           and/or agrees not to assert any right or authority to forbid You
-           from making technical modifications necessary to exercise the
-           Licensed Rights, including technical modifications necessary to
-           circumvent Effective Technological Measures. For purposes of
-           this Public License, simply making modifications authorized by
-           this Section 2(a)(4) never produces Adapted Material.
-
-        5. Downstream recipients.
-
-            A. Offer from the Licensor - Licensed Material. Every recipient
-               of the Licensed Material automatically receives an offer
-               from the Licensor to exercise the Licensed Rights under the
-               terms and conditions of this Public License.
-
-            B. Additional offer from the Licensor - Adapted Material. Every
-               recipient of Adapted Material from You automatically
-               receives an offer from the Licensor to exercise the Licensed
-               Rights in the Adapted Material under the conditions of the
-               Adapter's License You apply.
-
-            C. No downstream restrictions. You may not offer or impose any
-               additional or different terms or conditions on, or apply any
-               Effective Technological Measures to, the Licensed Material
-               if doing so restricts exercise of the Licensed Rights by any
-               recipient of the Licensed Material.
-
-        6. No endorsement. Nothing in this Public License constitutes or
-           may be construed as permission to assert or imply that You are,
-           or that Your use of the Licensed Material is, connected with, or
-           sponsored, endorsed, or granted official status by, the Licensor
-           or others designated to receive attribution as provided in
-           Section 3(a)(1)(A)(i).
-
-    b. Other rights.
-
-        1. Moral rights, such as the right of integrity, are not licensed
-           under this Public License, nor are publicity, privacy, and/or
-           other similar personality rights; however, to the extent
-           possible, the Licensor waives and/or agrees not to assert any
-           such rights held by the Licensor to the limited extent necessary
-           to allow You to exercise the Licensed Rights, but not otherwise.
-
-        2. Patent and trademark rights are not licensed under this Public
-           License.
-
-        3. To the extent possible, the Licensor waives any right to collect
-           royalties from You for the exercise of the Licensed Rights,
-           whether directly or through a collecting society under any
-           voluntary or waivable statutory or compulsory licensing
-           scheme. In all other cases the Licensor expressly reserves any
-           right to collect such royalties.
-
-Section 3 - License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
-    a. Attribution.
-
-        1. If You Share the Licensed Material (including in modified form),
-           You must:
-
-            A. retain the following if it is supplied by the Licensor with
-               the Licensed Material:
-
-                i. identification of the creator(s) of the Licensed
-                   Material and any others designated to receive
-                   attribution, in any reasonable manner requested by the
-                   Licensor (including by pseudonym if designated);
-
-                ii. a copyright notice;
-
-                iii. a notice that refers to this Public License;
-
-                iv. a notice that refers to the disclaimer of warranties;
-
-                v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
-
-            B. indicate if You modified the Licensed Material and retain an
-               indication of any previous modifications; and
-
-            C. indicate the Licensed Material is licensed under this Public
-            License, and include the text of, or the URI or hyperlink to,
-            this Public License.
-
-        2. You may satisfy the conditions in Section 3(a)(1) in any
-           reasonable manner based on the medium, means, and context in
-           which You Share the Licensed Material. For example, it may be
-           reasonable to satisfy the conditions by providing a URI or
-           hyperlink to a resource that includes the required information.
-
-        3. If requested by the Licensor, You must remove any of the
-           information required by Section 3(a)(1)(A) to the extent
-           reasonably practicable.  b. ShareAlike.In addition to the
-           conditions in Section 3(a), if You Share Adapted Material You
-           produce, the following conditions also apply.
-
-           1. The Adapter's License You apply must be a Creative Commons
-              license with the same License Elements, this version or
-              later, or a BY-SA Compatible License.
-
-           2. You must include the text of, or the URI or hyperlink to, the
-              Adapter's License You apply. You may satisfy this condition
-              in any reasonable manner based on the medium, means, and
-              context in which You Share Adapted Material.
-
-           3. You may not offer or impose any additional or different terms
-              or conditions on, or apply any Effective Technological
-              Measures to, Adapted Material that restrict exercise of the
-              rights granted under the Adapter's License You apply.
-
-Section 4 - Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that apply to
-Your use of the Licensed Material:
-
-    a. for the avoidance of doubt, Section 2(a)(1) grants You the right to
-       extract, reuse, reproduce, and Share all or a substantial portion of
-       the contents of the database;
-
-    b. if You include all or a substantial portion of the database contents
-       in a database in which You have Sui Generis Database Rights, then
-       the database in which You have Sui Generis Database Rights (but not
-       its individual contents) is Adapted Material, including for purposes
-       of Section 3(b); and
-
-    c. You must comply with the conditions in Section 3(a) if You Share all
-       or a substantial portion of the contents of the database.
-
-    For the avoidance of doubt, this Section 4 supplements and does not
-    replace Your obligations under this Public License where the Licensed
-    Rights include other Copyright and Similar Rights.
-
-Section 5 - Disclaimer of Warranties and Limitation of Liability.
-
-    a. Unless otherwise separately undertaken by the Licensor, to the
-       extent possible, the Licensor offers the Licensed Material as-is and
-       as-available, and makes no representations or warranties of any kind
-       concerning the Licensed Material, whether express, implied,
-       statutory, or other. This includes, without limitation, warranties
-       of title, merchantability, fitness for a particular purpose,
-       non-infringement, absence of latent or other defects, accuracy, or
-       the presence or absence of errors, whether or not known or
-       discoverable. Where disclaimers of warranties are not allowed in
-       full or in part, this disclaimer may not apply to You.
-
-    b. To the extent possible, in no event will the Licensor be liable to
-       You on any legal theory (including, without limitation, negligence)
-       or otherwise for any direct, special, indirect, incidental,
-       consequential, punitive, exemplary, or other losses, costs,
-       expenses, or damages arising out of this Public License or use of
-       the Licensed Material, even if the Licensor has been advised of the
-       possibility of such losses, costs, expenses, or damages. Where a
-       limitation of liability is not allowed in full or in part, this
-       limitation may not apply to You.
-
-    c. The disclaimer of warranties and limitation of liability provided
-       above shall be interpreted in a manner that, to the extent possible,
-       most closely approximates an absolute disclaimer and waiver of all
-       liability.
-
-Section 6 - Term and Termination.
-
-    a. This Public License applies for the term of the Copyright and
-       Similar Rights licensed here. However, if You fail to comply with
-       this Public License, then Your rights under this Public License
-       terminate automatically.
-
-    b. Where Your right to use the Licensed Material has terminated under
-       Section 6(a), it reinstates:
-
-        1. automatically as of the date the violation is cured, provided it
-           is cured within 30 days of Your discovery of the violation; or
-
-        2. upon express reinstatement by the Licensor.
-
-    c. For the avoidance of doubt, this Section 6(b) does not affect any
-       right the Licensor may have to seek remedies for Your violations of
-       this Public License.
-
-    d. For the avoidance of doubt, the Licensor may also offer the Licensed
-       Material under separate terms or conditions or stop distributing the
-       Licensed Material at any time; however, doing so will not terminate
-       this Public License.
-
-    e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
-
-Section 7 - Other Terms and Conditions.
-
-    a. The Licensor shall not be bound by any additional or different terms
-       or conditions communicated by You unless expressly agreed.
-
-    b. Any arrangements, understandings, or agreements regarding the
-       Licensed Material not stated herein are separate from and
-       independent of the terms and conditions of this Public License.
-
-Section 8 - Interpretation.
-
-    a. For the avoidance of doubt, this Public License does not, and shall
-       not be interpreted to, reduce, limit, restrict, or impose conditions
-       on any use of the Licensed Material that could lawfully be made
-       without permission under this Public License.
-
-    b. To the extent possible, if any provision of this Public License is
-       deemed unenforceable, it shall be automatically reformed to the
-       minimum extent necessary to make it enforceable. If the provision
-       cannot be reformed, it shall be severed from this Public License
-       without affecting the enforceability of the remaining terms and
-       conditions.
-
-    c. No term or condition of this Public License will be waived and no
-       failure to comply consented to unless expressly agreed to by the
-       Licensor.
-
-    d. Nothing in this Public License constitutes or may be interpreted as
-       a limitation upon, or waiver of, any privileges and immunities that
-       apply to the Licensor or You, including from the legal processes of
-       any jurisdiction or authority.
-
-Creative Commons is not a party to its public licenses. Notwithstanding,
-Creative Commons may elect to apply one of its public licenses to material
-it publishes and in those instances will be considered the "Licensor." The
-text of the Creative Commons public licenses is dedicated to the public
-domain under the CC0 Public Domain Dedication. Except for the limited
-purpose of indicating that material is shared under a Creative Commons
-public license or as otherwise permitted by the Creative Commons policies
-published at creativecommons.org/policies, Creative Commons does not
-authorize the use of the trademark "Creative Commons" or any other
-trademark or logo of Creative Commons without its prior written consent
-including, without limitation, in connection with any unauthorized
-modifications to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For the
-avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
index 48a65c3a41898f9d747379ebd602bbe8e1ea29b2..7f371d372bdd97b685fc925caf14688af01b86de 100644 (file)
@@ -9657,7 +9657,8 @@ MIPS/LOONGSON2 ARCHITECTURE
 M:     Jiaxun Yang <jiaxun.yang@flygoat.com>
 L:     linux-mips@linux-mips.org
 S:     Maintained
-F:     arch/mips/loongson64/*{2e/2f}*
+F:     arch/mips/loongson64/fuloong-2e/
+F:     arch/mips/loongson64/lemote-2f/
 F:     arch/mips/include/asm/mach-loongson64/
 F:     drivers/*/*loongson2*
 F:     drivers/*/*/*loongson2*
@@ -9864,7 +9865,7 @@ M:        Peter Rosin <peda@axentia.se>
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-mux*
 F:     Documentation/devicetree/bindings/mux/
-F:     include/linux/dt-bindings/mux/
+F:     include/dt-bindings/mux/
 F:     include/linux/mux/
 F:     drivers/mux/
 
@@ -10121,7 +10122,6 @@ L:      netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:     Maintained
-F:     net/core/flow.c
 F:     net/xfrm/
 F:     net/key/
 F:     net/ipv4/xfrm*
@@ -13061,7 +13061,7 @@ SELINUX SECURITY MODULE
 M:     Paul Moore <paul@paul-moore.com>
 M:     Stephen Smalley <sds@tycho.nsa.gov>
 M:     Eric Paris <eparis@parisplace.org>
-L:     selinux@tycho.nsa.gov (moderated for non-subscribers)
+L:     selinux@vger.kernel.org
 W:     https://selinuxproject.org
 W:     https://github.com/SELinuxProject
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
index 9b2df076885a844d22033ba5daa75f6c2eda29dc..bf3786e4ffece3ad2a860cfca080a61dae7ab608 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -483,13 +483,15 @@ endif
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX   := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN  := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
 KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
 KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
index b4441b0764d71aff87b67fba665163b57bdb20b6..a045f30860477bd60e84e50d215684da38edbc19 100644 (file)
@@ -149,7 +149,7 @@ config ARC_CPU_770
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
          -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-                   Shared Address Spaces (for sharing TLB entires in MMU)
+                   Shared Address Spaces (for sharing TLB entries in MMU)
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
index 99cce77ab98f2d79c3dbef3130bff70b91ea076d..644815c0516e75d2ed850f08d9dffd2c65f131c2 100644 (file)
@@ -6,33 +6,11 @@
 # published by the Free Software Foundation.
 #
 
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-    $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-    $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data)          += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
 KBUILD_CFLAGS_MODULE   += -mlong-calls -mno-millicode
index 4674541eba3fd019a51aeb02db27b2bc04569412..8ce6e723591556fc12765a19e08090632bb9d0ba 100644 (file)
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
                task_thread_info(current)->thr_ptr;
        }
 
+
+       /*
+        * setup usermode thread pointer #1:
+        * when child is picked by scheduler, __switch_to() uses @c_callee to
+        * populate usermode callee regs: this works (despite being in a kernel
+        * function) since special return path for child @ret_from_fork()
+        * ensures those regs are not clobbered all the way to RTIE to usermode
+        */
+       c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /*
+        * setup usermode thread pointer #2:
+        * however for this special use of r25 in kernel, __switch_to() sets
+        * r25 for kernel needs and only in the final return path is usermode
+        * r25 setup, from pt_regs->user_r25. So set that up as well
+        */
+       c_regs->user_r25 = c_callee->r25;
+#endif
+
        return 0;
 }
 
index 7423d462d1e4229699f755a75e8237a06a51465c..50dde84b72ed762ea87e4f21ed5aa260a548bd1b 100644 (file)
        };
 };
 
+&cpu0 {
+       /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+       operating-points = <
+               /* kHz   uV */
+               166666  850000
+               400000  900000
+               800000  1050000
+               1000000 1200000
+       >;
+};
+
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
index ae5fdff18406171132817390420dea366281f019..8247bc15addc419d2b6f4bf222d4f37e172ca0a7 100644 (file)
@@ -49,6 +49,8 @@
 #define ARM_DISCARD                                                    \
                *(.ARM.exidx.exit.text)                                 \
                *(.ARM.extab.exit.text)                                 \
+               *(.ARM.exidx.text.exit)                                 \
+               *(.ARM.extab.text.exit)                                 \
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))             \
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))             \
                ARM_EXIT_DISCARD(EXIT_TEXT)                             \
index 450c7a4fbc8a15b10bfd0a7e73b0878291e392c9..cb094e55dc5f12cacd25bde0b5b9eb853c6f3eeb 100644 (file)
@@ -478,15 +478,15 @@ static const struct coproc_reg cp15_regs[] = {
 
        /* ICC_SGI1R */
        { CRm64(12), Op1( 0), is64, access_gic_sgi},
-       /* ICC_ASGI1R */
-       { CRm64(12), Op1( 1), is64, access_gic_sgi},
-       /* ICC_SGI0R */
-       { CRm64(12), Op1( 2), is64, access_gic_sgi},
 
        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },
 
+       /* ICC_ASGI1R */
+       { CRm64(12), Op1( 1), is64, access_gic_sgi},
+       /* ICC_SGI0R */
+       { CRm64(12), Op1( 2), is64, access_gic_sgi},
        /* ICC_SRE */
        { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
 
index 8e38d5267f222356e6085a5bf77a52ac00383a78..e213f8e867f65fa63ae84cac48555aaffb1794af 100644 (file)
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
        return 0;
 }
 
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+       unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+       return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
 static void armv8pmu_reset(void *info)
 {
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
        cpu_pmu->stop                   = armv8pmu_stop,
        cpu_pmu->reset                  = armv8pmu_reset,
        cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
+       cpu_pmu->filter_match           = armv8pmu_filter_match;
 
        return 0;
 }
index 5b4fac434c841e0472d8b3dee0a3d02931095fcb..b3354ff94e7984641dd5a0c076d63f67db5f62e9 100644 (file)
@@ -64,6 +64,9 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
 
+static int num_standard_resources;
+static struct resource *standard_resources;
+
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
 {
        struct memblock_region *region;
        struct resource *res;
+       unsigned long i = 0;
 
        kernel_code.start   = __pa_symbol(_text);
        kernel_code.end     = __pa_symbol(__init_begin - 1);
        kernel_data.start   = __pa_symbol(_sdata);
        kernel_data.end     = __pa_symbol(_end - 1);
 
+       num_standard_resources = memblock.memory.cnt;
+       standard_resources = alloc_bootmem_low(num_standard_resources *
+                                              sizeof(*standard_resources));
+
        for_each_memblock(memory, region) {
-               res = alloc_bootmem_low(sizeof(*res));
+               res = &standard_resources[i++];
                if (memblock_is_nomap(region)) {
                        res->name  = "reserved";
                        res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
 
 static int __init reserve_memblock_reserved_regions(void)
 {
-       phys_addr_t start, end, roundup_end = 0;
-       struct resource *mem, *res;
-       u64 i;
-
-       for_each_reserved_mem_region(i, &start, &end) {
-               if (end <= roundup_end)
-                       continue; /* done already */
-
-               start = __pfn_to_phys(PFN_DOWN(start));
-               end = __pfn_to_phys(PFN_UP(end)) - 1;
-               roundup_end = end;
-
-               res = kzalloc(sizeof(*res), GFP_ATOMIC);
-               if (WARN_ON(!res))
-                       return -ENOMEM;
-               res->start = start;
-               res->end = end;
-               res->name  = "reserved";
-               res->flags = IORESOURCE_MEM;
-
-               mem = request_resource_conflict(&iomem_resource, res);
-               /*
-                * We expected memblock_reserve() regions to conflict with
-                * memory created by request_standard_resources().
-                */
-               if (WARN_ON_ONCE(!mem))
+       u64 i, j;
+
+       for (i = 0; i < num_standard_resources; ++i) {
+               struct resource *mem = &standard_resources[i];
+               phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+               if (!memblock_is_region_reserved(mem->start, mem_size))
                        continue;
-               kfree(res);
 
-               reserve_region_with_split(mem, start, end, "reserved");
+               for_each_reserved_mem_region(j, &r_start, &r_end) {
+                       resource_size_t start, end;
+
+                       start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+                       end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+                       if (start > mem->end || end < mem->start)
+                               continue;
+
+                       reserve_region_with_split(mem, start, end, "reserved");
+               }
        }
 
        return 0;
index b2fa62922d88443dd307d1875eb433479ee7f993..49d6046ca1d0c1661403111519478666964f5a16 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <linux/sizes.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
 
 #endif
 
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP      ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE    (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP              mips_stack_top()
 
 /*
  * This decides where the kernel will search for a free chunk of vm
index 8fc69891e1173a91da5e972a5feaadb3e8547775..d4f7fd4550e10d7ea0dfd8ddcfe916f08df4a03e 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/nmi.h>
 #include <linux/cpu.h>
 
+#include <asm/abi.h>
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
@@ -39,6 +40,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/irq.h>
+#include <asm/mips-cps.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
        return pc;
 }
 
+unsigned long mips_stack_top(void)
+{
+       unsigned long top = TASK_SIZE & PAGE_MASK;
+
+       /* One page for branch delay slot "emulation" */
+       top -= PAGE_SIZE;
+
+       /* Space for the VDSO, data page & GIC user page */
+       top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+       top -= PAGE_SIZE;
+       top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+       /* Space for cache colour alignment */
+       if (cpu_has_dc_aliases)
+               top -= shm_align_mask + 1;
+
+       /* Space to randomize the VDSO base */
+       if (current->flags & PF_RANDOMIZE)
+               top -= VDSO_RANDOMIZE_SIZE;
+
+       return top;
+}
+
 /*
  * Don't forget that the stack pointer must be aligned on a 8 bytes
  * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
index c71d1eb7da5944b182c287aae347488a7594529b..8aaaa42f91ed6626809366e4ca7e533dbeb43bfc 100644 (file)
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
        struct memblock_region *reg;
        extern void plat_mem_setup(void);
 
+       /*
+        * Initialize boot_command_line to an innocuous but non-empty string in
+        * order to prevent early_init_dt_scan_chosen() from copying
+        * CONFIG_CMDLINE into it without our knowledge. We handle
+        * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+        * content because repeating arguments can be problematic.
+        */
+       strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+       /* call board setup routine */
+       plat_mem_setup();
+
+       /*
+        * Make sure all kernel memory is in the maps.  The "UP" and
+        * "DOWN" are opposite for initdata since if it crosses over
+        * into another memory section you don't want that to be
+        * freed when the initdata is freed.
+        */
+       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+                        BOOT_MEM_RAM);
+       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+                        BOOT_MEM_INIT_RAM);
+
+       pr_info("Determined physical RAM map:\n");
+       print_memory_map();
+
 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
        }
 #endif
 #endif
-
-       /* call board setup routine */
-       plat_mem_setup();
-
-       /*
-        * Make sure all kernel memory is in the maps.  The "UP" and
-        * "DOWN" are opposite for initdata since if it crosses over
-        * into another memory section you don't want that to be
-        * freed when the initdata is freed.
-        */
-       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
-                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
-                        BOOT_MEM_RAM);
-       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
-                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
-                        BOOT_MEM_INIT_RAM);
-
-       pr_info("Determined physical RAM map:\n");
-       print_memory_map();
-
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 
        *cmdline_p = command_line;
index 8f845f6e5f4266568288969b9b19b7357b86598b..48a9c6b90e079110e52603947901be76018323a1 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
        }
 }
 
+static unsigned long vdso_base(void)
+{
+       unsigned long base;
+
+       /* Skip the delay slot emulation page */
+       base = STACK_TOP + PAGE_SIZE;
+
+       if (current->flags & PF_RANDOMIZE) {
+               base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+               base = PAGE_ALIGN(base);
+       }
+
+       return base;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (cpu_has_dc_aliases)
                size += shm_align_mask + 1;
 
-       base = get_unmapped_area(NULL, 0, size, 0, 0);
+       base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
index 3a6f34ef5ffc38edf71fe616b2888af9b6dc6f30..069acec3df9f05df3bcea2ff507a8d8763fa60d0 100644 (file)
         * unset_bytes = end_addr - current_addr + 1
         *      a2     =    t1    -      a0      + 1
         */
+       .set            reorder
        PTR_SUBU        a2, t1, a0
+       PTR_ADDIU       a2, 1
        jr              ra
-        PTR_ADDIU      a2, 1
+       .set            noreorder
 
        .endm
 
index f329b466e68f66acc3f8fb8669de2b92776e29df..2d14f17838d23405383e82e28b3dcc2cf5e3afe7 100644 (file)
@@ -426,7 +426,7 @@ void unwind_frame_init_task(struct unwind_frame_info *info,
                        r.gr[30] = get_parisc_stackpointer();
                        regs = &r;
                }
-               unwind_frame_init(info, task, &r);
+               unwind_frame_init(info, task, regs);
        } else {
                unwind_frame_init_from_blocked_task(info, task);
        }
index 2fdc865ca3741e89b0e356724467c7846fc42a80..2a2486526d1fc2a6c2ae84ada6601396758360ba 100644 (file)
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
-                        _PAGE_SOFT_DIRTY)
+                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 /*
  * user access blocked by key
  */
  */
 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |   \
-                        _PAGE_SOFT_DIRTY)
+                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 
 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
                     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
index 3cae9168f63c4f5070fd659ee93ab0c82b6a26a4..e44a8d7959f513ebbd2982314369a5706bda3528 100644 (file)
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
 void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
index 9431784d7796b57cd16604795ea35c8b8f8c235e..40c1dfec944e6040c3f1402679f318250190ffb4 100644 (file)
@@ -10,7 +10,7 @@
 
 static void sclp_early_write(struct console *con, const char *s, unsigned int len)
 {
-       __sclp_early_printk(s, len);
+       __sclp_early_printk(s, len, 0);
 }
 
 static struct console sclp_early_console = {
index a049a7b9d6e893801a1ecd79d9332d3faea8d0ba..c1a080b11ae97743d2553f8e07a99a956ce22763 100644 (file)
@@ -198,12 +198,10 @@ pgm_check_entry:
 
        /* Suspend CPU not available -> panic */
        larl    %r15,init_thread_union
-       ahi     %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r2,.Lpanic_string
-       lghi    %r1,0
-       sam31
-       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
-       brasl   %r14,sclp_early_printk
+       brasl   %r14,sclp_early_printk_force
        larl    %r3,.Ldisabled_wait_31
        lpsw    0(%r3)
 4:
index 666d6b5c0440416537783ceabc7c9bce0520ac39..9c3fc03abe9ae2799898f2d58ff8523ade25d36b 100644 (file)
@@ -28,7 +28,7 @@ typedef struct {
        unsigned short  sock_id;        /* physical package */
        unsigned short  core_id;
        unsigned short  max_cache_id;   /* groupings of highest shared cache */
-       unsigned short  proc_id;        /* strand (aka HW thread) id */
+       signed short    proc_id;        /* strand (aka HW thread) id */
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
index 09acf0ddec10c17903e5a05a7aba251091c38fe7..45b4bf1875e6bec9b070ee764f31b6281d422fd2 100644 (file)
 #define __NR_preadv2           358
 #define __NR_pwritev2          359
 #define __NR_statx             360
+#define __NR_io_pgetevents     361
 
-#define NR_syscalls            361
+#define NR_syscalls            362
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 5868fc333ea8df33d5cb292d399dce6c8865a141..639c8e54530aa56c8493b3676b63b0192025571b 100644 (file)
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->pc = addr;
                        linux_regs->npc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index d5f7dc6323d500768bdbd622671f67a66a7bb809..a68bbddbdba4702727247f5c4c5a8d35d9426398 100644 (file)
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->tpc = addr;
                        linux_regs->tnpc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index d3149baaa33c6291e679add3bf1c05db5268a8e0..67b3e6b3ce5d7cf8b417d361c5bbaadce92cc1e0 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/cpudata.h>
 #include <linux/uaccess.h>
 #include <linux/atomic.h>
+#include <linux/sched/clock.h>
 #include <asm/nmi.h>
 #include <asm/pcr.h>
 #include <asm/cacheflush.h>
@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
                        sparc_perf_event_update(cp, &cp->hw,
                                                cpuc->current_idx[i]);
                        cpuc->current_idx[i] = PIC_NO_INDEX;
+                       if (cp->hw.state & PERF_HES_STOPPED)
+                               cp->hw.state |= PERF_HES_ARCH;
                }
        }
 }
@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
 
                enc = perf_event_get_enc(cpuc->events[i]);
                cpuc->pcr[0] &= ~mask_for_index(idx);
-               if (hwc->state & PERF_HES_STOPPED)
+               if (hwc->state & PERF_HES_ARCH) {
                        cpuc->pcr[0] |= nop_for_index(idx);
-               else
+               } else {
                        cpuc->pcr[0] |= event_encoding(enc, idx);
+                       hwc->state = 0;
+               }
        }
 out:
        cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 
                cpuc->current_idx[i] = idx;
 
+               if (cp->hw.state & PERF_HES_ARCH)
+                       continue;
+
                sparc_pmu_start(cp, PERF_EF_RELOAD);
        }
 out:
@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
        event->hw.state = 0;
 
        sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+       perf_event_update_userpage(event);
 }
 
 static void sparc_pmu_stop(struct perf_event *event, int flags)
@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
        cpuc->events[n0] = event->hw.event_base;
        cpuc->current_idx[n0] = PIC_NO_INDEX;
 
-       event->hw.state = PERF_HES_UPTODATE;
+       event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(ef_flags & PERF_EF_START))
-               event->hw.state |= PERF_HES_STOPPED;
+               event->hw.state |= PERF_HES_ARCH;
 
        /*
         * If group events scheduling transaction was started,
@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
+       u64 finish_clock;
+       u64 start_clock;
        int i;
 
        if (!atomic_read(&active_events))
@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                return NOTIFY_DONE;
        }
 
+       start_clock = sched_clock();
+
        regs = args->regs;
 
        cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        sparc_pmu_stop(event, 0);
        }
 
+       finish_clock = sched_clock();
+
+       perf_sample_event_took(finish_clock - start_clock);
+
        return NOTIFY_STOP;
 }
 
index f6528884a2c898a756b9ef1281192d42ed6ab86c..4073e2b87dd0e39045eebb8cc67328ba720fcadb 100644 (file)
@@ -84,8 +84,9 @@ __handle_signal:
                ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
                sethi                   %hi(0xf << 20), %l4
                and                     %l1, %l4, %l4
+               andn                    %l1, %l4, %l1
                ba,pt                   %xcc, __handle_preemption_continue
-                andn                   %l1, %l4, %l1
+                srl                    %l4, 20, %l4
 
                /* When returning from a NMI (%pil==15) interrupt we want to
                 * avoid running softirqs, doing IRQ tracing, preempting, etc.
index 12bee14b552cd4f1bf2769ec79e558b5eda799c5..621a363098eccdca195ce276ef684b1dfa09e89c 100644 (file)
@@ -90,4 +90,4 @@ sys_call_table:
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 /*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/        .long sys_statx
+/*360*/        .long sys_statx, sys_io_pgetevents
index 387ef993880ae2b359955fd4c10d9c00907d55bd..bb68c805b891855e18af6397ce534f74d5550a4d 100644 (file)
@@ -91,7 +91,7 @@ sys_call_table32:
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
-/*360*/        .word sys_statx
+/*360*/        .word sys_statx, compat_sys_io_pgetevents
 
 #endif /* CONFIG_COMPAT */
 
@@ -173,4 +173,4 @@ sys_call_table:
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
        .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/        .word sys_statx
+/*360*/        .word sys_statx, sys_io_pgetevents
index 635d67ffc9a39f72f3acd24f94b052e754fcfcb1..7db5aabe9708576109bd028c241532150339ff59 100644 (file)
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
                struct vio_dring_register pkt;
                char all[sizeof(struct vio_dring_register) +
                         (sizeof(struct ldc_trans_cookie) *
-                         dr->ncookies)];
+                         VIO_MAX_RING_COOKIES)];
        } u;
+       size_t bytes = sizeof(struct vio_dring_register) +
+                      (sizeof(struct ldc_trans_cookie) *
+                       dr->ncookies);
        int i;
 
-       memset(&u, 0, sizeof(u));
+       if (WARN_ON(bytes > sizeof(u)))
+               return -EINVAL;
+
+       memset(&u, 0, bytes);
        init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
        u.pkt.dring_ident = 0;
        u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
                       (unsigned long long) u.pkt.cookies[i].cookie_size);
        }
 
-       return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+       return send_ctrl(vio, &u.pkt.tag, bytes);
 }
 
 static int send_rdx(struct vio_driver_state *vio)
index dd0b5a92ffd07fff4da30402c240ec96201b5a58..dc85570d88395a411842c7d419a0f2d2fb200914 100644 (file)
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
 targets += $(vdso_img_cfiles)
 targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
                        -Wl,--no-undefined \
                        -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
                        $(DISABLE_LTO)
 
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
 
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 hostprogs-y                    += vdso2c
 
 quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-       $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+      cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
index 3feb3d960ca50c6152c8b702eadb50d9e0b5a42f..75dca9aab737c6cb43cda34d098a7ad005d552e8 100644 (file)
 #define        TICK_PRIV_BIT   (1ULL << 63)
 #endif
 
+#ifdef CONFIG_SPARC64
 #define SYSCALL_STRING                                                 \
        "ta     0x6d;"                                                  \
-       "sub    %%g0, %%o0, %%o0;"                                      \
+       "bcs,a  1f;"                                                    \
+       " sub   %%g0, %%o0, %%o0;"                                      \
+       "1:"
+#else
+#define SYSCALL_STRING                                                 \
+       "ta     0x10;"                                                  \
+       "bcs,a  1f;"                                                    \
+       " sub   %%g0, %%o0, %%o0;"                                      \
+       "1:"
+#endif
 
 #define SYSCALL_CLOBBERS                                               \
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",                 \
index f51595f861b85999f62e0ac7db3f7726a486eede..5eaff3c1aa0c73110ba0668709b6da5c5fd87736 100644 (file)
@@ -262,7 +262,9 @@ static __init int vdso_setup(char *s)
        unsigned long val;
 
        err = kstrtoul(s, 10, &val);
+       if (err)
+               return err;
        vdso_enabled = val;
-       return err;
+       return 0;
 }
 __setup("vdso=", vdso_setup);
index b64acb08a62b94b5c944182ef4b90664ee7f6432..106b7d0e2dae5b4ca34fd41fbe996f851b5c5168 100644 (file)
  */
 #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
                         _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
-                        _PAGE_SOFT_DIRTY)
+                        _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 /*
index 285eb3ec4200e5377d8462eb87ecd4a943341f6b..3736f6dc95450f6f51204946d351b47e27feacf8 100644 (file)
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive);
+                          unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-                                 u32 cbm);
+                                 unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
index 40f3903ae5d98a9124efde23d164a65554fb24c8..f8c260d522ca045f33a675e47a225c908854be5d 100644 (file)
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *cbm_b;
        unsigned int cbm_len;
+       unsigned long cbm_b;
 
        if (d->plr) {
                cbm_len = d->plr->r->cache.cbm_len;
-               cbm_b = (unsigned long *)&d->plr->cbm;
-               if (bitmap_intersects(cbm, cbm_b, cbm_len))
+               cbm_b = d->plr->cbm;
+               if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
        }
        return false;
index 1b8e86a5d5e11ef3f0742a512fd652086c0fcf6f..b140c68bc14ba81b35406d835190428772c91836 100644 (file)
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive)
+                          unsigned long cbm, int closid, bool exclusive)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *ctrl_b;
        enum rdtgrp_mode mode;
+       unsigned long ctrl_b;
        u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
        if (!exclusive) {
-               if (bitmap_intersects(cbm,
-                                     (unsigned long *)&r->cache.shareable_bits,
-                                     r->cache.cbm_len))
+               ctrl_b = r->cache.shareable_bits;
+               if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
                        return true;
        }
 
        /* Check for overlap with other resource groups */
        ctrl = d->ctrl_val;
        for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = (unsigned long *)ctrl;
+               ctrl_b = *ctrl;
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-                       if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+                       if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
                                if (exclusive) {
                                        if (mode == RDT_MODE_EXCLUSIVE)
                                                return true;
@@ -1138,15 +1139,18 @@ out:
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-                                 struct rdt_domain *d, u32 cbm)
+                                 struct rdt_domain *d, unsigned long cbm)
 {
        struct cpu_cacheinfo *ci;
        unsigned int size = 0;
        int num_b, i;
 
-       num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+       num_b = bitmap_weight(&cbm, r->cache.cbm_len);
        ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
        for (i = 0; i < ci->num_leaves; i++) {
                if (ci->info_list[i].level == r->cache_level) {
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
        u32 used_b = 0, unused_b = 0;
        u32 closid = rdtgrp->closid;
        struct rdt_resource *r;
+       unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
        struct rdt_domain *d;
        int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                         * modify the CBM based on system availability.
                         */
                        cbm_ensure_valid(&d->new_ctrl, r);
-                       if (bitmap_weight((unsigned long *) &d->new_ctrl,
-                                         r->cache.cbm_len) <
-                                       r->cache.min_cbm_bits) {
+                       /*
+                        * Assign the u32 CBM to an unsigned long to ensure
+                        * that bitmap_weight() does not access out-of-bound
+                        * memory.
+                        */
+                       tmp_cbm = d->new_ctrl;
+                       if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+                           r->cache.min_cbm_bits) {
                                rdt_last_cmd_printf("no space on %s:%d\n",
                                                    r->name, d->id);
                                return -ENOSPC;
index d96092b35936991c839ef5d66a5f58561ebdc149..61ccfb13899ed702d8ab7dc88bdb5489b34bcfeb 100644 (file)
@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
 
 static inline bool svm_sev_enabled(void)
 {
-       return max_sev_asid;
+       return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
 }
 
 static inline bool sev_guest(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
        return sev->active;
+#else
+       return false;
+#endif
 }
 
 static inline int sev_get_asid(struct kvm *kvm)
index 612fd17be6351c48544abc36884df1c7669727da..e665aa7167cf9729aac82a075c358236d9f03aec 100644 (file)
@@ -1572,8 +1572,12 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
                goto out;
        }
 
+       /*
+        * The FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address
+        * of the base of the EPT PML4 table, so strip off the EPT
+        * configuration information.
+        */
        ret = hyperv_flush_guest_mapping(
-                       to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+                       to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
 
 out:
        spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
index 089e78c4effd1fce3a9d7fdd886cddb88aadd281..59274e2c1ac44c0fb2fb4c004e3e64484b305335 100644 (file)
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
 
 #define UNSHARED_PTRS_PER_PGD                          \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD                      \
+       max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
 
 
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
  * and initialize the kernel pmds here.
  */
 #define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS  MAX_UNSHARED_PTRS_PER_PGD
 
 /*
  * We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_USER_PMDS  (static_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS      0
+#define MAX_PREALLOCATED_PMDS  0
 #define PREALLOCATED_USER_PMDS  0
+#define MAX_PREALLOCATED_USER_PMDS 0
 #endif /* CONFIG_X86_PAE */
 
 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
-       pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
-       pmd_t *pmds[PREALLOCATED_PMDS];
+       pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+       pmd_t *pmds[MAX_PREALLOCATED_PMDS];
 
        pgd = _pgd_alloc();
 
index d1b9dd03da256f36d0f12f3b6dbc839656de7529..bbd44666f2b516c758a0334a2e7b45ce3a291c84 100644 (file)
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
-       unsigned int granularity;
        unsigned int op;
-       int alignment;
        sector_t bs_mask;
 
        if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       /* Zero-sector (unknown) and one-sector granularities are the same.  */
-       granularity = max(q->limits.discard_granularity >> 9, 1U);
-       alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
        while (nr_sects) {
-               unsigned int req_sects;
-               sector_t end_sect, tmp;
+               unsigned int req_sects = nr_sects;
+               sector_t end_sect;
 
-               /*
-                * Issue in chunks of the user defined max discard setting,
-                * ensuring that bi_size doesn't overflow
-                */
-               req_sects = min_t(sector_t, nr_sects,
-                                       q->limits.max_discard_sectors);
                if (!req_sects)
                        goto fail;
                if (req_sects > UINT_MAX >> 9)
                        req_sects = UINT_MAX >> 9;
 
-               /*
-                * If splitting a request, and the next starting sector would be
-                * misaligned, stop the discard at the previous aligned sector.
-                */
                end_sect = sector + req_sects;
-               tmp = end_sect;
-               if (req_sects < nr_sects &&
-                   sector_div(tmp, granularity) != alignment) {
-                       end_sect = end_sect - alignment;
-                       sector_div(end_sect, granularity);
-                       end_sect = end_sect * granularity + alignment;
-                       req_sects = end_sect - sector;
-               }
 
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
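
The UINT_MAX >> 9 clamp kept above exists because a bio carries a 32-bit byte count (bi_iter.bi_size), so a single discard bio must stay below 4 GiB. A small standalone check of that arithmetic (illustration only, not kernel code):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int max_sects = UINT_MAX >> 9;       /* 8388607 sectors */
            unsigned long long max_bytes = (unsigned long long)max_sects << 9;

            /* 4294966784 bytes: the largest whole number of 512-byte sectors
             * that still fits in a 32-bit byte counter.
             */
            printf("%u sectors = %llu bytes\n", max_sects, max_bytes);
            return 0;
    }
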
index 8e20a0677dcf69b6e523b571099fede8c9904d78..8ac93fcbaa2eaaf680cebac8f3d8da8f9a25805d 100644 (file)
@@ -310,6 +310,7 @@ static void scale_up(struct rq_wb *rwb)
        rq_depth_scale_up(&rwb->rq_depth);
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
+       rwb_wake_all(rwb);
        rwb_trace_step(rwb, "scale up");
 }
 
@@ -318,7 +319,6 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
        rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
-       rwb_wake_all(rwb);
        rwb_trace_step(rwb, "scale down");
 }
 
index 5ca56bfae63cf69872cd18270fb8c980db4eddae..f68e9baffad764a72e31fdb652b9707dbaf8e46e 100644 (file)
@@ -36,6 +36,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define VDC_TX_RING_SIZE       512
 #define VDC_DEFAULT_BLK_SIZE   512
 
+#define MAX_XFER_BLKS          (128 * 1024)
+#define MAX_XFER_SIZE          (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
+#define MAX_RING_COOKIES       ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
+
 #define WAITING_FOR_LINK_UP    0x01
 #define WAITING_FOR_TX_SPACE   0x02
 #define WAITING_FOR_GEN_CMD    0x04
@@ -450,7 +454,7 @@ static int __send_request(struct request *req)
 {
        struct vdc_port *port = req->rq_disk->private_data;
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       struct scatterlist sg[port->ring_cookies];
+       struct scatterlist sg[MAX_RING_COOKIES];
        struct vdc_req_entry *rqe;
        struct vio_disk_desc *desc;
        unsigned int map_perm;
@@ -458,6 +462,9 @@ static int __send_request(struct request *req)
        u64 len;
        u8 op;
 
+       if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
+               return -EINVAL;
+
        map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
 
        if (rq_data_dir(req) == READ) {
@@ -984,9 +991,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto err_out_free_port;
 
        port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
-       port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
-       port->ring_cookies = ((port->max_xfer_size *
-                              port->vdisk_block_size) / PAGE_SIZE) + 2;
+       port->max_xfer_size = MAX_XFER_SIZE;
+       port->ring_cookies = MAX_RING_COOKIES;
 
        err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
        if (err)
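
The new fixed bounds above replace a variable-length scatterlist on the stack with one whose size is known at build time. For illustration, and assuming the usual 8 KiB PAGE_SIZE on sparc64 (an assumption of this sketch, not something stated in the hunk), the limits work out as follows:

    #include <stdio.h>

    #define ASSUMED_PAGE_SIZE    8192    /* sparc64 typically uses 8 KiB pages */
    #define VDC_DEFAULT_BLK_SIZE 512
    #define MAX_XFER_BLKS        (128 * 1024)
    #define MAX_XFER_SIZE        (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
    #define MAX_RING_COOKIES     ((MAX_XFER_BLKS / ASSUMED_PAGE_SIZE) + 2)

    int main(void)
    {
            /* 256-sector transfers and an 18-entry sg[] array on the stack. */
            printf("max_xfer_size=%d ring_cookies=%d\n",
                   MAX_XFER_SIZE, MAX_RING_COOKIES);
            return 0;
    }
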
index e182f6019f68abeadac89ce14405a3e27efb7a49..2fee65886d50fd6a45ef14b365900ccdc5a4ffe6 100644 (file)
@@ -1322,7 +1322,7 @@ static int qca_init_regulators(struct qca_power *qca,
 {
        int i;
 
-       qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+       qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
                                      sizeof(struct regulator_bulk_data),
                                      GFP_KERNEL);
        if (!qca->vreg_bulk)
index ffa5dac221e471f95cf4c16dd4c6cb759eb6872a..129ebd2588fdcffe0ddcaf8cae447780b20969f9 100644 (file)
@@ -1434,8 +1434,16 @@ static void __init sun4i_ccu_init(struct device_node *node,
                return;
        }
 
-       /* Force the PLL-Audio-1x divider to 1 */
        val = readl(reg + SUN4I_PLL_AUDIO_REG);
+
+       /*
+        * Force VCO and PLL bias current to lowest setting. Higher
+        * settings interfere with sigma-delta modulation and result
+        * in audible noise and distortions when using SPDIF or I2S.
+        */
+       val &= ~GENMASK(25, 16);
+
+       /* Force the PLL-Audio-1x divider to 1 */
        val &= ~GENMASK(29, 26);
        writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
 
index 7e71043457a68523536249f18e832d8f67e86785..86c699c14f849aca6b5f21caab966b4432ddbc05 100644 (file)
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
 
        safexcel_configure(priv);
 
-       priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+       priv->ring = devm_kcalloc(dev, priv->config.rings,
+                                 sizeof(*priv->ring),
                                  GFP_KERNEL);
        if (!priv->ring) {
                ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
                if (ret)
                        goto err_reg_clk;
 
-               priv->ring[i].rdr_req = devm_kzalloc(dev,
-                       sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+               priv->ring[i].rdr_req = devm_kcalloc(dev,
+                       EIP197_DEFAULT_RING_SIZE,
+                       sizeof(priv->ring[i].rdr_req),
                        GFP_KERNEL);
                if (!priv->ring[i].rdr_req) {
                        ret = -ENOMEM;
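
This conversion, like the hci_qca and npcm750 ones elsewhere in this series, swaps an open-coded n * size allocation for the kcalloc-style form so the element count and size are multiplied with overflow checking inside the allocator. A userspace sketch of why that matters (plain C; calloc() stands in for devm_kcalloc(), and the overflow check shown is what common C libraries provide):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t n = (size_t)-1 / 8 + 2;  /* deliberately overflowing count */

            /* calloc() (like kcalloc/devm_kcalloc) detects that n * 16 cannot
             * be represented and returns NULL ...
             */
            void *safe = calloc(n, 16);

            /* ... while an open-coded multiplication silently wraps to a tiny
             * allocation size.
             */
            size_t wrapped = n * 16;

            printf("calloc: %p, wrapped byte count: %zu\n", safe, wrapped);
            free(safe);
            return 0;
    }
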
index a57300c1d649a36ef6ecbd207a2afd4e92b86513..25187403e3ace0d891feaa3b59923efbcdb45a7a 100644 (file)
@@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
                irq_set_chained_handler_and_data(parent_irq, parent_handler,
                                                 gpiochip);
 
-               gpiochip->irq.parents = &parent_irq;
+               gpiochip->irq.parent_irq = parent_irq;
+               gpiochip->irq.parents = &gpiochip->irq.parent_irq;
                gpiochip->irq.num_parents = 1;
        }
 
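
The gpiolib change above stops storing the address of a function argument (parent_irq, which lives on the caller's stack) in the long-lived gpiochip and instead keeps a copy inside the structure. A minimal userspace sketch of that bug class and the fix (hypothetical names, not the kernel code):

    #include <stdio.h>

    struct chip {
            unsigned int parent_irq;        /* long-lived copy of the value   */
            const unsigned int *parents;    /* must not point at a dead frame */
    };

    static void setup(struct chip *c, unsigned int parent_irq)
    {
            c->parent_irq = parent_irq;     /* copy first ...                 */
            c->parents = &c->parent_irq;    /* ... then point at the copy     */
    }

    int main(void)
    {
            struct chip c;

            /* Pointing c.parents at the setup() argument itself would leave a
             * dangling pointer once setup() returns.
             */
            setup(&c, 42);
            printf("parent irq: %u\n", *c.parents);
            return 0;
    }
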
index 018fcdb353d254293456b613af5ed0e25ec07caa..281cf9cbb44c41981b7541408c97fda822fb0061 100644 (file)
@@ -174,6 +174,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
                state->crtcs[i].state = NULL;
                state->crtcs[i].old_state = NULL;
                state->crtcs[i].new_state = NULL;
+
+               if (state->crtcs[i].commit) {
+                       drm_crtc_commit_put(state->crtcs[i].commit);
+                       state->crtcs[i].commit = NULL;
+               }
        }
 
        for (i = 0; i < config->num_total_plane; i++) {
index 80be74df7ba66355163368f9f2b3eaeaf967a0d8..1bb4c318bdd4d36ae6e1b666176bd6134bf1e81f 100644 (file)
@@ -1408,15 +1408,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
                                          struct drm_atomic_state *old_state)
 {
-       struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;
 
-       for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
-               struct drm_crtc_commit *commit = new_crtc_state->commit;
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
                int ret;
 
-               if (!commit)
+               crtc = old_state->crtcs[i].ptr;
+
+               if (!crtc || !commit)
                        continue;
 
                ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
@@ -1934,6 +1935,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
                drm_crtc_commit_get(commit);
 
                commit->abort_completion = true;
+
+               state->crtcs[i].commit = commit;
+               drm_crtc_commit_get(commit);
        }
 
        for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
index bae43938c8f6128ce29d75ee03a84c310c779621..9cbe8f5c9acafedf5bee808e82b205984528f1b8 100644 (file)
@@ -567,9 +567,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        struct drm_mode_crtc *crtc_req = data;
        struct drm_crtc *crtc;
        struct drm_plane *plane;
-       struct drm_connector **connector_set = NULL, *connector;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_display_mode *mode = NULL;
+       struct drm_connector **connector_set, *connector;
+       struct drm_framebuffer *fb;
+       struct drm_display_mode *mode;
        struct drm_mode_set set;
        uint32_t __user *set_connectors_ptr;
        struct drm_modeset_acquire_ctx ctx;
@@ -598,6 +598,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        mutex_lock(&crtc->dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 retry:
+       connector_set = NULL;
+       fb = NULL;
+       mode = NULL;
+
        ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
        if (ret)
                goto out;
index 3c9fc99648b7c912a4b9fa686798d8fe8d23b651..ff0bfc65a8c1dbbbbe99ac77aeb2e122cdaf3026 100644 (file)
@@ -113,6 +113,9 @@ static const struct edid_quirk {
        /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
        { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
 
+       /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
+       { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+
        /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
        { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
 
@@ -4279,7 +4282,7 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
        struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
 
        dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
-       hdmi->y420_dc_modes |= dc_mask;
+       hdmi->y420_dc_modes = dc_mask;
 }
 
 static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
index 515a7aec57acc48505f195f462a1c46c69da7cbb..9628dd61782698cf71d3e6687df3dc6ff8053d09 100644 (file)
@@ -1580,6 +1580,25 @@ unlock:
 }
 EXPORT_SYMBOL(drm_fb_helper_ioctl);
 
+static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
+                                     const struct fb_var_screeninfo *var_2)
+{
+       return var_1->bits_per_pixel == var_2->bits_per_pixel &&
+              var_1->grayscale == var_2->grayscale &&
+              var_1->red.offset == var_2->red.offset &&
+              var_1->red.length == var_2->red.length &&
+              var_1->red.msb_right == var_2->red.msb_right &&
+              var_1->green.offset == var_2->green.offset &&
+              var_1->green.length == var_2->green.length &&
+              var_1->green.msb_right == var_2->green.msb_right &&
+              var_1->blue.offset == var_2->blue.offset &&
+              var_1->blue.length == var_2->blue.length &&
+              var_1->blue.msb_right == var_2->blue.msb_right &&
+              var_1->transp.offset == var_2->transp.offset &&
+              var_1->transp.length == var_2->transp.length &&
+              var_1->transp.msb_right == var_2->transp.msb_right;
+}
+
 /**
  * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
  * @var: screeninfo to check
@@ -1590,7 +1609,6 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_framebuffer *fb = fb_helper->fb;
-       int depth;
 
        if (var->pixclock != 0 || in_dbg_master())
                return -EINVAL;
@@ -1610,72 +1628,15 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
        }
 
-       switch (var->bits_per_pixel) {
-       case 16:
-               depth = (var->green.length == 6) ? 16 : 15;
-               break;
-       case 32:
-               depth = (var->transp.length > 0) ? 32 : 24;
-               break;
-       default:
-               depth = var->bits_per_pixel;
-               break;
-       }
-
-       switch (depth) {
-       case 8:
-               var->red.offset = 0;
-               var->green.offset = 0;
-               var->blue.offset = 0;
-               var->red.length = 8;
-               var->green.length = 8;
-               var->blue.length = 8;
-               var->transp.length = 0;
-               var->transp.offset = 0;
-               break;
-       case 15:
-               var->red.offset = 10;
-               var->green.offset = 5;
-               var->blue.offset = 0;
-               var->red.length = 5;
-               var->green.length = 5;
-               var->blue.length = 5;
-               var->transp.length = 1;
-               var->transp.offset = 15;
-               break;
-       case 16:
-               var->red.offset = 11;
-               var->green.offset = 5;
-               var->blue.offset = 0;
-               var->red.length = 5;
-               var->green.length = 6;
-               var->blue.length = 5;
-               var->transp.length = 0;
-               var->transp.offset = 0;
-               break;
-       case 24:
-               var->red.offset = 16;
-               var->green.offset = 8;
-               var->blue.offset = 0;
-               var->red.length = 8;
-               var->green.length = 8;
-               var->blue.length = 8;
-               var->transp.length = 0;
-               var->transp.offset = 0;
-               break;
-       case 32:
-               var->red.offset = 16;
-               var->green.offset = 8;
-               var->blue.offset = 0;
-               var->red.length = 8;
-               var->green.length = 8;
-               var->blue.length = 8;
-               var->transp.length = 8;
-               var->transp.offset = 24;
-               break;
-       default:
+       /*
+        * drm fbdev emulation doesn't support changing the pixel format at all,
+        * so reject all pixel format changing requests.
+        */
+       if (!drm_fb_pixel_format_equal(var, &info->var)) {
+               DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
                return -EINVAL;
        }
+
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_check_var);
index 0b976dfd04df0b0c8b71bb0c6b2c471027a2e552..92ecb9bf982cfe7398eefe3993966fc2acf28b0c 100644 (file)
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        }
 
        mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
-       mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+       mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
                                        sizeof(struct drm_plane),
                                        GFP_KERNEL);
 
index 790d39f816dc0c49545cb92cb58458c51f2c8278..b557687b1964e49b8fb5eec078d200a5d6ac739e 100644 (file)
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
                return 0;
        }
 
-       mp->clk_config = devm_kzalloc(&pdev->dev,
-                                     sizeof(struct dss_clk) * num_clk,
+       mp->clk_config = devm_kcalloc(&pdev->dev,
+                                     num_clk, sizeof(struct dss_clk),
                                      GFP_KERNEL);
        if (!mp->clk_config)
                return -ENOMEM;
index 5691dfa1db6fe388bcf50b2db79c3aa5a05a7696..041e7daf8a337f8204107ff02582d8fcfa499b83 100644 (file)
@@ -900,9 +900,22 @@ static enum drm_connector_status
 nv50_mstc_detect(struct drm_connector *connector, bool force)
 {
        struct nv50_mstc *mstc = nv50_mstc(connector);
+       enum drm_connector_status conn_status;
+       int ret;
+
        if (!mstc->port)
                return connector_status_disconnected;
-       return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+
+       ret = pm_runtime_get_sync(connector->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return connector_status_disconnected;
+
+       conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+                                            mstc->port);
+
+       pm_runtime_mark_last_busy(connector->dev->dev);
+       pm_runtime_put_autosuspend(connector->dev->dev);
+       return conn_status;
 }
 
 static void
index e36004fbe45360deb9487fa80cdd564c33fa030e..2a15f2f9271ea26fe775dbed7276014cfe3b4fd7 100644 (file)
@@ -81,9 +81,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
        int i;
 
        for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
-               unsigned long ideal = rate * i;
+               u64 ideal = (u64)rate * i;
                unsigned long rounded;
 
+               /*
+                * ideal has overflowed the max value that can be stored in an
+                * unsigned long, and every clk operation we might do on a
+                * truncated u64 value will give us incorrect results.
+                * Let's just stop there since bigger dividers will result in
+                * the same overflow issue.
+                */
+               if (ideal > ULONG_MAX)
+                       goto out;
+
                rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
                                            ideal);
 
index 8474d601aa63ce018bb51e1f1c51982e195d8edc..b998f9fbed41e3fd854625f1a399936efd84835b 100644 (file)
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
        if (fan_cnt < 1)
                return -EINVAL;
 
-       fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+       fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
        if (!fan_ch)
                return -ENOMEM;
 
index 9ee9a15e71347629d024709a17b9a6bb04fca8aa..9200e349f29e411d53d2dc4126ea2f7a450a9288 100644 (file)
@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL(i2c_put_adapter);
  *
  * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO.
  *        Or a valid pointer to be used with DMA. After use, release it by
- *        calling i2c_release_dma_safe_msg_buf().
+ *        calling i2c_put_dma_safe_msg_buf().
  *
  * This function must only be called from process context!
  */
index faa9e6116b2f970d911cd3ac76d0fed671bbcfef..73332b9a25b544c1cee573dcff8b98813c4ee68e 100644 (file)
@@ -46,6 +46,8 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 
+#include <linux/nospec.h>
+
 #include <linux/uaccess.h>
 
 #include <rdma/ib.h>
@@ -1120,6 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 
        if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
                return -EINVAL;
+       hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
 
        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;
index 21863ddde63e3040b285d9decd8a2ee1c47534b8..01d68ed46c1b6c530a717a7efd8866dd62dc6506 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/module.h>
 #include <linux/nsproxy.h>
 
+#include <linux/nospec.h>
+
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
 #include <rdma/rdma_cm.h>
@@ -1676,6 +1678,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 
        if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;
+       hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
 
        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;
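
Both the ucm and ucma hunks apply the usual Spectre-v1 hardening: after the architectural bounds check, hdr.cmd is clamped with array_index_nospec() so a mispredicted branch cannot index the command table out of bounds. A rough userspace sketch of the shape of that pattern (illustration only; the hand-rolled clamp below is not a faithful mitigation, and the kernel helper builds its mask without relying on the compiler's choice of branch or setcc):

    #include <stdio.h>

    #define TABLE_SIZE 8

    /* Clamp idx to 0 when it is out of range so a dependent load cannot be
     * steered past the table even under misprediction (conceptual only).
     */
    static size_t clamp_index(size_t idx, size_t size)
    {
            size_t mask = (size_t)0 - (size_t)(idx < size); /* all-ones or 0 */

            return idx & mask;
    }

    int main(void)
    {
            int table[TABLE_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
            size_t cmd = 5;

            if (cmd >= TABLE_SIZE)                  /* architectural check   */
                    return 1;
            cmd = clamp_index(cmd, TABLE_SIZE);     /* speculation clamp     */
            printf("dispatching entry %d\n", table[cmd]);
            return 0;
    }
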
index 9fb1d9cb94014e963d88d095b4bfcb31ecb22cb9..e223148376458fdd3c8dba767137abfbbfd0345a 100644 (file)
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
        int shrink = 0;
        int c;
 
+       if (!mr->allocated_from_cache)
+               return;
+
        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                umem = NULL;
        }
 #endif
-
        clean_mr(dev, mr);
 
+       /*
+        * We should unregister the DMA address from the HCA before
+        * removing the DMA mapping.
+        */
+       mlx5_mr_cache_free(dev, mr);
        if (umem) {
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
        }
-
        if (!mr->allocated_from_cache)
                kfree(mr);
-       else
-               mlx5_mr_cache_free(dev, mr);
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
index 370206f987f9600021bace25f658b6d6befbffc1..f48369d6f3a0f36ef1ab412ac95a2b2b51d55b3d 100644 (file)
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
 
                input_inject_event(&evdev->handle,
                                   event.type, event.code, event.value);
+               cond_resched();
        }
 
  out:
index cd620e009bada3a8f8c1e70b99be25100bea9c44..d4b9db487b16fa3f9a87e4f5fd6732a8b4d9c9b4 100644 (file)
@@ -231,6 +231,7 @@ static const struct xpad_device {
        { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
        XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
index eb14ddf693467b4619a9501aa5e712a9b45dfcdf..8ec483e8688be194078d07f3b47fa40d7f75e9ac 100644 (file)
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
 
                input_event(udev->dev, ev.type, ev.code, ev.value);
                bytes += input_event_size();
+               cond_resched();
        }
 
        return bytes;
index f5ae24865355a3292ae8a8efd713746b628cacb0..b0f9d19b3410ae1867e1c134b30f8ccb8a1e5bd3 100644 (file)
@@ -1346,6 +1346,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
        { "ELAN0618", 0 },
+       { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN0622", 0 },
        { "ELAN1000", 0 },
index e08228061bcdd2f97aaadece31d6c83eb7539ae5..412fa71245afe26a7a8ad75705566f83633ba347 100644 (file)
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
                mousedev_generate_response(client, c);
 
                spin_unlock_irq(&client->packet_lock);
+               cond_resched();
        }
 
        kill_fasync(&client->fasync, SIGIO, POLL_IN);
index b8bc71569349d8c45fb18dd4a41947cbf3e68581..95a78ccbd847035007bac3e5ea43e5ee04db35de 100644 (file)
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
        for (i = 0; i < I8042_NUM_PORTS; i++) {
                struct serio *serio = i8042_ports[i].serio;
 
-               if (serio) {
-                       printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
-                               serio->name,
-                               (unsigned long) I8042_DATA_REG,
-                               (unsigned long) I8042_COMMAND_REG,
-                               i8042_ports[i].irq);
-                       serio_register_port(serio);
-                       device_set_wakeup_capable(&serio->dev, true);
-               }
+               if (!serio)
+                       continue;
+
+               printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
+                       serio->name,
+                       (unsigned long) I8042_DATA_REG,
+                       (unsigned long) I8042_COMMAND_REG,
+                       i8042_ports[i].irq);
+               serio_register_port(serio);
+               device_set_wakeup_capable(&serio->dev, true);
+
+               /*
+                * On platforms using suspend-to-idle, allow the keyboard to
+                * wake up the system from sleep by enabling keyboard wakeups
+                * by default.  This is consistent with keyboard wakeup
+                * behavior on many platforms using suspend-to-RAM (ACPI S3)
+                * by default.
+                */
+               if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+                       device_set_wakeup_enable(&serio->dev, true);
        }
 }
 
index e13d991e9fb52eff6176e2a275c8a8d2342b6701..b29a8327eed15641df9000e019c82ad5c1cffedc 100644 (file)
@@ -3484,14 +3484,13 @@ static int __init dm_cache_init(void)
        int r;
 
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
-       if (!migration_cache) {
-               dm_unregister_target(&cache_target);
+       if (!migration_cache)
                return -ENOMEM;
-       }
 
        r = dm_register_target(&cache_target);
        if (r) {
                DMERR("cache target registration failed: %d", r);
+               kmem_cache_destroy(migration_cache);
                return r;
        }
 
index 21d126a5078c637db31c234d6e8173dbd7275bfb..32aabe27b37ce94d06d8df4e855ae6790d200dfd 100644 (file)
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
 static struct target_type flakey_target = {
        .name   = "flakey",
        .version = {1, 5, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
        .features = DM_TARGET_ZONED_HM,
+#endif
        .module = THIS_MODULE,
        .ctr    = flakey_ctr,
        .dtr    = flakey_dtr,
index 89ccb64342de7a4fa8e03d528f66ab9b726e0539..e1fa6baf4e8e39ad79d39254c04be23333992f65 100644 (file)
@@ -3462,7 +3462,8 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+               ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+                                                ic->tag_size, GFP_KERNEL);
                if (!ic->recalc_tags) {
                        ti->error = "Cannot allocate tags for recalculating";
                        r = -ENOMEM;
index d10964d41fd7799cb53c11d0fad14d7620b39140..2f7c44a006c417c0c8c98eb585d97e4682e8112b 100644 (file)
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
 static int linear_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
 {
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
 
        return DM_ENDIO_DONE;
 }
+#endif
 
 static void linear_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 static struct target_type linear_target = {
        .name   = "linear",
        .version = {1, 4, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
+       .end_io = linear_end_io,
        .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+       .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
        .module = THIS_MODULE,
        .ctr    = linear_ctr,
        .dtr    = linear_dtr,
        .map    = linear_map,
-       .end_io = linear_end_io,
        .status = linear_status,
        .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
index 20f7e4ef534227c1141e0dfb6da155359ede25cd..45abb54037fc6427106f383bc1a3ba22bc8d3152 100644 (file)
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
 /*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of whether that device is a
+ * partition and regardless of the target mapping start sector on the device
+ * or partition.
+ * The zone descriptors start sector and write pointer position must be adjusted
+ * to match their relative position within the dm device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
  */
 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
 {
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
        struct blk_zone *zone;
        unsigned int nr_rep = 0;
        unsigned int ofst;
+       sector_t part_offset;
        struct bio_vec bvec;
        struct bvec_iter iter;
        void *addr;
@@ -1178,6 +1181,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
        if (bio->bi_status)
                return;
 
+       /*
+        * bio sector was incremented by the request size on completion. Taking
+        * into account the original request sector, the target start offset on
+        * the backing device and the target mapping offset (ti->begin), the
+        * start sector of the backing device can be deduced. The partition
+        * offset is always 0 if the target uses a whole device.
+        */
+       part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
        /*
         * Remap the start sector of the reported zones. For sequential zones,
         * also remap the write pointer position.
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                /* Set zones start sector */
                while (hdr->nr_zones && ofst < bvec.bv_len) {
                        zone = addr + ofst;
+                       zone->start -= part_offset;
                        if (zone->start >= start + ti->len) {
                                hdr->nr_zones = 0;
                                break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                                else if (zone->cond == BLK_ZONE_COND_EMPTY)
                                        zone->wp = zone->start;
                                else
-                                       zone->wp = zone->wp + ti->begin - start;
+                                       zone->wp = zone->wp + ti->begin - start - part_offset;
                        }
                        ofst += sizeof(struct blk_zone);
                        hdr->nr_zones--;
index a0b9102c4c6e10dca0472328e7f97631863a9208..e201ccb3fda4d74dab326e0f2eb983aa7bacc434 100644 (file)
@@ -1370,6 +1370,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
                brq->data.blocks = card->host->max_blk_count;
 
        if (brq->data.blocks > 1) {
+               /*
+                * Some SD cards in SPI mode return a CRC error or even lock up
+                * completely when trying to read the last block using a
+                * multiblock read command.
+                */
+               if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
+                   (blk_rq_pos(req) + blk_rq_sectors(req) ==
+                    get_capacity(md->disk)))
+                       brq->data.blocks--;
+
                /*
                 * After a read error, we redo the request one sector
                 * at a time in order to accurately determine which
index 0f7cf54e323481a708078dc67e768f81b5b24f8c..89096f10f4c4b9f0001704caf7d457ef98f3c350 100644 (file)
@@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver);
 
 MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
 MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
index e0066adcd2f3d6ffd8e8ea6baba94337df0cd457..fc8b48adf38b45aa7f0dbef5dc2de248826d1889 100644 (file)
@@ -703,7 +703,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       unsigned int port;
        int ret;
 
        ret = bcm_sf2_sw_rst(priv);
@@ -715,14 +714,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
        if (priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(ds, true);
 
-       for (port = 0; port < DSA_MAX_PORTS; port++) {
-               if (dsa_is_user_port(ds, port))
-                       bcm_sf2_port_setup(ds, port, NULL);
-               else if (dsa_is_cpu_port(ds, port))
-                       bcm_sf2_imp_setup(ds, port);
-       }
-
-       bcm_sf2_enable_acb(ds);
+       ds->ops->setup(ds);
 
        return 0;
 }
@@ -1173,10 +1165,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
-       /* Disable all ports and interrupts */
        priv->wol_ports_mask = 0;
-       bcm_sf2_sw_suspend(priv->dev->ds);
        dsa_unregister_switch(priv->dev->ds);
+       /* Disable all ports and interrupts */
+       bcm_sf2_sw_suspend(priv->dev->ds);
        bcm_sf2_mdio_unregister(priv);
 
        return 0;
index 1c682b76190f9eb9ecbe6e428735ae21c4ed8b71..2b3ff0c2015539137538d66f60a83bb72d8cb289 100644 (file)
@@ -245,11 +245,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
-               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
-               ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+               !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_err =
-               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
-               ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+               !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
index 25621a218f20754c29963c7ded3075c0c89a232c..d906293ce07d9d6edab5cd03f32dea94597987b1 100644 (file)
@@ -1575,8 +1575,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
        if (rc)
                return rc;
 
-       ena_init_napi(adapter);
-
        ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
 
        ena_refill_all_rx_bufs(adapter);
@@ -1730,6 +1728,13 @@ static int ena_up(struct ena_adapter *adapter)
 
        ena_setup_io_intr(adapter);
 
+       /* napi poll functions should be initialized before running
+        * request_irq(), to handle a rare condition where there is a pending
+        * interrupt that causes the ISR to fire immediately while the poll
+        * function is not yet set, leading to a NULL dereference.
+        */
+       ena_init_napi(adapter);
+
        rc = ena_request_io_irq(adapter);
        if (rc)
                goto err_req_irq;
@@ -2619,7 +2624,11 @@ err_disable_msix:
        ena_free_mgmnt_irq(adapter);
        ena_disable_msix(adapter);
 err_device_destroy:
+       ena_com_abort_admin_commands(ena_dev);
+       ena_com_wait_for_abort_completion(ena_dev);
        ena_com_admin_destroy(ena_dev);
+       ena_com_mmio_reg_read_request_destroy(ena_dev);
+       ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
 err:
        clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3099,15 +3108,8 @@ err_rss_init:
 
 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
-       int release_bars;
-
-       if (ena_dev->mem_bar)
-               devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
-       if (ena_dev->reg_bar)
-               devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+       int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 
-       release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
 }
 
index 4241ae928d4abb4f61d39344e93d08603a2d2c86..34af5f1569c8f4105d3cf36789eefdb0d63509dc 100644 (file)
@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
        phydev->advertising = phydev->supported;
 
        /* The internal PHY has its link interrupts routed to the
-        * Ethernet MAC ISRs
+        * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+        * that prevents the signaling of link UP interrupts when
+        * the link operates at 10Mbps, so fall back to polling for
+        * those versions of GENET.
         */
-       if (priv->internal_phy)
+       if (priv->internal_phy && !GENET_IS_V5(priv))
                dev->phydev->irq = PHY_IGNORE_INTERRUPT;
 
        return 0;
index 4778b663653e3213dab380d7345b875a227a034e..bf80855dd0dd4337e7a9c577744d4202fb00c4d3 100644 (file)
@@ -452,6 +452,10 @@ struct bufdesc_ex {
  * initialisation.
  */
 #define FEC_QUIRK_MIB_CLEAR            (1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR and FRSR
+ * registers; those FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG            (1 << 16)
 
 struct bufdesc_prop {
        int qid;
index bf9b9fd6d2a07c720597fb72d1d6c6091a3369fd..7b98bb75ba8ac025584306668c0cc93e34943917 100644 (file)
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx25-fec",
-               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+                              FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx27-fec",
-               .driver_data = FEC_QUIRK_MIB_CLEAR,
+               .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+                               FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+                               FEC_QUIRK_HAS_FRREG,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -2164,7 +2166,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
        memset(buf, 0, regs->len);
 
        for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
-               off = fec_enet_register_offset[i] / 4;
+               off = fec_enet_register_offset[i];
+
+               if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+                   !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+                       continue;
+
+               off >>= 2;
                buf[off] = readl(&theregs[off]);
        }
 }
index d2d59444f5626c1d6e08fa7a8f5494b1554e0259..6a046030e8734a8542ff8ea67560f980d9ffb26c 100644 (file)
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
                             NULL, NULL, NULL),
 };
 
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
-                                       union devlink_param_value init_val)
-{
-       struct mlx4_priv *priv = devlink_priv(devlink);
-       struct mlx4_dev *dev = &priv->dev;
-       int err;
-
-       err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
-       if (err)
-               mlx4_warn(dev,
-                         "devlink set parameter %u value failed (err = %d)",
-                         param_id, err);
-}
-
 static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
 {
        union devlink_param_value value;
 
        value.vbool = !!mlx4_internal_err_reset;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+                                          value);
 
        value.vu32 = 1UL << log_num_mac;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+                                          value);
 
        value.vbool = enable_64b_cqe_eqe;
-       mlx4_devlink_set_init_value(devlink,
-                                   MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+                                          value);
 
        value.vbool = enable_4k_uar;
-       mlx4_devlink_set_init_value(devlink,
-                                   MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+                                          value);
 
        value.vbool = false;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+                                          value);
 }
 
 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
index 15d8ae28c040c17e50d37928adac65ead8311893..00172dee5339c42eebb6222cacd3ac9945073dd0 100644 (file)
@@ -432,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
 
 static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
                                              struct mlx5_wq_cyc *wq,
-                                             u16 pi, u16 frag_pi)
+                                             u16 pi, u16 nnops)
 {
        struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
-       u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
        edge_wi = wi + nnops;
 
@@ -454,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
-       u16 pi, frag_pi;
+       u16 pi, contig_wqebbs_room;
        int err;
        int i;
 
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
-       if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
-               mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+               mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }
 
index ae73ea992845683358e3d4097ad5ce58a6abde6e..6dacaeba2fbff85e5091a1151f7ee731e70cf0cd 100644 (file)
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
 
 static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
                                           struct mlx5_wq_cyc *wq,
-                                          u16 pi, u16 frag_pi)
+                                          u16 pi, u16 nnops)
 {
        struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-       u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
 
        edge_wi = wi + nnops;
 
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        struct mlx5e_tx_wqe_info *wi;
 
        struct mlx5e_sq_stats *stats = sq->stats;
+       u16 headlen, ihs, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
-       u16 headlen, ihs, frag_pi;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-       if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
-               mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
        }
 
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        struct mlx5e_tx_wqe_info *wi;
 
        struct mlx5e_sq_stats *stats = sq->stats;
-       u16 headlen, ihs, pi, frag_pi;
+       u16 headlen, ihs, pi, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-       frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-       if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+       pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-               mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
        }
 
-       mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+       mlx5i_sq_fetch_wqe(sq, &wqe, pi);
 
        /* fill wqe */
        wi       = &sq->db.wqe_info[pi];
index 48864f4988a4efa7f60f98c239daaad554395569..c1e1a16a9b07d4335bb4cdc3b29bdea3673b8fa2 100644 (file)
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
                case MLX5_PFAULT_SUBTYPE_WQE:
                        /* WQE based event */
                        pfault->type =
-                               be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+                               (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
                        pfault->token =
                                be32_to_cpu(pf_eqe->wqe.token);
                        pfault->wqe.wq_num =
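
The page-fault type occupies only the low three bits of the top byte of pftype_wq, so the shift alone let neighbouring bits leak into pfault->type. A standalone illustration of the extraction; be32_to_host() is a local stand-in for be32_to_cpu(), and the test value is made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t be32_to_host(uint32_t be)
{
        const uint8_t *b = (const uint8_t *)&be;

        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

static unsigned int pfault_type(uint32_t pftype_wq_be)
{
        return (be32_to_host(pftype_wq_be) >> 24) & 0x7;
}

int main(void)
{
        uint32_t word_be;
        const uint8_t raw[4] = { 0xAB, 0x00, 0x12, 0x34 };

        memcpy(&word_be, raw, sizeof(word_be));
        /* Without "& 0x7" this would report 0xab; with it, only 0x3. */
        printf("type = %#x\n", pfault_type(word_be));
        return 0;
}
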
index 5645a4facad2f3e5cc1a4a48553e6f83142e72dc..b8ee9101c5066fba94c27d5600aa3a632cff33ca 100644 (file)
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
                return ERR_PTR(res);
        }
 
-       /* Context will be freed by wait func after completion */
+       /* Context should be freed by the caller after completion. */
        return context;
 }
 
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
        cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
        cmd.flags = htonl(flags);
        context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
-       if (IS_ERR(context)) {
-               err = PTR_ERR(context);
-               goto out;
-       }
+       if (IS_ERR(context))
+               return PTR_ERR(context);
 
        err = mlx5_fpga_ipsec_cmd_wait(context);
        if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
        }
 
 out:
+       kfree(context);
        return err;
 }
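
The updated comment and the added kfree() settle the ownership rule: mlx5_fpga_ipsec_cmd_exec() allocates the command context and the caller releases it after waiting, on every exit path. A plain-C model of that convention; the names are illustrative, not the mlx5 API.

#include <stdio.h>
#include <stdlib.h>

struct cmd_context { int status; };

static struct cmd_context *cmd_exec(void)
{
        return calloc(1, sizeof(struct cmd_context));
}

static int cmd_wait(struct cmd_context *ctx)
{
        return ctx->status;     /* pretend the device completed the command */
}

static int set_caps(void)
{
        struct cmd_context *ctx = cmd_exec();
        int err;

        if (!ctx)
                return -1;

        err = cmd_wait(ctx);
        free(ctx);              /* freed by the caller, never by cmd_wait() */
        return err;
}

int main(void)
{
        printf("set_caps() = %d\n", set_caps());
        return 0;
}
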
 
index 08eac92fc26cff8e8d1818d7b3f7dd76f356ca0c..0982c579ec740f0b8ecd467c2666bc678037b083 100644 (file)
@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
 
 static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
                                      struct mlx5i_tx_wqe **wqe,
-                                     u16 *pi)
+                                     u16 pi)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
 
-       *pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-       *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+       *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        memset(*wqe, 0, sizeof(**wqe));
 }
 
index 68e7f8df2a6d310989a2b6ee9b2e6c488c3b17e0..ddca327e89505db22a029a53315ab21905f57d12 100644 (file)
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
-       return wq->fbc.frag_sz_m1 + 1;
-}
-
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
        return wq->fbc.sz_m1 + 1;
index 3a1a170bb2d7f3244e7761a6acf6c1fb4a3534c3..b1293d153a587e7cd1c99820d1293d6c396136b0 100644 (file)
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
        return ctr & wq->fbc.sz_m1;
 }
 
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
-       return ctr & wq->fbc.frag_sz_m1;
-}
-
 static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
 {
        return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
        return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+       return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
 static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 {
        int equal   = (cc1 == cc2);
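
The new mlx5_wq_cyc_get_contig_wqebbs() lets the send queues ask how many WQEBBs remain before the current buffer fragment ends, and pad with NOPs when a descriptor would not fit contiguously. A toy model of that check on a small fragmented ring; sizes and names are illustrative, not the mlx5 layout.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16U                   /* power of two */
#define FRAG_SIZE  8U                   /* power of two, divides RING_SIZE */

static uint16_t ctr2ix(uint16_t ctr)
{
        return ctr & (RING_SIZE - 1);
}

static uint16_t contig_room(uint16_t ix)
{
        return FRAG_SIZE - (ix & (FRAG_SIZE - 1));
}

static uint16_t post(uint16_t *pc, uint16_t nslots)
{
        uint16_t pi = ctr2ix(*pc);
        uint16_t room = contig_room(pi);

        if (room < nslots) {
                /* Fill the tail of the fragment with NOP slots, then wrap. */
                printf("filling %u NOP slot(s) at index %u\n",
                       (unsigned)room, (unsigned)pi);
                *pc += room;
                pi = ctr2ix(*pc);
        }
        *pc += nslots;
        return pi;                      /* first slot of the real entry */
}

int main(void)
{
        uint16_t pc = 6;
        uint16_t pi = post(&pc, 4);     /* only 2 slots left in fragment 0 */

        printf("posted at %u, producer counter now %u\n",
               (unsigned)pi, (unsigned)pc);
        return 0;
}
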
index 81533d7f395c14b4e200df93785acdd45ed89c90..937d0ace699a7eeb4e04af3bf54eebde5dd5d459 100644 (file)
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 err_driver_init:
        mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
+       mlxsw_hwmon_fini(mlxsw_core->hwmon);
 err_hwmon_init:
        if (!reload)
                devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
        if (mlxsw_core->driver->fini)
                mlxsw_core->driver->fini(mlxsw_core);
        mlxsw_thermal_fini(mlxsw_core->thermal);
+       mlxsw_hwmon_fini(mlxsw_core->hwmon);
        if (!reload)
                devlink_unregister(devlink);
        mlxsw_emad_fini(mlxsw_core);
index 655ddd204ab27c603fba8c2881fa9b60c40803ba..c35be477856f18d6493c4a8c1c6d14e0ef2f2d1b 100644 (file)
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        return 0;
 }
 
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
 #endif
 
 struct mlxsw_thermal;
index f6cf2896d337d704f102b27644fd769b47f82775..e04e8162aa140d42990bfde521cde7ebf0e9d2af 100644 (file)
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        struct device *hwmon_dev;
        int err;
 
-       mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
-                                  GFP_KERNEL);
+       mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
        if (!mlxsw_hwmon)
                return -ENOMEM;
        mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
        mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
        mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
 
-       hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
-                                                          "mlxsw",
-                                                          mlxsw_hwmon,
-                                                          mlxsw_hwmon->groups);
+       hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+                                                     "mlxsw", mlxsw_hwmon,
+                                                     mlxsw_hwmon->groups);
        if (IS_ERR(hwmon_dev)) {
                err = PTR_ERR(hwmon_dev);
                goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
 err_hwmon_register:
 err_fans_init:
 err_temp_init:
+       kfree(mlxsw_hwmon);
        return err;
 }
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+       hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+       kfree(mlxsw_hwmon);
+}
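
With the hwmon state no longer devm-managed, mlxsw_core has to unwind it explicitly on both the error and teardown paths. A plain-C model of the goto-style unwind the first hunk completes; stage names and the forced failure are illustrative.

#include <stdio.h>

static int hwmon_up, thermal_up;

static int hwmon_init(void)    { hwmon_up = 1;   return 0; }
static void hwmon_fini(void)   { hwmon_up = 0; }
static int thermal_init(void)  { thermal_up = 1; return 0; }
static void thermal_fini(void) { thermal_up = 0; }
static int driver_init(void)   { return -1; }   /* force the error path */

static int core_register(void)
{
        int err;

        err = hwmon_init();
        if (err)
                goto err_hwmon_init;
        err = thermal_init();
        if (err)
                goto err_thermal_init;
        err = driver_init();
        if (err)
                goto err_driver_init;
        return 0;

err_driver_init:
        thermal_fini();
err_thermal_init:
        hwmon_fini();           /* the call the error path was missing */
err_hwmon_init:
        return err;
}

int main(void)
{
        printf("register=%d hwmon_up=%d thermal_up=%d\n",
               core_register(), hwmon_up, thermal_up);
        return 0;
}
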
index 1a4f2bb48ead712634ce5968e23144117d89b8d7..ed4e298cd823977c663c4cd63be8286a3d55ea6e 100644 (file)
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
 {
        unsigned int val, timeout = 10;
 
-       /* Wait for the issued mac table command to be completed, or timeout.
-        * When the command read from ANA_TABLES_MACACCESS is
-        * MACACCESS_CMD_IDLE, the issued command completed successfully.
+       /* Wait for the issued vlan table command to be completed, or timeout.
+        * When the command read from ANA_TABLES_VLANACCESS is
+        * VLANACCESS_CMD_IDLE, the issued command completed successfully.
         */
        do {
                val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
index 46ba0cf257c6d8cdb80b0d84e80e078618d11c36..7a1e9cd9cc62cd539c3833866750efe4bdea0ad4 100644 (file)
@@ -429,12 +429,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 
        switch (off) {
        case offsetof(struct iphdr, daddr):
-               set_ip_addr->ipv4_dst_mask = mask;
-               set_ip_addr->ipv4_dst = exact;
+               set_ip_addr->ipv4_dst_mask |= mask;
+               set_ip_addr->ipv4_dst &= ~mask;
+               set_ip_addr->ipv4_dst |= exact & mask;
                break;
        case offsetof(struct iphdr, saddr):
-               set_ip_addr->ipv4_src_mask = mask;
-               set_ip_addr->ipv4_src = exact;
+               set_ip_addr->ipv4_src_mask |= mask;
+               set_ip_addr->ipv4_src &= ~mask;
+               set_ip_addr->ipv4_src |= exact & mask;
                break;
        default:
                return -EOPNOTSUPP;
@@ -448,11 +450,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
 }
 
 static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
 {
-       ip6->ipv6[idx % 4].mask = mask;
-       ip6->ipv6[idx % 4].exact = exact;
+       ip6->ipv6[word].mask |= mask;
+       ip6->ipv6[word].exact &= ~mask;
+       ip6->ipv6[word].exact |= exact & mask;
 
        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
@@ -465,6 +468,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_src)
 {
        __be32 exact, mask;
+       u8 word;
 
        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -473,17 +477,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
        if (exact & ~mask)
                return -EOPNOTSUPP;
 
-       if (off < offsetof(struct ipv6hdr, saddr))
+       if (off < offsetof(struct ipv6hdr, saddr)) {
                return -EOPNOTSUPP;
-       else if (off < offsetof(struct ipv6hdr, daddr))
-               nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+       } else if (off < offsetof(struct ipv6hdr, daddr)) {
+               word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+               nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
-       else if (off < offsetof(struct ipv6hdr, daddr) +
-                      sizeof(struct in6_addr))
-               nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+       } else if (off < offsetof(struct ipv6hdr, daddr) +
+                      sizeof(struct in6_addr)) {
+               word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+               nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
-       else
+       } else {
                return -EOPNOTSUPP;
+       }
 
        return 0;
 }
@@ -541,7 +548,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
        struct nfp_fl_set_eth set_eth;
        enum pedit_header_type htype;
        int idx, nkeys, err;
-       size_t act_size;
+       size_t act_size = 0;
        u32 offset, cmd;
        u8 ip_proto = 0;
 
@@ -599,7 +606,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                act_size = sizeof(set_eth);
                memcpy(nfp_action, &set_eth, act_size);
                *a_len += act_size;
-       } else if (set_ip_addr.head.len_lw) {
+       }
+       if (set_ip_addr.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip_addr);
                memcpy(nfp_action, &set_ip_addr, act_size);
                *a_len += act_size;
@@ -607,10 +616,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+       }
+       if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
@@ -623,6 +634,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_dst.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_dst);
                memcpy(nfp_action, &set_ip6_dst, act_size);
                *a_len += act_size;
@@ -630,13 +642,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_src.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
 
                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-       } else if (set_tport.head.len_lw) {
+       }
+       if (set_tport.head.len_lw) {
+               nfp_action += act_size;
                act_size = sizeof(set_tport);
                memcpy(nfp_action, &set_tport, act_size);
                *a_len += act_size;
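
These hunks change the pedit offload so that several keys touching the same field accumulate instead of overwriting each other, the IPv6 word is derived from the byte offset rather than the key index, and each generated action is appended after the previous one. A standalone sketch of the masked-merge and word-index arithmetic; types and values are illustrative, not the NFP structures.

#include <stdint.h>
#include <stdio.h>

struct masked_word {
        uint32_t exact;
        uint32_t mask;
};

static void merge_key(struct masked_word *w, uint32_t exact, uint32_t mask)
{
        w->mask  |= mask;
        w->exact &= ~mask;              /* clear the bits this key rewrites */
        w->exact |= exact & mask;       /* then install the new value */
}

/* 32-bit word index of a field inside a 16-byte address, from its offset. */
static unsigned int addr_word(unsigned int off, unsigned int addr_off)
{
        return (off - addr_off) / sizeof(uint32_t);
}

int main(void)
{
        struct masked_word w = { 0, 0 };

        merge_key(&w, 0x0a000000, 0xff000000);  /* first key: top byte */
        merge_key(&w, 0x00000001, 0x000000ff);  /* second key: low byte */
        printf("exact=%#010x mask=%#010x word=%u\n",
               (unsigned)w.exact, (unsigned)w.mask, addr_word(28, 24));
        return 0;
}
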
index af3a28ec04ebf7c2931c871784a24c7a8bb0fd55..0f0aba793352c406404b53306f4bfb454b70a8b6 100644 (file)
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
                attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
                GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
                (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
-                QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+                QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
                GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
 
 out:
index b48f761820499d20d6c1c34892e5cdc7b4785ca1..10b075bc595966ac405751ade7cda6b78ed930d7 100644 (file)
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-       ql_write_nvram_reg(qdev, spir,
-                          ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
 /*
index 9a5e2969df6197cd3383e263b2336dca5faa1a2d..2c350099b83cff30ded1fa7deda5de8fb2f87e34 100644 (file)
@@ -4282,8 +4282,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
-       case RTL_GIGA_MAC_VER_34:
-       case RTL_GIGA_MAC_VER_35:
+       case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+       case RTL_GIGA_MAC_VER_38:
                RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
@@ -6549,17 +6549,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
        struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
        struct net_device *dev = tp->dev;
        u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
-       int work_done= 0;
+       int work_done;
        u16 status;
 
        status = rtl_get_events(tp);
        rtl_ack_events(tp, status & ~tp->event_slow);
 
-       if (status & RTL_EVENT_NAPI_RX)
-               work_done = rtl_rx(dev, tp, (u32) budget);
+       work_done = rtl_rx(dev, tp, (u32) budget);
 
-       if (status & RTL_EVENT_NAPI_TX)
-               rtl_tx(dev, tp);
+       rtl_tx(dev, tp);
 
        if (status & tp->event_slow) {
                enable_mask &= ~tp->event_slow;
@@ -7093,20 +7091,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
 {
        unsigned int flags;
 
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
                RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
                RTL_W8(tp, Cfg9346, Cfg9346_Lock);
                flags = PCI_IRQ_LEGACY;
-               break;
-       case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
-               /* This version was reported to have issues with resume
-                * from suspend when using MSI-X
-                */
-               flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
-               break;
-       default:
+       } else {
                flags = PCI_IRQ_ALL_TYPES;
        }
 
index 7aa5ebb6766cb23eb2553b0af3d699f8d48c69d3..4289ccb26e4ec3045aee380144196150ca198f08 100644 (file)
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                u16 idx = dring->tail;
                struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
 
-               if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+               if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+                       /* reading the register clears the irq */
+                       netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
                        break;
+               }
 
                /* This  barrier is needed to keep us from reading
                 * any other fields out of the netsec_de until we have
index 6acb6b5718b94c1050fdbef60a4a556e27c2275e..493cd382b8aa0f187fcaf1dc520da765f9595fe0 100644 (file)
@@ -830,12 +830,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(rt))
                return PTR_ERR(rt);
 
-       if (skb_dst(skb)) {
-               int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
-                         info->options_len;
-
-               skb_dst_update_pmtu(skb, mtu);
-       }
+       skb_tunnel_check_pmtu(skb, &rt->dst,
+                             GENEVE_IPV4_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
@@ -876,11 +872,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        if (IS_ERR(dst))
                return PTR_ERR(dst);
 
-       if (skb_dst(skb)) {
-               int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
-               skb_dst_update_pmtu(skb, mtu);
-       }
+       skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        if (geneve->collect_md) {
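
Both the IPv4 and IPv6 paths now delegate to skb_tunnel_check_pmtu(). Roughly, the usable inner MTU is the route MTU minus the encapsulation headroom, and an update is only recorded for packets that exceed it; below is a toy userspace model of that arithmetic, with example numbers, while the real helper naturally works on skbs and dst entries.

#include <stdbool.h>
#include <stdio.h>

static bool tunnel_check_pmtu(unsigned int pkt_len, unsigned int dst_mtu,
                              unsigned int headroom, unsigned int *pmtu)
{
        unsigned int mtu = dst_mtu - headroom;

        if (pkt_len <= mtu)
                return false;           /* fits, nothing to record */
        *pmtu = mtu;
        return true;
}

int main(void)
{
        unsigned int pmtu = 0;
        bool update = tunnel_check_pmtu(1500, 1500, 50, &pmtu);

        printf("update=%d pmtu=%u\n", update, pmtu);
        return 0;
}
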
index 6e13b8832bc7df94467211f07c1e7dba15a6e877..fd8bb998ae52d946ca5b29172a4553176addc726 100644 (file)
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
 /* Give this long for the PHY to reset. */
 #define T_PHY_RESET_MS 50
 
-static DEFINE_MUTEX(sfp_mutex);
-
 struct sff_data {
        unsigned int gpios;
        bool (*module_supported)(const struct sfp_eeprom_id *id);
index 533b6fb8d923161ad34b539883713bd3e5a65af4..72a55b6b421184c4fb69411ba3d0150e6c337a88 100644 (file)
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
+       {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},   /* Cinterion ALASxx (1 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
index dab504ec5e502be401cbfe9a8e3f0f572c0220ba..ddfa3f24204c71e66f3d9bfeea5b257404ffc577 100644 (file)
@@ -2218,8 +2218,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
        /* Make sure no work handler is accessing the device */
        flush_work(&vi->config_work);
 
+       netif_tx_lock_bh(vi->dev);
        netif_device_detach(vi->dev);
-       netif_tx_disable(vi->dev);
+       netif_tx_unlock_bh(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
        if (netif_running(vi->dev)) {
@@ -2255,7 +2256,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
                }
        }
 
+       netif_tx_lock_bh(vi->dev);
        netif_device_attach(vi->dev);
+       netif_tx_unlock_bh(vi->dev);
        return err;
 }
 
index 2b8da2b7e721e33f0683efa61e50ceac68d256e7..27bd586b94b0a01f1f99e0c01d8df85c1757b960 100644 (file)
@@ -2194,11 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ndst = &rt->dst;
-               if (skb_dst(skb)) {
-                       int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
-                       skb_dst_update_pmtu(skb, mtu);
-               }
+               skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2231,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                goto out_unlock;
                }
 
-               if (skb_dst(skb)) {
-                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
-                       skb_dst_update_pmtu(skb, mtu);
-               }
+               skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
index 43743c26c071f538f1942696aa97d20b0cbf091d..39bf85d0ade0ed077b5d85c657d72a7f30698c53 100644 (file)
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
        if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
                dev_info(dev, "Suspend without wake params -- powering down card\n");
                if (priv->fw_ready) {
+                       ret = lbs_suspend(priv);
+                       if (ret)
+                               return ret;
+
                        priv->power_up_on_resume = true;
                        if_sdio_power_off(card);
                }
index 7780b07543bb8d2bf247268f456e44efed517eab..79e59f2379a26e116f2026f43d403dd625318f0a 100644 (file)
@@ -258,7 +258,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
        if (!buf->urb)
                return -ENOMEM;
 
-       buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+       buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
                                    gfp);
        if (!buf->urb->sg)
                return -ENOMEM;
@@ -464,8 +464,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
        int i, err, nsgs;
 
        spin_lock_init(&q->lock);
-       q->entry = devm_kzalloc(dev->dev,
-                               MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+       q->entry = devm_kcalloc(dev->dev,
+                               MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;
@@ -717,8 +717,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
                INIT_LIST_HEAD(&q->swq);
                q->hw_idx = q2hwq(i);
 
-               q->entry = devm_kzalloc(dev->dev,
-                                       MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+               q->entry = devm_kcalloc(dev->dev,
+                                       MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;
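
These conversions replace open-coded "count * size" allocations with the kcalloc-style form. A userspace model of why that matters: the multiplication gets an explicit overflow check instead of silently wrapping. checked_calloc() is a made-up helper, not a kernel API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *checked_calloc(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;            /* multiplication would overflow */
        return calloc(n, size);         /* zeroed, like devm_kzalloc */
}

int main(void)
{
        void *ok  = checked_calloc(128, sizeof(uint32_t));
        void *bad = checked_calloc(SIZE_MAX / 2, 4);

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
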
index dd8ec1dd92190997f823e0bfb72389baea73b268..6bb9908bf46f1e2906f45af82be11684d18af444 100644 (file)
@@ -3143,8 +3143,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        }
 
        mutex_lock(&ns->ctrl->subsys->lock);
-       nvme_mpath_clear_current_path(ns);
        list_del_rcu(&ns->siblings);
+       nvme_mpath_clear_current_path(ns);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        down_write(&ns->ctrl->namespaces_rwsem);
index 722537e14848436bfb58bc1c32fabd5c786166ba..41b49716ac75f24f2f97d382fb4271f2998217ce 100644 (file)
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
        pdev = of_find_device_by_node(np);
        unittest(pdev, "device 1 creation failed\n");
 
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+       if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq == -EPROBE_DEFER,
+                        "device deferred probe failed - %d\n", irq);
 
-       /* Test that a parsing failure does not return -EPROBE_DEFER */
-       np = of_find_node_by_path("/testcase-data/testcase-device2");
-       pdev = of_find_device_by_node(np);
-       unittest(pdev, "device 2 creation failed\n");
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+               /* Test that a parsing failure does not return -EPROBE_DEFER */
+               np = of_find_node_by_path("/testcase-data/testcase-device2");
+               pdev = of_find_device_by_node(np);
+               unittest(pdev, "device 2 creation failed\n");
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq < 0 && irq != -EPROBE_DEFER,
+                        "device parsing error failed - %d\n", irq);
+       }
 
        np = of_find_node_by_path("/testcase-data/platform-tests");
        unittest(np, "No testcase data in device tree\n");
index 86f1b002c846a395ca7c93bcc5866ab0fe3aa374..975bcdd6b5c0a73e0c75a947574c0b6b9b774e10 100644 (file)
@@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
                return 0;
        }
 
-       phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+       phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return -ENOMEM;
 
-       link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+       link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;
 
index 7f01f6f60b870374506131eaa13adb2b54a5facb..d0b7dd8fb184b041446707dceccda87588ba45a2 100644 (file)
@@ -485,7 +485,13 @@ static int armpmu_filter_match(struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();
-       return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+       int ret;
+
+       ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+       if (ret && armpmu->filter_match)
+               return armpmu->filter_match(event);
+
+       return ret;
 }
 
 static ssize_t armpmu_cpumask_show(struct device *dev,
index 4a8a8efadefab04b7add3ffe37a5c81fb322c093..cf73a403d22dfc1778d6a893bca04d5b706dce18 100644 (file)
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
                return err;
        }
 
+       return 0;
+}
+
+static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
+{
+       struct gpio_chip *chip = &mcp->chip;
+       int err;
+
        err =  gpiochip_irqchip_add_nested(chip,
                                           &mcp23s08_irq_chip,
                                           0,
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        }
 
        if (mcp->irq && mcp->irq_controller) {
-               ret = mcp23s08_irq_setup(mcp);
+               ret = mcp23s08_irqchip_setup(mcp);
                if (ret)
                        goto fail;
        }
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                goto fail;
        }
 
+       if (mcp->irq)
+               ret = mcp23s08_irq_setup(mcp);
+
 fail:
        if (ret < 0)
                dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
index 398393ab5df854d71d2158cad47b52923246bcb4..b6fd4838f60f3f9c198988072e7dd25d6a01d02a 100644 (file)
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
        ret = cros_ec_cmd_xfer(ec_dev, msg);
        if (ret > 0) {
                ec_dev->event_size = ret - 1;
-               memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+               memcpy(&ec_dev->event_data, msg->data, ret);
        }
 
        return ret;
index 01b0e2bb33190c78fb3818e34d5aebf4f60b2832..2012551d93e02381cb1136ada745e22da77f188d 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/slab.h>
 #include <linux/timekeeping.h>
 
+#include <linux/nospec.h>
+
 #include "ptp_private.h"
 
 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EINVAL;
                        break;
                }
+               pin_index = array_index_nospec(pin_index, ops->n_pins);
                if (mutex_lock_interruptible(&ptp->pincfg_mux))
                        return -ERESTARTSYS;
                pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EINVAL;
                        break;
                }
+               pin_index = array_index_nospec(pin_index, ops->n_pins);
                if (mutex_lock_interruptible(&ptp->pincfg_mux))
                        return -ERESTARTSYS;
                err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
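
array_index_nospec() clamps the pin index after the bounds check so a mispredicted branch cannot be used to form an out-of-bounds address. Below is a userspace sketch of the idea; the mask mirrors the shape of the kernel's generic C fallback, architectures may substitute branch-free assembly, and the signed conversion/shift it leans on is implementation-defined in strict ISO C.

#include <stddef.h>
#include <stdio.h>

static size_t index_mask_nospec(size_t index, size_t size)
{
        /* All-ones when index < size, zero otherwise, with no branch. */
        return (size_t)(~(long)(index | (size - 1 - index)) >>
                        (sizeof(long) * 8 - 1));
}

static size_t index_nospec(size_t index, size_t size)
{
        return index & index_mask_nospec(index, size);
}

int main(void)
{
        size_t n_pins = 8;

        /* In real code the clamp follows the architectural bounds check;
         * the out-of-range value below only shows what speculation sees. */
        printf("5 -> %zu, 12 -> %zu\n",
               index_nospec(5, n_pins), index_nospec(12, n_pins));
        return 0;
}
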
index eceba3858cefbbe0b2ce46df46751f4d53bc7122..2f61f5579aa54708213f3226c940ef65c462b72b 100644 (file)
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
  */
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
 {
        int have_linemode, have_vt220;
 
-       if (sclp_init_state != sclp_init_state_uninitialized)
+       if (!force && sclp_init_state != sclp_init_state_uninitialized)
                return;
        if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
                return;
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
 
 void sclp_early_printk(const char *str)
 {
-       __sclp_early_printk(str, strlen(str));
+       __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+       __sclp_early_printk(str, strlen(str), 1);
 }
index dbe7c7ac9ac8c8c4456f142b14c740d3bdc0c5e6..fd77e46eb3b21520f2bf155612aed0248e773884 100644 (file)
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
 
        for (i = 0; i < pat->pat_nr; i++, pa++)
                for (j = 0; j < pa->pa_nr; j++)
-                       if (pa->pa_iova_pfn[i] == iova_pfn)
+                       if (pa->pa_iova_pfn[j] == iova_pfn)
                                return true;
 
        return false;
index 770fa9cfc31041dd84a78a00f0f4135bef5a79ed..f47d16b5810b9154c7b8bd852039d1cdc89b33d3 100644 (file)
@@ -22,6 +22,7 @@
 #include "vfio_ccw_private.h"
 
 struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
 
 /*
  * Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
                cp_update_scsw(&private->cp, &irb->scsw);
                cp_free(&private->cp);
        }
-       memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+       memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;
+
+       private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+                                              GFP_KERNEL | GFP_DMA);
+       if (!private->io_region) {
+               kfree(private);
+               return -ENOMEM;
+       }
+
        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
 
@@ -139,6 +148,7 @@ out_disable:
        cio_disable_subchannel(sch);
 out_free:
        dev_set_drvdata(&sch->dev, NULL);
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
        return ret;
 }
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
 
        dev_set_drvdata(&sch->dev, NULL);
 
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
 
        return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
        if (!vfio_ccw_work_q)
                return -ENOMEM;
 
+       vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+                                       sizeof(struct ccw_io_region), 0,
+                                       SLAB_ACCOUNT, 0,
+                                       sizeof(struct ccw_io_region), NULL);
+       if (!vfio_ccw_io_region) {
+               destroy_workqueue(vfio_ccw_work_q);
+               return -ENOMEM;
+       }
+
        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
+               kmem_cache_destroy(vfio_ccw_io_region);
                destroy_workqueue(vfio_ccw_work_q);
        }
 
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
 {
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
+       kmem_cache_destroy(vfio_ccw_io_region);
        destroy_workqueue(vfio_ccw_work_q);
 }
 module_init(vfio_ccw_sch_init);
index 797a82731159a5f9f584810f924adc3467b1e702..f94aa01f9c36adb748693a09766cb0ea33dec450 100644 (file)
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
 {
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
-       private->io_region.ret_code = -EIO;
+       private->io_region->ret_code = -EIO;
 }
 
 static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
 {
-       private->io_region.ret_code = -EBUSY;
+       private->io_region->ret_code = -EBUSY;
 }
 
 static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 {
        union orb *orb;
        union scsw *scsw = &private->scsw;
-       struct ccw_io_region *io_region = &private->io_region;
+       struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";
 
index 41eeb57d68a3d3b5f4528c0d1a05e493142301ac..f673e106c041535fd0e8b69de44cbddb92a5e6e1 100644 (file)
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                return -EINVAL;
 
        private = dev_get_drvdata(mdev_parent_dev(mdev));
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_to_user(buf, (void *)region + *ppos, count))
                return -EFAULT;
 
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
        if (private->state != VFIO_CCW_STATE_IDLE)
                return -EACCES;
 
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_from_user((void *)region + *ppos, buf, count))
                return -EFAULT;
 
index 78a66d96756ba0f55059e566ae848a3126fca94e..078e46f9623d5f4a128ea37e3840867583ff47f8 100644 (file)
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
        atomic_t                avail;
        struct mdev_device      *mdev;
        struct notifier_block   nb;
-       struct ccw_io_region    io_region;
+       struct ccw_io_region    *io_region;
 
        struct channel_program  cp;
        struct irb              irb;
index 7b31f19ade8318594a4745df869ee16ae1f3cabe..050879a2ddef28c7c0156359e3942cf483bdffaa 100644 (file)
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
 
 static int __init openprom_init(void)
 {
-       struct device_node *dp;
        int err;
 
        err = misc_register(&openprom_dev);
        if (err)
                return err;
 
-       dp = of_find_node_by_path("/");
-       dp = dp->child;
-       while (dp) {
-               if (!strcmp(dp->name, "options"))
-                       break;
-               dp = dp->sibling;
-       }
-       options_node = dp;
-
+       options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
        if (!options_node) {
                misc_deregister(&openprom_dev);
                return -EIO;
index 524f9ea62e52a0ed472b1177c57f7aa3b0f98f17..6516bc3cb58b0704b37a57d1cff3d3297feccb71 100644 (file)
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
 alloc_error:
        kfree(ctx->ccb_buf);
 done:
-       if (ctx != NULL)
-               kfree(ctx);
+       kfree(ctx);
        return -ENOMEM;
 }
 
index 05c42235dd41de138ebde5b5fce9289ae5a5b4bc..7c3cc968053cd1ddac6230765d6580227e7ee70a 100644 (file)
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
  */
 static dma_addr_t fbpr_a;
 static size_t fbpr_sz;
+static int __bman_probed;
 
 static int bman_fbpr(struct reserved_mem *rmem)
 {
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
        return IRQ_HANDLED;
 }
 
+int bman_is_probed(void)
+{
+       return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
 static int fsl_bman_probe(struct platform_device *pdev)
 {
        int ret, err_irq;
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
        u16 id, bm_pool_cnt;
        u8 major, minor;
 
+       __bman_probed = -1;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
                return ret;
        }
 
+       __bman_probed = 1;
+
        return 0;
 };
 
index 79cba58387a58f7ebcf19861a9fffcc56cc2c490..6fd5fef5f39b361039397fec94b456ab426aeb5a 100644 (file)
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
 static u32 __iomem *qm_ccsr_start;
 /* A SDQCR mask comprising all the available/visible pool channels */
 static u32 qm_pools_sdqcr;
+static int __qman_probed;
 
 static inline u32 qm_ccsr_in(u32 offset)
 {
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
        return 0;
 }
 
+int qman_is_probed(void)
+{
+       return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
 static int fsl_qman_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
        u16 id;
        u8 major, minor;
 
+       __qman_probed = -1;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       __qman_probed = 1;
+
        return 0;
 }
 
index a120002b630ea69974e52586cefdf4c788276aaa..3e9391d117c543cb46f49893610e528ed52984e8 100644 (file)
@@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev)
        int irq, cpu, err;
        u32 val;
 
+       err = qman_is_probed();
+       if (!err)
+               return -EPROBE_DEFER;
+       if (err < 0) {
+               dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+               return -ENODEV;
+       }
+
        pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
        if (!pcfg)
                return -ENOMEM;
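
The bman/qman probes now publish a tri-state "probed" flag that the dependent portal driver consults, deferring until the controller has probed and bailing out if it failed. A plain-C model of that handshake; the error numbers are the usual kernel values, used here as ordinary constants.

#include <stdio.h>

#define EPROBE_DEFER 517
#define ENODEV        19

static int controller_probed;   /* 0 = not yet, -1 = failed, 1 = ready */

static int controller_probe(int should_fail)
{
        controller_probed = -1; /* pessimistic until probe completes */
        if (should_fail)
                return -ENODEV;
        controller_probed = 1;
        return 0;
}

static int portal_probe(void)
{
        if (!controller_probed)
                return -EPROBE_DEFER;   /* retry after the controller */
        if (controller_probed < 0)
                return -ENODEV;         /* controller is broken: give up */
        return 0;
}

int main(void)
{
        printf("before controller probe: %d\n", portal_probe());
        controller_probe(0);
        printf("after controller probe:  %d\n", portal_probe());
        return 0;
}
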
index 29ec343872466e49a310ad740b67fe2f9a9603d0..1515074e18fb6d60a37de13daf6102b98712841f 100644 (file)
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
        geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
        geni_se_select_mode(&port->se, port->xfer_mode);
        if (!uart_console(uport)) {
-               port->rx_fifo = devm_kzalloc(uport->dev,
-                       port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+               port->rx_fifo = devm_kcalloc(uport->dev,
+                       port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
                if (!port->rx_fifo)
                        return -ENOMEM;
        }
index bc03b0a690b4d166b9984d2677642797a421551f..9ede35cecb1267be281ca9f3733f187a79f16d23 100644 (file)
@@ -310,17 +310,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
 
                if (difference & ACM_CTRL_DSR)
                        acm->iocount.dsr++;
-               if (difference & ACM_CTRL_BRK)
-                       acm->iocount.brk++;
-               if (difference & ACM_CTRL_RI)
-                       acm->iocount.rng++;
                if (difference & ACM_CTRL_DCD)
                        acm->iocount.dcd++;
-               if (difference & ACM_CTRL_FRAMING)
+               if (newctrl & ACM_CTRL_BRK)
+                       acm->iocount.brk++;
+               if (newctrl & ACM_CTRL_RI)
+                       acm->iocount.rng++;
+               if (newctrl & ACM_CTRL_FRAMING)
                        acm->iocount.frame++;
-               if (difference & ACM_CTRL_PARITY)
+               if (newctrl & ACM_CTRL_PARITY)
                        acm->iocount.parity++;
-               if (difference & ACM_CTRL_OVERRUN)
+               if (newctrl & ACM_CTRL_OVERRUN)
                        acm->iocount.overrun++;
                spin_unlock_irqrestore(&acm->read_lock, flags);
 
@@ -355,7 +355,6 @@ static void acm_ctrl_irq(struct urb *urb)
        case -ENOENT:
        case -ESHUTDOWN:
                /* this urb is terminated, clean up */
-               acm->nb_index = 0;
                dev_dbg(&acm->control->dev,
                        "%s - urb shutting down with status: %d\n",
                        __func__, status);
@@ -1642,6 +1641,7 @@ static int acm_pre_reset(struct usb_interface *intf)
        struct acm *acm = usb_get_intfdata(intf);
 
        clear_bit(EVENT_RX_STALL, &acm->flags);
+       acm->nb_index = 0; /* pending control transfers are lost */
 
        return 0;
 }
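
The notification handler now counts break, ring, framing, parity and overrun whenever the message reports them set, while DSR/DCD keep counting transitions. A standalone model of the two counting styles; the bit values and struct are illustrative.

#include <stdint.h>
#include <stdio.h>

#define CTRL_DSR (1 << 0)
#define CTRL_BRK (1 << 2)

struct counters { unsigned int dsr, brk; };

static void process_notification(struct counters *c, uint16_t *ctrl,
                                 uint16_t newctrl)
{
        uint16_t difference = *ctrl ^ newctrl;

        if (difference & CTRL_DSR)      /* state changed since last time */
                c->dsr++;
        if (newctrl & CTRL_BRK)         /* event reported in this message */
                c->brk++;
        *ctrl = newctrl;
}

int main(void)
{
        struct counters c = { 0, 0 };
        uint16_t ctrl = 0;

        process_notification(&c, &ctrl, CTRL_DSR | CTRL_BRK);
        process_notification(&c, &ctrl, CTRL_DSR | CTRL_BRK);  /* BRK again */
        printf("dsr=%u brk=%u\n", c.dsr, c.brk);               /* 1 and 2 */
        return 0;
}
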
index 244417d0dfd1fdc74c2f0c8ee58e35f14dd9e7db..ffccd40ea67da4c5d70a96ec057d4cc32cb04ed4 100644 (file)
@@ -1474,8 +1474,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = 0;
        switch (uurb->type) {
        case USBDEVFS_URB_TYPE_CONTROL:
-               if (is_in)
-                       allow_short = true;
                if (!usb_endpoint_xfer_control(&ep->desc))
                        return -EINVAL;
                /* min 8 byte setup packet */
@@ -1505,6 +1503,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                        is_in = 0;
                        uurb->endpoint &= ~USB_DIR_IN;
                }
+               if (is_in)
+                       allow_short = true;
                snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
                        "bRequest=%02x wValue=%04x "
                        "wIndex=%04x wLength=%04x\n",
index ca8a4b53c59f9896e40e1ffd107d2d3b1f3e2e74..1074cb82ec172d2ac464d72d9e52c9461715c868 100644 (file)
 #include <linux/usb/gadget.h>
 #include <linux/usb/composite.h>
 
+#include <linux/nospec.h>
+
 #include "configfs.h"
 
 
@@ -3152,6 +3154,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
        fsg_opts = to_fsg_opts(&group->cg_item);
        if (num >= FSG_MAX_LUNS)
                return ERR_PTR(-ERANGE);
+       num = array_index_nospec(num, FSG_MAX_LUNS);
 
        mutex_lock(&fsg_opts->lock);
        if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
index 722860eb5a91f5bcec455871320f6c5d6e95e535..51dd8e00c4f8ea20a53b48e38c2b0552e5cb3acc 100644 (file)
@@ -179,10 +179,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+           pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
                xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
                xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
-       }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
index 1fb3dd0f1dfa3cb9402445930beb1b2ceef66c1a..277de96181f9a056091aaca586831ece879f1d60 100644 (file)
@@ -161,6 +161,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
 {
        struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
 
+       pm_runtime_disable(&pdev->dev);
+
        usb_role_switch_unregister(data->role_sw);
        return 0;
 }
index d11f3f8dad4045e9c51bce1789b9473b60237f61..1e592ec94ba49d19ba457af7274cc71c9779ea35 100644 (file)
@@ -318,8 +318,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
        struct vhci_hcd *vhci_hcd;
        struct vhci     *vhci;
        int             retval = 0;
-       int             rhport;
+       int             rhport = -1;
        unsigned long   flags;
+       bool invalid_rhport = false;
 
        u32 prev_port_status[VHCI_HC_PORTS];
 
@@ -334,9 +335,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
        usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
                          wIndex);
 
-       if (wIndex > VHCI_HC_PORTS)
-               pr_err("invalid port number %d\n", wIndex);
-       rhport = wIndex - 1;
+       /*
+        * wIndex can be 0 for some request types (typeReq). rhport is
+        * in valid range when wIndex >= 1 and < VHCI_HC_PORTS.
+        *
+        * Reference port_status[] only with valid rhport when
+        * invalid_rhport is false.
+        */
+       if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
+               invalid_rhport = true;
+               if (wIndex > VHCI_HC_PORTS)
+                       pr_err("invalid port number %d\n", wIndex);
+       } else
+               rhport = wIndex - 1;
 
        vhci_hcd = hcd_to_vhci_hcd(hcd);
        vhci = vhci_hcd->vhci;
@@ -345,8 +356,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
        /* store old status and compare now and old later */
        if (usbip_dbg_flag_vhci_rh) {
-               memcpy(prev_port_status, vhci_hcd->port_status,
-                       sizeof(prev_port_status));
+               if (!invalid_rhport)
+                       memcpy(prev_port_status, vhci_hcd->port_status,
+                               sizeof(prev_port_status));
        }
 
        switch (typeReq) {
@@ -354,8 +366,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                usbip_dbg_vhci_rh(" ClearHubFeature\n");
                break;
        case ClearPortFeature:
-               if (rhport < 0)
+               if (invalid_rhport) {
+                       pr_err("invalid port number %d\n", wIndex);
                        goto error;
+               }
                switch (wValue) {
                case USB_PORT_FEAT_SUSPEND:
                        if (hcd->speed == HCD_USB3) {
@@ -415,9 +429,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                break;
        case GetPortStatus:
                usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
-               if (wIndex < 1) {
+               if (invalid_rhport) {
                        pr_err("invalid port number %d\n", wIndex);
                        retval = -EPIPE;
+                       goto error;
                }
 
                /* we do not care about resume. */
@@ -513,16 +528,20 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                goto error;
                        }
 
-                       if (rhport < 0)
+                       if (invalid_rhport) {
+                               pr_err("invalid port number %d\n", wIndex);
                                goto error;
+                       }
 
                        vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
                        break;
                case USB_PORT_FEAT_POWER:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_POWER\n");
-                       if (rhport < 0)
+                       if (invalid_rhport) {
+                               pr_err("invalid port number %d\n", wIndex);
                                goto error;
+                       }
                        if (hcd->speed == HCD_USB3)
                                vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
                        else
@@ -531,8 +550,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_BH_PORT_RESET:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
-                       if (rhport < 0)
+                       if (invalid_rhport) {
+                               pr_err("invalid port number %d\n", wIndex);
                                goto error;
+                       }
                        /* Applicable only for USB3.0 hub */
                        if (hcd->speed != HCD_USB3) {
                                pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -543,8 +564,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_RESET:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_RESET\n");
-                       if (rhport < 0)
+                       if (invalid_rhport) {
+                               pr_err("invalid port number %d\n", wIndex);
                                goto error;
+                       }
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
                                vhci_hcd->port_status[rhport] = 0;
@@ -565,8 +588,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                default:
                        usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
                                          wValue);
-                       if (rhport < 0)
+                       if (invalid_rhport) {
+                               pr_err("invalid port number %d\n", wIndex);
                                goto error;
+                       }
                        if (hcd->speed == HCD_USB3) {
                                if ((vhci_hcd->port_status[rhport] &
                                     USB_SS_PORT_STAT_POWER) != 0) {
@@ -608,7 +633,7 @@ error:
        if (usbip_dbg_flag_vhci_rh) {
                pr_debug("port %d\n", rhport);
                /* Only dump valid port status */
-               if (rhport >= 0) {
+               if (!invalid_rhport) {
                        dump_port_status_diff(prev_port_status[rhport],
                                              vhci_hcd->port_status[rhport],
                                              hcd->speed == HCD_USB3);
@@ -618,8 +643,10 @@ error:
 
        spin_unlock_irqrestore(&vhci->lock, flags);
 
-       if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
+       if (!invalid_rhport &&
+           (vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
                usb_hcd_poll_rh_status(hcd);
+       }
 
        return retval;
 }
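
The guarded mapping above can be exercised in isolation. The sketch below is a minimal userspace rendition, assuming an illustrative VHCI_HC_PORTS of 8 and a made-up helper name map_rhport(); it only demonstrates the range check that keeps port_status[] from being indexed with an out-of-range rhport.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define VHCI_HC_PORTS 8	/* illustrative value, not taken from the driver */

/* Mirror of the guarded mapping above: only a wIndex in 1..VHCI_HC_PORTS
 * yields a usable 0-based rhport; anything else must never touch
 * port_status[]. */
static bool map_rhport(unsigned int wIndex, int *rhport)
{
	if (wIndex < 1 || wIndex > VHCI_HC_PORTS)
		return false;		/* the invalid_rhport case */
	*rhport = (int)wIndex - 1;
	return true;
}

int main(void)
{
	unsigned int tests[] = { 0, 1, VHCI_HC_PORTS, VHCI_HC_PORTS + 1 };
	int rhport;

	for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		if (map_rhport(tests[i], &rhport))
			printf("wIndex %u -> rhport %d\n", tests[i], rhport);
		else
			printf("wIndex %u -> invalid, skip port_status[]\n",
			       tests[i]);
	}
	return 0;
}
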
index 8235b285dbb29660beb7d389cab4aa3567af28f4..d09bab3bf22412cd7ac4c52c5db42ebad5ded86a 100644 (file)
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
+extern const u8 aty_postdividers[8];
+
 
     /*
      *  Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
 
 extern void aty_reset_engine(const struct atyfb_par *par);
 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
index a9a8272f7a6eeda70a8e8d1a3c8b6381bbfaea41..05111e90f1681c0e40a8438c5eb427ff25f9ffc3 100644 (file)
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
                /*
                 * PLL Reference Divider M:
                 */
-               M = pll_regs[2];
+               M = pll_regs[PLL_REF_DIV];
 
                /*
                 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
                 */
-               N = pll_regs[7 + (clock_cntl & 3)];
+               N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
 
                /*
                 * PLL Post Divider P (Dependent on CLOCK_CNTL):
                 */
-               P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+               P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+                                    ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
 
                /*
                 * PLL Divider Q:
index 74a62aa193c02b9000e587fc47f707f3340c2cda..f87cc81f4fa2b767ccb3ec4a5e963159d80e2aa2 100644 (file)
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
  */
 
 #define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
 
 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
 {
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
                pll->vclk_post_div += (q <  64*8);
                pll->vclk_post_div += (q <  32*8);
        }
-       pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+       pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
        //    pll->vclk_post_div <<= 6;
        pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
        pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                u8 mclk_fb_div, pll_ext_cntl;
                pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
                pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
-               pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+               pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
                mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
                if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
                        mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                xpost_div += (q <  64*8);
                xpost_div += (q <  32*8);
        }
-       pll->ct.xclk_post_div_real = postdividers[xpost_div];
+       pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
        pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
 
 #ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                        mpost_div += (q <  64*8);
                        mpost_div += (q <  32*8);
                }
-               sclk_post_div_real = postdividers[mpost_div];
+               sclk_post_div_real = aty_postdividers[mpost_div];
                pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
                pll->ct.spll_cntl2 = mpost_div << 4;
 #ifdef DEBUG
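
The post-divider lookup above can be followed with a small standalone calculation. The sketch below assumes raw byte values for the VCLK_POST_DIV and PLL_EXT_CNTL registers and a made-up helper name pll_post_divider(); the index expression and the aty_postdividers table contents are copied from the hunks above.

#include <stdint.h>
#include <stdio.h>

/* Post-divider table as introduced in the hunks above. */
static const uint8_t aty_postdividers[8] = { 1, 2, 4, 8, 3, 5, 6, 12 };

/* Recompute the post divider P for clock select c (0..3): two low bits of
 * the index come from VCLK_POST_DIV, the third bit from PLL_EXT_CNTL,
 * matching the replacement expression above. */
static unsigned int pll_post_divider(uint8_t vclk_post_div, uint8_t pll_ext_cntl,
				     unsigned int c)
{
	unsigned int idx = ((vclk_post_div >> (c << 1)) & 3) |
			   ((pll_ext_cntl >> (2 + c)) & 4);

	return aty_postdividers[idx];
}

int main(void)
{
	/* Example register contents, made up for demonstration. */
	uint8_t vclk_post_div = 0x1b;
	uint8_t pll_ext_cntl = 0x10;

	for (unsigned int c = 0; c < 4; c++)
		printf("clock %u: P = %u\n", c,
		       pll_post_divider(vclk_post_div, pll_ext_cntl, c));
	return 0;
}
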
index f3d0bef16d78b99291c28e39fc266fa59098572d..6127f0fcd62c4e376bd2554c1003aedb40aab471 100644 (file)
@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
  */
 static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
 {
+       struct hlist_node **p;
+       struct afs_cell *pcell;
        int ret;
 
        if (!cell->anonymous_key) {
@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
                return ret;
 
        mutex_lock(&net->proc_cells_lock);
-       list_add_tail(&cell->proc_link, &net->proc_cells);
+       for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
+               pcell = hlist_entry(*p, struct afs_cell, proc_link);
+               if (strcmp(cell->name, pcell->name) < 0)
+                       break;
+       }
+
+       cell->proc_link.pprev = p;
+       cell->proc_link.next = *p;
+       rcu_assign_pointer(*p, &cell->proc_link.next);
+       if (cell->proc_link.next)
+               cell->proc_link.next->pprev = &cell->proc_link.next;
+
        afs_dynroot_mkdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);
        return 0;
@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
        afs_proc_cell_remove(cell);
 
        mutex_lock(&net->proc_cells_lock);
-       list_del_init(&cell->proc_link);
+       hlist_del_rcu(&cell->proc_link);
        afs_dynroot_rmdir(net, cell);
        mutex_unlock(&net->proc_cells_lock);
 
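
The sorted insertion into net->proc_cells can be illustrated with an ordinary singly linked list. The sketch below uses a made-up struct cell and insert_sorted() helper; it shows only the strcmp()-ordered walk, while the kernel version also publishes the node with rcu_assign_pointer() under proc_cells_lock so RCU readers stay safe.

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for struct afs_cell: only what the ordering walk needs. */
struct cell {
	const char *name;
	struct cell *next;
};

/* Insert 'cell' before the first existing entry that sorts after it,
 * mirroring the strcmp() walk added to afs_activate_cell() above. */
static void insert_sorted(struct cell **head, struct cell *cell)
{
	struct cell **p;

	for (p = head; *p; p = &(*p)->next)
		if (strcmp(cell->name, (*p)->name) < 0)
			break;

	cell->next = *p;
	*p = cell;
}

int main(void)
{
	struct cell a = { "grand.central.org", NULL };
	struct cell b = { "example.org", NULL };
	struct cell c = { "afs.example.com", NULL };
	struct cell *head = NULL, *p;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);

	for (p = head; p; p = p->next)
		printf("%s\n", p->name);
	return 0;
}
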
index 1cde710a80133bd4c9e2e88f48b1236fd1034992..f29c6dade7f6250348b886b44b8be150199f78f7 100644 (file)
@@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb)
                return -ERESTARTSYS;
 
        net->dynroot_sb = sb;
-       list_for_each_entry(cell, &net->proc_cells, proc_link) {
+       hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
                ret = afs_dynroot_mkdir(net, cell);
                if (ret < 0)
                        goto error;
index 871a228d7f37ce1b0b0d7122b41a8f83f134f391..34c02fdcc25f107ccceca1ca26a304eb37f6e247 100644 (file)
@@ -242,7 +242,7 @@ struct afs_net {
        seqlock_t               cells_lock;
 
        struct mutex            proc_cells_lock;
-       struct list_head        proc_cells;
+       struct hlist_head       proc_cells;
 
        /* Known servers.  Theoretically each fileserver can only be in one
         * cell, but in practice, people create aliases and subsets and there's
@@ -320,7 +320,7 @@ struct afs_cell {
        struct afs_net          *net;
        struct key              *anonymous_key; /* anonymous user key for this cell */
        struct work_struct      manager;        /* Manager for init/deinit/dns */
-       struct list_head        proc_link;      /* /proc cell list link */
+       struct hlist_node       proc_link;      /* /proc cell list link */
 #ifdef CONFIG_AFS_FSCACHE
        struct fscache_cookie   *cache;         /* caching cookie */
 #endif
index e84fe822a960714c8b274435dddbb66f4ba3275a..107427688edddf47fbaf2d865d9421825d0b5244 100644 (file)
@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
        timer_setup(&net->cells_timer, afs_cells_timer, 0);
 
        mutex_init(&net->proc_cells_lock);
-       INIT_LIST_HEAD(&net->proc_cells);
+       INIT_HLIST_HEAD(&net->proc_cells);
 
        seqlock_init(&net->fs_lock);
        net->fs_servers = RB_ROOT;
index 476dcbb79713d20d12023dfb265a0ef62e48e6fc..9101f62707af2da3dbff5e33c6067d0cafbb9013 100644 (file)
@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
 static int afs_proc_cells_show(struct seq_file *m, void *v)
 {
        struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
-       struct afs_net *net = afs_seq2net(m);
 
-       if (v == &net->proc_cells) {
+       if (v == SEQ_START_TOKEN) {
                /* display header on line 1 */
                seq_puts(m, "USE NAME\n");
                return 0;
@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
        __acquires(rcu)
 {
        rcu_read_lock();
-       return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
+       return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
 }
 
 static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
+       return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
 }
 
 static void afs_proc_cells_stop(struct seq_file *m, void *v)
index 35f2ae30f31f7ffdf6a782cb950120e7b967a9d7..77a83790a31f38c9e25ffeaa1c190eb8958e7fa9 100644 (file)
@@ -690,8 +690,6 @@ static void afs_process_async_call(struct work_struct *work)
        }
 
        if (call->state == AFS_CALL_COMPLETE) {
-               call->reply[0] = NULL;
-
                /* We have two refs to release - one from the alloc and one
                 * queued with the work item - and we can't just deallocate the
                 * call because the work item may be queued again.
index af2b17b21b94ba0c97b1085dc7154a3ee4df5c62..95983c744164a830661f105cd7b5ca54645a043f 100644 (file)
@@ -343,7 +343,7 @@ try_again:
        trap = lock_rename(cache->graveyard, dir);
 
        /* do some checks before getting the grave dentry */
-       if (rep->d_parent != dir) {
+       if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
                /* the entry was probably culled when we dropped the parent dir
                 * lock */
                unlock_rename(cache->graveyard, dir);
index 4becbf168b7f0df3229b1e1a5d0fb8daca02df0d..0fb270f0a0ef68f3264f9c21d108a98897a3cc0d 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -666,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                indices)) {
+               pgoff_t nr_pages = 1;
+
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *pvec_ent = pvec.pages[i];
                        void *entry;
@@ -680,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 
                        xa_lock_irq(&mapping->i_pages);
                        entry = get_unlocked_mapping_entry(mapping, index, NULL);
-                       if (entry)
+                       if (entry) {
                                page = dax_busy_page(entry);
+                               /*
+                                * Account for multi-order entries at
+                                * the end of the pagevec.
+                                */
+                               if (i + 1 >= pagevec_count(&pvec))
+                                       nr_pages = 1UL << dax_radix_order(entry);
+                       }
                        put_unlocked_mapping_entry(mapping, index, entry);
                        xa_unlock_irq(&mapping->i_pages);
                        if (page)
@@ -696,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
                 */
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
-               index++;
+               index += nr_pages;
 
                if (page)
                        break;
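
The index arithmetic in the hunk above can be seen in a standalone loop. The sketch below uses made-up entry orders; the point is only that a multi-order (huge page) entry covers 1 << order page offsets, so the scan index must advance by that amount rather than by one.

#include <stdio.h>

/* A mapping entry of 'order' covers 1 << order page offsets, so the scan
 * index must skip the whole entry instead of advancing by one -- which is
 * what the hunk above fixes for the last entry of each pagevec. */
int main(void)
{
	unsigned int orders[] = { 0, 9, 0 };	/* made-up: 4K, 2M, 4K entries */
	unsigned long index = 0;

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long nr_pages = 1UL << orders[i];

		printf("entry at index %lu covers %lu pages\n", index, nr_pages);
		index += nr_pages;	/* instead of index++ */
	}
	printf("next lookup starts at index %lu\n", index);
	return 0;
}
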
index defc2168de915c23406fe869af1ca65834818402..f58c0cacc531df1c6c439325cb6c574cd704486d 100644 (file)
@@ -682,6 +682,7 @@ int fat_count_free_clusters(struct super_block *sb)
                        if (ops->ent_get(&fatent) == FAT_ENT_FREE)
                                free++;
                } while (fat_ent_next(sbi, &fatent));
+               cond_resched();
        }
        sbi->free_clusters = free;
        sbi->free_clus_valid = 1;
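
The cond_resched() addition above is the usual pattern for breaking up a long kernel-side scan. The sketch below is a loose userspace analogue with made-up loop bounds, using sched_yield() as a stand-in yield point.

#include <sched.h>
#include <stdio.h>

/* Loose userspace analogue of the fix above: a long scan that yields the
 * CPU periodically so it cannot monopolise a core.  The kernel yield point
 * is cond_resched() once per FAT block; sched_yield() stands in here. */
int main(void)
{
	unsigned long free = 0;

	for (unsigned long block = 0; block < 1000000; block++) {
		free += block & 1;	/* stand-in for scanning one block */

		if ((block & 4095) == 0)
			sched_yield();	/* kernel code: cond_resched() */
	}
	printf("free entries counted: %lu\n", free);
	return 0;
}
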
index 83bfe04456b6a99a196c485830b2ba2bc7419dec..c550512ce335052ccc22329c354eb305b015ac52 100644 (file)
@@ -70,20 +70,7 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
 }
 
 /*
- * initialise an cookie jar slab element prior to any use
- */
-void fscache_cookie_init_once(void *_cookie)
-{
-       struct fscache_cookie *cookie = _cookie;
-
-       memset(cookie, 0, sizeof(*cookie));
-       spin_lock_init(&cookie->lock);
-       spin_lock_init(&cookie->stores_lock);
-       INIT_HLIST_HEAD(&cookie->backing_objects);
-}
-
-/*
- * Set the index key in a cookie.  The cookie struct has space for a 12-byte
+ * Set the index key in a cookie.  The cookie struct has space for a 16-byte
  * key plus length and hash, but if that's not big enough, it's instead a
  * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
  * the key data.
@@ -93,20 +80,18 @@ static int fscache_set_key(struct fscache_cookie *cookie,
 {
        unsigned long long h;
        u32 *buf;
+       int bufs;
        int i;
 
-       cookie->key_len = index_key_len;
+       bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
 
        if (index_key_len > sizeof(cookie->inline_key)) {
-               buf = kzalloc(index_key_len, GFP_KERNEL);
+               buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                cookie->key = buf;
        } else {
                buf = (u32 *)cookie->inline_key;
-               buf[0] = 0;
-               buf[1] = 0;
-               buf[2] = 0;
        }
 
        memcpy(buf, index_key, index_key_len);
@@ -116,7 +101,8 @@ static int fscache_set_key(struct fscache_cookie *cookie,
         */
        h = (unsigned long)cookie->parent;
        h += index_key_len + cookie->type;
-       for (i = 0; i < (index_key_len + sizeof(u32) - 1) / sizeof(u32); i++)
+
+       for (i = 0; i < bufs; i++)
                h += buf[i];
 
        cookie->key_hash = h ^ (h >> 32);
@@ -161,7 +147,7 @@ struct fscache_cookie *fscache_alloc_cookie(
        struct fscache_cookie *cookie;
 
        /* allocate and initialise a cookie */
-       cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+       cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie)
                return NULL;
 
@@ -192,6 +178,9 @@ struct fscache_cookie *fscache_alloc_cookie(
        cookie->netfs_data      = netfs_data;
        cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);
        cookie->type            = def->type;
+       spin_lock_init(&cookie->lock);
+       spin_lock_init(&cookie->stores_lock);
+       INIT_HLIST_HEAD(&cookie->backing_objects);
 
        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
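
The key handling above can be mimicked in userspace. The sketch below uses a made-up hash_index_key() helper: the key is copied into a zero-filled buffer rounded up to whole 32-bit words (the kcalloc()/DIV_ROUND_UP() idea from the hunk), so the word-wise summing loop never reads uninitialised trailing bytes.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up helper mirroring the hashing scheme above: copy the key into a
 * zero-filled buffer rounded up to whole 32-bit words, then sum the words. */
static uint32_t hash_index_key(const void *key, size_t len)
{
	size_t words = (len + sizeof(uint32_t) - 1) / sizeof(uint32_t);
	uint32_t *buf = calloc(words, sizeof(uint32_t));
	uint64_t h = len;
	size_t i;

	if (!buf)
		return 0;
	memcpy(buf, key, len);

	for (i = 0; i < words; i++)
		h += buf[i];

	free(buf);
	return (uint32_t)(h ^ (h >> 32));
}

int main(void)
{
	const char key[] = "example.cell";	/* 13 bytes, not a multiple of 4 */

	printf("hash: %08x\n", (unsigned int)hash_index_key(key, sizeof(key)));
	return 0;
}
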
index f83328a7f0482a6d63695779ef941a1532ebfed0..d6209022e96582f107f9226852ee45414b6b445c 100644 (file)
@@ -51,7 +51,6 @@ extern struct fscache_cache *fscache_select_cache_for_object(
 extern struct kmem_cache *fscache_cookie_jar;
 
 extern void fscache_free_cookie(struct fscache_cookie *);
-extern void fscache_cookie_init_once(void *);
 extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
                                                   const struct fscache_cookie_def *,
                                                   const void *, size_t,
index 7dce110bf17d04b1d6631c636c5c080f3939d201..30ad89db1efcc6c448823d020b88dfde5a5aa247 100644 (file)
@@ -143,9 +143,7 @@ static int __init fscache_init(void)
 
        fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
                                               sizeof(struct fscache_cookie),
-                                              0,
-                                              0,
-                                              fscache_cookie_init_once);
+                                              0, 0, NULL);
        if (!fscache_cookie_jar) {
                pr_notice("Failed to allocate a cookie jar\n");
                ret = -ENOMEM;
index 03128ed1f34e8f781e06935c6362b229c6476011..84544a4f012d744dda1a22d74c2482e9ff000f61 100644 (file)
@@ -1057,7 +1057,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                }
        }
        release_metapath(&mp);
-       if (gfs2_is_jdata(ip))
+       if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
                iomap->page_done = gfs2_iomap_journaled_page_done;
        return 0;
 
index 8e712b614e6e2ac541528d90cc75ea6f850a0e1e..933aac5da193415643b34a33e14db4fdb6fc29b5 100644 (file)
@@ -96,7 +96,9 @@ struct ocfs2_unblock_ctl {
 };
 
 /* Lockdep class keys */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+#endif
 
 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
                                        int new_level);
index bf000c8aeffbb30fe8b1ebbed322540021f0bb55..fec62e9dfbe6a6c639d7f61879bf21ac84ef4a6c 100644 (file)
@@ -2337,8 +2337,8 @@ late_initcall(ubifs_init);
 
 static void __exit ubifs_exit(void)
 {
-       WARN_ON(list_empty(&ubifs_infos));
-       WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
+       WARN_ON(!list_empty(&ubifs_infos));
+       WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) != 0);
 
        dbg_debugfs_exit();
        ubifs_compressors_exit();
index 5289e22cb081d4aee3f0a57ef7665b3930393c15..42ea7bab9144cc026f50d802acc31c42ab7f0858 100644 (file)
@@ -1220,35 +1220,92 @@ retry:
        return 0;
 }
 
+/* Unlock both inodes after they've been prepped for a range clone. */
+STATIC void
+xfs_reflink_remap_unlock(
+       struct file             *file_in,
+       struct file             *file_out)
+{
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
+       bool                    same_inode = (inode_in == inode_out);
+
+       xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+       if (!same_inode)
+               xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+       inode_unlock(inode_out);
+       if (!same_inode)
+               inode_unlock_shared(inode_in);
+}
+
 /*
- * Link a range of blocks from one file to another.
+ * If we're reflinking to a point past the destination file's EOF, we must
+ * zero any speculative post-EOF preallocations that sit between the old EOF
+ * and the destination file offset.
  */
-int
-xfs_reflink_remap_range(
+static int
+xfs_reflink_zero_posteof(
+       struct xfs_inode        *ip,
+       loff_t                  pos)
+{
+       loff_t                  isize = i_size_read(VFS_I(ip));
+
+       if (pos <= isize)
+               return 0;
+
+       trace_xfs_zero_eof(ip, isize, pos - isize);
+       return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
+                       &xfs_iomap_ops);
+}
+
+/*
+ * Prepare two files for range cloning.  Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file will
+ * be truncated, and any leases on the out file will have been broken.  This
+ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match,
+ * hence it can introduce corruption into the file that has its block replaced.
+ *
+ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
+ * "block aligned" for the purposes of cloning entire files.  However, if the
+ * source file range includes the EOF block and it lands within the existing EOF
+ * of the destination file, then we can expose stale data from beyond the source
+ * file EOF in the destination file.
+ *
+ * XFS doesn't support partial block sharing, so in both cases we have to check
+ * these cases ourselves. For dedupe, we can simply round the length to dedupe
+ * down to the previous whole block and ignore the partial EOF block. While this
+ * means we can't dedupe the last block of a file, this is an acceptable
+ * tradeoff for simplicity of implementation.
+ *
+ * For cloning, we want to share the partial EOF block if it is also the new EOF
+ * block of the destination file. If the partial EOF block lies inside the
+ * existing destination EOF, then we have to abort the clone to avoid exposing
+ * stale data in the destination file. Hence we reject these clone attempts with
+ * -EINVAL in this case.
+ */
+STATIC int
+xfs_reflink_remap_prep(
        struct file             *file_in,
        loff_t                  pos_in,
        struct file             *file_out,
        loff_t                  pos_out,
-       u64                     len,
+       u64                     *len,
        bool                    is_dedupe)
 {
        struct inode            *inode_in = file_inode(file_in);
        struct xfs_inode        *src = XFS_I(inode_in);
        struct inode            *inode_out = file_inode(file_out);
        struct xfs_inode        *dest = XFS_I(inode_out);
-       struct xfs_mount        *mp = src->i_mount;
        bool                    same_inode = (inode_in == inode_out);
-       xfs_fileoff_t           sfsbno, dfsbno;
-       xfs_filblks_t           fsblen;
-       xfs_extlen_t            cowextsize;
+       u64                     blkmask = i_blocksize(inode_in) - 1;
        ssize_t                 ret;
 
-       if (!xfs_sb_version_hasreflink(&mp->m_sb))
-               return -EOPNOTSUPP;
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
        /* Lock both files against IO */
        ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
        if (ret)
@@ -1270,33 +1327,115 @@ xfs_reflink_remap_range(
                goto out_unlock;
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
-                       &len, is_dedupe);
+                       len, is_dedupe);
        if (ret <= 0)
                goto out_unlock;
 
+       /*
+        * If the dedupe data matches, chop off the partial EOF block
+        * from the source file so we don't try to dedupe the partial
+        * EOF block.
+        */
+       if (is_dedupe) {
+               *len &= ~blkmask;
+       } else if (*len & blkmask) {
+               /*
+                * The user is attempting to share a partial EOF block,
+                * if it's inside the destination EOF then reject it.
+                */
+               if (pos_out + *len < i_size_read(inode_out)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+       }
+
        /* Attach dquots to dest inode before changing block map */
        ret = xfs_qm_dqattach(dest);
        if (ret)
                goto out_unlock;
 
-       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
        /*
-        * Clear out post-eof preallocations because we don't have page cache
-        * backing the delayed allocations and they'll never get freed on
-        * their own.
+        * Zero existing post-eof speculative preallocations in the destination
+        * file.
         */
-       if (xfs_can_free_eofblocks(dest, true)) {
-               ret = xfs_free_eofblocks(dest);
-               if (ret)
-                       goto out_unlock;
-       }
+       ret = xfs_reflink_zero_posteof(dest, pos_out);
+       if (ret)
+               goto out_unlock;
 
        /* Set flags and remap blocks. */
        ret = xfs_reflink_set_inode_flag(src, dest);
        if (ret)
                goto out_unlock;
 
+       /* Zap any page cache for the destination file's range. */
+       truncate_inode_pages_range(&inode_out->i_data, pos_out,
+                                  PAGE_ALIGN(pos_out + *len) - 1);
+
+       /* If we're altering the file contents... */
+       if (!is_dedupe) {
+               /*
+                * ...update the timestamps (which will grab the ilock again
+                * from xfs_fs_dirty_inode, so we have to call it before we
+                * take the ilock).
+                */
+               if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+                       ret = file_update_time(file_out);
+                       if (ret)
+                               goto out_unlock;
+               }
+
+               /*
+                * ...clear the security bits if the process is not being run
+                * by root.  This keeps people from modifying setuid and setgid
+                * binaries.
+                */
+               ret = file_remove_privs(file_out);
+               if (ret)
+                       goto out_unlock;
+       }
+
+       return 1;
+out_unlock:
+       xfs_reflink_remap_unlock(file_in, file_out);
+       return ret;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+       struct file             *file_in,
+       loff_t                  pos_in,
+       struct file             *file_out,
+       loff_t                  pos_out,
+       u64                     len,
+       bool                    is_dedupe)
+{
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
+       struct xfs_mount        *mp = src->i_mount;
+       xfs_fileoff_t           sfsbno, dfsbno;
+       xfs_filblks_t           fsblen;
+       xfs_extlen_t            cowextsize;
+       ssize_t                 ret;
+
+       if (!xfs_sb_version_hasreflink(&mp->m_sb))
+               return -EOPNOTSUPP;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       /* Prepare and then clone file data. */
+       ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+                       &len, is_dedupe);
+       if (ret <= 0)
+               return ret;
+
+       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
        dfsbno = XFS_B_TO_FSBT(mp, pos_out);
        sfsbno = XFS_B_TO_FSBT(mp, pos_in);
        fsblen = XFS_B_TO_FSB(mp, len);
@@ -1305,10 +1444,6 @@ xfs_reflink_remap_range(
        if (ret)
                goto out_unlock;
 
-       /* Zap any page cache for the destination file's range. */
-       truncate_inode_pages_range(&inode_out->i_data, pos_out,
-                                  PAGE_ALIGN(pos_out + len) - 1);
-
        /*
         * Carry the cowextsize hint from src to dest if we're sharing the
         * entire source file to the entire destination file, the source file
@@ -1325,12 +1460,7 @@ xfs_reflink_remap_range(
                        is_dedupe);
 
 out_unlock:
-       xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
-       if (!same_inode)
-               xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
-       inode_unlock(inode_out);
-       if (!same_inode)
-               inode_unlock_shared(inode_in);
+       xfs_reflink_remap_unlock(file_in, file_out);
        if (ret)
                trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
        return ret;
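
The partial-EOF-block policy spelled out in the comment block above boils down to a small decision. The sketch below distils it with made-up names (adjust_remap_len()) and plain integers: dedupe rounds the length down to a whole block, while a clone whose partial tail block would land inside the existing destination EOF is rejected with -EINVAL.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up distillation of the policy above: dedupe silently drops the
 * partial tail block; a clone whose partial tail would land inside the
 * existing destination EOF is rejected. */
static int adjust_remap_len(uint64_t *len, uint64_t pos_out, uint64_t dest_size,
			    uint64_t blocksize, bool is_dedupe)
{
	uint64_t blkmask = blocksize - 1;

	if (is_dedupe) {
		*len &= ~blkmask;	/* round down to whole blocks */
	} else if (*len & blkmask) {
		if (pos_out + *len < dest_size)
			return -EINVAL;	/* partial block inside dest EOF */
	}
	return 0;
}

int main(void)
{
	uint64_t len = 10000;	/* deliberately not block aligned */

	printf("dedupe:           ret=%d, len trimmed to %llu\n",
	       adjust_remap_len(&len, 0, 1 << 20, 4096, true),
	       (unsigned long long)len);

	len = 10000;
	printf("clone inside EOF: ret=%d\n",
	       adjust_remap_len(&len, 0, 1 << 20, 4096, false));
	return 0;
}
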
index 7b75ff6e2fceeb407e828c4e171176afc5ae7281..d7701d466b608b588fd8b6c69166ce485e402a24 100644 (file)
@@ -68,7 +68,7 @@
  */
 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
 
 #define EXIT_DATA                                                      \
        *(.exit.data .exit.data.*)                                      \
-       *(.fini_array)                                                  \
-       *(.dtors)                                                       \