Merge tag '5.1-rc-smb3' of git://git.samba.org/sfrench/cifs-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Mar 2019 01:52:12 +0000 (18:52 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 16 Mar 2019 01:52:12 +0000 (18:52 -0700)
Pull more smb3 updates from Steve French:
 "Various tracing and debugging improvements, crediting fixes, some
  cleanup, and important fallocate fix (fixes three xfstests) and lock
  fix.

  Summary:

   - Various additional dynamic tracing tracepoints

   - Debugging improvements (including ability to query the server via
     SMB3 fsctl from userspace tools which can help with stats and
     debugging)

   - One minor performance improvement (root directory inode caching)

   - Crediting (SMB3 flow control) fixes

   - Some cleanup (docs and to mknod)

   - Important fixes: one to the smb3 implementation of fallocate zero
     range (which fixes three xfstests) and a POSIX lock fix"
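
The zero-range fix called out above concerns fallocate(2) with
FALLOC_FL_ZERO_RANGE when the requested range reaches past the current end
of file. A minimal userspace sketch of such a call follows; the mount path
and sizes are illustrative, not taken from this series:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
        int fd = open("/mnt/cifs/testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Zero a 1 MiB range on an initially empty file.  Without
         * FALLOC_FL_KEEP_SIZE the file size has to grow to cover the
         * range; that is the case the smb3_zero_range fix addresses.
         */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1024 * 1024) < 0)
                perror("fallocate");

        close(fd);
        return 0;
}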

* tag '5.1-rc-smb3' of git://git.samba.org/sfrench/cifs-2.6: (22 commits)
  CIFS: fix POSIX lock leak and invalid ptr deref
  SMB3: Allow SMB3 FSCTL queries to be sent to server from tools
  cifs: fix incorrect handling of smb2_set_sparse() return in smb3_simple_falloc
  smb2: fix typo in definition of a few error flags
  CIFS: make mknod() an smb_version_op
  cifs: minor documentation updates
  cifs: remove unused value pointed out by Coverity
  SMB3: passthru query info doesn't check for SMB3 FSCTL passthru
  smb3: add dynamic tracepoints for simple fallocate and zero range
  cifs: fix smb3_zero_range so it can expand the file-size when required
  cifs: add SMB2_ioctl_init/free helpers to be used with compounding
  smb3: Add dynamic trace points for various compounded smb3 ops
  cifs: cache FILE_ALL_INFO for the shared root handle
  smb3: display volume serial number for shares in /proc/fs/cifs/DebugData
  cifs: simplify how we handle credits in compound_send_recv()
  smb3: add dynamic tracepoint for timeout waiting for credits
  smb3: display security information in /proc/fs/cifs/DebugData more accurately
  cifs: add a timeout argument to wait_for_free_credits
  cifs: prevent starvation in wait_for_free_credits for multi-credit requests
  cifs: wait_for_free_credits() make it possible to wait for >=1 credits
  ...

314 files changed:
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/arm/kernel_mode_neon.txt
Documentation/devicetree/bindings/display/ssd1307fb.txt
Documentation/filesystems/f2fs.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/halt-polling.txt
Documentation/virtual/kvm/mmu.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/Kconfig-nommu
arch/arm/Makefile
arch/arm/boot/bootp/Makefile
arch/arm/boot/bootp/init.S
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/ll_char_wr.S
arch/arm/boot/dts/imx28-cfa10036.dts
arch/arm/common/mcpm_entry.c
arch/arm/include/asm/arch_gicv3.h
arch/arm/include/asm/assembler.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/hardware/entry-macro-iomd.S
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_hyp.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/smp_twd.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/suspend.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/v7m.h
arch/arm/include/asm/vfpmacros.h
arch/arm/include/debug/tegra.S
arch/arm/kernel/debug.S
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/entry-v7m.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/hyp-stub.S
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/patch.c
arch/arm/kernel/sleep.S
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/unwind.c
arch/arm/kvm/Makefile
arch/arm/kvm/coproc.c
arch/arm/kvm/hyp/cp15-sr.c
arch/arm/kvm/hyp/hyp-entry.S
arch/arm/kvm/hyp/switch.c
arch/arm/kvm/hyp/tlb.c
arch/arm/kvm/interrupts.S
arch/arm/lib/Makefile
arch/arm/lib/bitops.h
arch/arm/lib/clear_user.S
arch/arm/lib/copy_from_user.S
arch/arm/lib/copy_page.S
arch/arm/lib/copy_template.S
arch/arm/lib/copy_to_user.S
arch/arm/lib/csumpartial.S
arch/arm/lib/csumpartialcopygeneric.S
arch/arm/lib/csumpartialcopyuser.S
arch/arm/lib/div64.S
arch/arm/lib/floppydma.S
arch/arm/lib/io-readsb.S
arch/arm/lib/io-readsl.S
arch/arm/lib/io-readsw-armv3.S
arch/arm/lib/io-readsw-armv4.S
arch/arm/lib/io-writesb.S
arch/arm/lib/io-writesl.S
arch/arm/lib/io-writesw-armv3.S
arch/arm/lib/io-writesw-armv4.S
arch/arm/lib/lib1funcs.S
arch/arm/lib/memcpy.S
arch/arm/lib/memmove.S
arch/arm/lib/memset.S
arch/arm/lib/xor-neon.c
arch/arm/mach-actions/platsmp.c
arch/arm/mach-exynos/headsmp.S
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-ks8695/include/mach/entry-macro.S
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-oxnas/Makefile
arch/arm/mach-oxnas/hotplug.c [deleted file]
arch/arm/mach-oxnas/platsmp.c
arch/arm/mach-prima2/common.h
arch/arm/mach-prima2/headsmp.S
arch/arm/mach-prima2/hotplug.c
arch/arm/mach-prima2/platsmp.c
arch/arm/mach-qcom/platsmp.c
arch/arm/mach-spear/generic.h
arch/arm/mach-spear/headsmp.S
arch/arm/mach-spear/hotplug.c
arch/arm/mach-spear/platsmp.c
arch/arm/mach-tegra/reset-handler.S
arch/arm/mm/cache-v6.S
arch/arm/mm/copypage-v4mc.c
arch/arm/mm/copypage-v4wb.c
arch/arm/mm/copypage-v4wt.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/mm/pmsa-v8.c
arch/arm/mm/proc-v7m.S
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kvm/Makefile
arch/arm64/kvm/debug.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/kvm/sys_regs.c
arch/mips/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_32_mmu.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/kvm/book3s_hv_rm_xics.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/powerpc.c
arch/s390/include/asm/cio.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/isc.h
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/irq.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/um/drivers/ubd_kern.c
arch/um/drivers/vector_user.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_vcpu_regs.h [new file with mode: 0644]
arch/x86/kernel/kvmclock.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/i8254.c
arch/x86/kvm/i8259.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmutrace.h
arch/x86/kvm/page_track.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmcs.h
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
drivers/amba/bus.c
drivers/clocksource/arm_arch_timer.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/etnaviv/Kconfig
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_gem.h
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/vga/vgaarb.c
drivers/hwtracing/coresight/coresight-etm3x.c
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/coresight/coresight-priv.h
drivers/hwtracing/coresight/coresight-stm.c
drivers/hwtracing/coresight/coresight-tmc.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/i2c-core-base.c
drivers/iommu/amd_iommu.c
drivers/ntb/hw/intel/ntb_hw_gen1.c
drivers/ntb/hw/intel/ntb_hw_gen1.h
drivers/ntb/hw/intel/ntb_hw_gen3.c
drivers/ntb/hw/mscc/ntb_hw_switchtec.c
drivers/ntb/ntb_transport.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc.h
drivers/video/fbdev/aty/radeon_pm.c
drivers/video/fbdev/cg14.c
drivers/video/fbdev/cg3.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/core/fb_cmdline.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/core/fbmon.c
drivers/video/fbdev/ffb.c
drivers/video/fbdev/geode/gxfb_core.c
drivers/video/fbdev/geode/lxfb_core.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/mbx/mbxdebugfs.c
drivers/video/fbdev/mbx/mbxfb.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap2/omapfb/dss/core.c
drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
drivers/video/fbdev/omap2/omapfb/dss/dss.h
drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/via/viafbdev.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/trace.c
fs/f2fs/xattr.c
fs/f2fs/xattr.h
fs/xfs/libxfs/xfs_dir2_leaf.c
fs/xfs/libxfs/xfs_dir2_node.c
include/clocksource/arm_arch_timer.h
include/drm/drm_fb_helper.h
include/kvm/arm_arch_timer.h
include/linux/amba/bus.h
include/linux/f2fs_fs.h
include/linux/kvm_host.h
include/linux/ntb.h
include/linux/pagemap.h
include/linux/ring_buffer.h
include/linux/switchtec.h
include/linux/vgaarb.h
include/sound/pcm.h
include/trace/events/f2fs.h
kernel/printk/printk.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_kdb.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_uprobe.c
lib/raid6/Makefile
mm/filemap.c
sound/hda/hdac_stream.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/usx2y/usb_stream.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c [new file with mode: 0644]
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/mmu.c
virt/kvm/arm/trace.h
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/coalesced_mmio.c
virt/kvm/eventfd.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c
virt/kvm/vfio.c

index a7ce331994578451759aebd8a080bc38085d1093..91822ce258317df500271f8d28cf540e30cb19da 100644 (file)
@@ -86,6 +86,13 @@ Description:
                The unit size is one block; configuring it is currently
                supported only in the range of [1, 512].
 
+What:          /sys/fs/f2fs/<disk>/umount_discard_timeout
+Date:          January 2019
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+               Set timeout to issue discard commands during umount.
+               Default: 5 secs
+
 What:          /sys/fs/f2fs/<disk>/max_victim_search
 Date:          January 2014
 Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
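
The umount_discard_timeout attribute documented above is a plain writable
sysfs value, in seconds per the default shown. A minimal sketch of raising
it at runtime; the device name "sdb1" is a placeholder, not part of this
change:

#include <stdio.h>

int main(void)
{
        /* "sdb1" stands in for the f2fs volume's block device name. */
        FILE *f = fopen("/sys/fs/f2fs/sdb1/umount_discard_timeout", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "%d\n", 30);         /* raise from the default 5s to 30s */
        fclose(f);
        return 0;
}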
index 525452726d31e94c43d7c8c7c05c4431b006912a..b9e060c5b61e08c1491c710adc560530b8bae03e 100644 (file)
@@ -6,7 +6,7 @@ TL;DR summary
 * Use only NEON instructions, or VFP instructions that don't rely on support
   code
 * Isolate your NEON code in a separate compilation unit, and compile it with
-  '-mfpu=neon -mfloat-abi=softfp'
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
 * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
   NEON code
 * Don't sleep in your NEON code, and be aware that it will be executed with
@@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
 Therefore, the recommended and only supported way of using NEON/VFP in the
 kernel is by adhering to the following rules:
 * isolate the NEON code in a separate compilation unit and compile it with
-  '-mfpu=neon -mfloat-abi=softfp';
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
 * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
   into the unit containing the NEON code from a compilation unit which is *not*
   built with the GCC flag '-mfpu=neon' set.
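
A rough sketch of the pattern these rules describe; the file names, the
do_neon_xor() helper and the per-object kbuild flag line are illustrative,
not part of this patch:

/*
 * neon_part.c -- NEON-only unit, built via a per-object kbuild flag such as
 *   CFLAGS_neon_part.o := -march=armv7-a -mfpu=neon -mfloat-abi=softfp
 */
#include <arm_neon.h>
#include <linux/types.h>

void do_neon_xor(u8 *dst, const u8 *src, int len)
{
        int i;

        /* len is assumed to be a multiple of 16 bytes */
        for (i = 0; i < len; i += 16) {
                uint8x16_t a = vld1q_u8(dst + i);
                uint8x16_t b = vld1q_u8(src + i);

                vst1q_u8(dst + i, veorq_u8(a, b));
        }
}

/* caller.c -- a separate unit, built *without* -mfpu=neon */
#include <asm/neon.h>
#include <linux/types.h>

void do_neon_xor(u8 *dst, const u8 *src, int len);

void xor_buffers(u8 *dst, const u8 *src, int len)
{
        kernel_neon_begin();    /* NEON usable from here; don't sleep */
        do_neon_xor(dst, src, len);
        kernel_neon_end();
}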
index 209d931ef16c4e53e46e8378b09f5cf04b121586..b67f8caa212c8fa8959944cf3a8cd2152ba68f92 100644 (file)
@@ -36,7 +36,6 @@ ssd1307: oled@3c {
         reg = <0x3c>;
         pwms = <&pwm 4 3000>;
         reset-gpios = <&gpio2 7>;
-        reset-active-low;
 };
 
 ssd1306: oled@3c {
@@ -44,7 +43,6 @@ ssd1306: oled@3c {
         reg = <0x3c>;
         pwms = <&pwm 4 3000>;
         reset-gpios = <&gpio2 7>;
-        reset-active-low;
         solomon,com-lrremap;
         solomon,com-invdir;
         solomon,com-offset = <32>;
index e46c2147ddf8e02083622c0976fb63da061c85ee..f7b5e4ff0de3e1a196cf7d66f4780345e24829b1 100644 (file)
@@ -126,6 +126,8 @@ disable_ext_identify   Disable the extension list configured by mkfs, so f2fs
                        is not aware of cold files such as media files.
 inline_xattr           Enable the inline xattrs feature.
 noinline_xattr         Disable the inline xattrs feature.
+inline_xattr_size=%u   Support configuring inline xattr size; it depends on
+                       the flexible inline xattr feature.
 inline_data            Enable the inline data feature: newly created small
                        (<~3.4k) files can be written into the inode block.
 inline_dentry          Enable the inline dir feature: data in newly created
index 356156f5c52d299b13481ccc516f15388c5797aa..7de9eee73fcd9d533aec2c1bc88d413f6216db73 100644 (file)
@@ -45,6 +45,23 @@ the API.  The only supported use is one virtual machine per process,
 and one vcpu per thread.
 
 
+It is important to note that although VM ioctls may only be issued from
+the process that created the VM, a VM's lifecycle is associated with its
+file descriptor, not its creator (process).  In other words, the VM and
+its resources, *including the associated address space*, are not freed
+until the last reference to the VM's file descriptor has been released.
+For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
+not be freed until both the parent (original) process and its child have
+put their references to the VM's file descriptor.
+
+Because a VM's resources are not freed until the last reference to its
+file descriptor is released, creating additional references to a VM via
+fork(), dup(), etc... without careful consideration is strongly
+discouraged and may have unwanted side effects, e.g. memory allocated
+by and on behalf of the VM's process may not be freed/unaccounted when
+the VM is shut down.
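
A minimal userspace sketch of that behaviour, using only the documented
/dev/kvm node and the KVM_CREATE_VM ioctl (the sleep and the error handling
are purely illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm;

        if (kvm < 0) {
                perror("/dev/kvm");
                return 1;
        }

        /* VM ioctls may only be issued from this (creating) process. */
        vm = ioctl(kvm, KVM_CREATE_VM, 0);
        if (vm < 0) {
                perror("KVM_CREATE_VM");
                return 1;
        }

        if (fork() == 0) {
                /*
                 * The child inherits a reference to the VM's file descriptor,
                 * so the VM and the memory charged to it stay allocated until
                 * this reference is dropped as well.
                 */
                sleep(60);
                _exit(0);
        }

        close(vm);      /* parent's reference gone; the VM is not freed yet */
        return 0;
}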
+
+
 3. Extensions
 -------------
 
index 4a841831876978cbc5396f8fc88232815cfd786c..4f791b128dd27a0ed9bc4ad79eddc8794bcab2bd 100644 (file)
@@ -53,7 +53,8 @@ the global max polling interval then the polling interval can be increased in
 the hope that next time during the longer polling interval the wake up source
 will be received while the host is polling and the latency benefits will be
 received. The polling interval is grown in the function grow_halt_poll_ns() and
-is multiplied by the module parameter halt_poll_ns_grow.
+is multiplied by the module parameters halt_poll_ns_grow and
+halt_poll_ns_grow_start.
 
 In the event that the total block time was greater than the global max polling
 interval then the host will never poll for long enough (limited by the global
@@ -80,22 +81,30 @@ shrunk. These variables are defined in include/linux/kvm_host.h and as module
 parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
 powerpc kvm-hv case.
 
-Module Parameter    |       Description              |      Default Value
+Module Parameter       |   Description             |        Default Value
 --------------------------------------------------------------------------------
-halt_poll_ns       | The global max polling interval | KVM_HALT_POLL_NS_DEFAULT
-                   | which defines the ceiling value |
-                   | of the polling interval for     | (per arch value)
-                   | each vcpu.                      |
+halt_poll_ns           | The global max polling    | KVM_HALT_POLL_NS_DEFAULT
+                       | interval which defines    |
+                       | the ceiling value of the  |
+                       | polling interval for      | (per arch value)
+                       | each vcpu.                |
 --------------------------------------------------------------------------------
-halt_poll_ns_grow   | The value by which the halt     |        2
-                   | polling interval is multiplied  |
-                   | in the grow_halt_poll_ns()      |
-                   | function.                       |
+halt_poll_ns_grow      | The value by which the    | 2
+                       | halt polling interval is  |
+                       | multiplied in the         |
+                       | grow_halt_poll_ns()       |
+                       | function.                 |
 --------------------------------------------------------------------------------
-halt_poll_ns_shrink | The value by which the halt     |        0
-                   | polling interval is divided in  |
-                   | the shrink_halt_poll_ns()       |
-                   | function.                       |
+halt_poll_ns_grow_start | The initial value to grow | 10000
+                       | to from zero in the       |
+                       | grow_halt_poll_ns()       |
+                       | function.                 |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink    | The value by which the    | 0
+                       | halt polling interval is  |
+                       | divided in the            |
+                       | shrink_halt_poll_ns()     |
+                       | function.                 |
 --------------------------------------------------------------------------------
 
 These module parameters can be set from the debugfs files in:
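
A minimal sketch of adjusting them at runtime, assuming they are exposed
writable under /sys/module/kvm/parameters/ as on typical configurations:

#include <stdio.h>

static int set_kvm_param(const char *name, unsigned long val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/kvm/parameters/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%lu\n", val);
        return fclose(f);
}

int main(void)
{
        /* Start growing from 20us instead of the 10us default, grow by 4x. */
        if (set_kvm_param("halt_poll_ns_grow_start", 20000) ||
            set_kvm_param("halt_poll_ns_grow", 4))
                perror("set_kvm_param");
        return 0;
}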
index e507a9e0421ed22e630425074e053303f5e990bf..f365102c80f5dd64133cbe60a8a4fd76fc86393d 100644 (file)
@@ -224,10 +224,6 @@ Shadow pages contain the following information:
     A bitmap indicating which sptes in spt point (directly or indirectly) at
     pages that may be unsynchronized.  Used to quickly locate all unsynchronized
     pages reachable from a given page.
-  mmu_valid_gen:
-    Generation number of the page.  It is compared with kvm->arch.mmu_valid_gen
-    during hash table lookup, and used to skip invalidated shadow pages (see
-    "Zapping all pages" below.)
   clear_spte_count:
     Only present on 32-bit hosts, where a 64-bit spte cannot be written
     atomically.  The reader uses this while running out of the MMU lock
@@ -402,27 +398,6 @@ causes its disallow_lpage to be incremented, thus preventing instantiation of
 a large spte.  The frames at the end of an unaligned memory slot have
 artificially inflated ->disallow_lpages so they can never be instantiated.
 
-Zapping all pages (page generation count)
-=========================================
-
-For the large memory guests, walking and zapping all pages is really slow
-(because there are a lot of pages), and also blocks memory accesses of
-all VCPUs because it needs to hold the MMU lock.
-
-To make it be more scalable, kvm maintains a global generation number
-which is stored in kvm->arch.mmu_valid_gen.  Every shadow page stores
-the current global generation-number into sp->mmu_valid_gen when it
-is created.  Pages with a mismatching generation number are "obsolete".
-
-When KVM need zap all shadow pages sptes, it just simply increases the global
-generation-number then reload root shadow pages on all vcpus.  As the VCPUs
-create new shadow page tables, the old pages are not used because of the
-mismatching generation number.
-
-KVM then walks through all pages and zaps obsolete pages.  While the zap
-operation needs to take the MMU lock, the lock can be released periodically
-so that the VCPUs can make progress.
-
 Fast invalidation of MMIO sptes
 ===============================
 
@@ -435,8 +410,7 @@ shadow pages, and is made more scalable with a similar technique.
 MMIO sptes have a few spare bits, which are used to store a
 generation number.  The global generation number is stored in
 kvm_memslots(kvm)->generation, and increased whenever guest memory info
-changes.  This generation number is distinct from the one described in
-the previous section.
+changes.
 
 When KVM finds an MMIO spte, it checks the generation number of the spte.
 If the generation number of the spte does not equal the global generation
@@ -452,13 +426,16 @@ stored into the MMIO spte.  Thus, the MMIO spte might be created based on
 out-of-date information, but with an up-to-date generation number.
 
 To avoid this, the generation number is incremented again after synchronize_srcu
-returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a
+returns; thus, bit 63 of kvm_memslots(kvm)->generation is set to 1 only during a
 memslot update, while some SRCU readers might be using the old copy.  We do not
 want to use an MMIO sptes created with an odd generation number, and we can do
-this without losing a bit in the MMIO spte.  The low bit of the generation
-is not stored in MMIO spte, and presumed zero when it is extracted out of the
-spte.  If KVM is unlucky and creates an MMIO spte while the low bit is 1,
-the next access to the spte will always be a cache miss.
+this without losing a bit in the MMIO spte.  The "update in-progress" bit of the
+generation is not stored in MMIO spte, and so is implicitly zero when the
+generation is extracted out of the spte.  If KVM is unlucky and creates an MMIO
+spte while an update is in-progress, the next access to the spte will always be
+a cache miss.  For example, a subsequent access during the update window will
+miss due to the in-progress flag diverging, while an access after the update
+window closes will have a higher generation number (as compared to the spte).
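
A simplified model of that check; illustrative only, not the kernel's actual
helpers:

#include <stdbool.h>
#include <stdint.h>

/* Bit 63 of the memslots generation flags "memslot update in progress". */
#define GEN_UPDATE_IN_PROGRESS  (1ULL << 63)

/* What an MMIO spte records: the generation with the flag bit dropped. */
static uint64_t mmio_spte_gen(uint64_t memslots_gen)
{
        return memslots_gen & ~GEN_UPDATE_IN_PROGRESS;
}

/*
 * An spte created while an update was in progress stored a generation with
 * the flag implicitly clear, so it never equals the live generation again:
 * during the window the flag differs, and after the window the generation
 * has been bumped.
 */
static bool mmio_spte_is_stale(uint64_t stored_gen, uint64_t memslots_gen)
{
        return stored_gen != memslots_gen;
}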
 
 
 Further reading
index f8ff9ae52c21085372c31d04a1c7ed4f02cd1494..e17ebf70b5480ecc232ce1f62aedf95a03b5f403 100644 (file)
@@ -5278,7 +5278,7 @@ DRM DRIVERS FOR VIVANTE GPU IP
 M:     Lucas Stach <l.stach@pengutronix.de>
 R:     Russell King <linux+etnaviv@armlinux.org.uk>
 R:     Christian Gmeiner <christian.gmeiner@gmail.com>
-L:     etnaviv@lists.freedesktop.org
+L:     etnaviv@lists.freedesktop.org (moderated for non-subscribers)
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 F:     drivers/gpu/drm/etnaviv/
@@ -8461,6 +8461,7 @@ F:        include/linux/kvm*
 F:     include/kvm/iodev.h
 F:     virt/kvm/*
 F:     tools/kvm/
+F:     tools/testing/selftests/kvm/
 
 KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
 M:     Joerg Roedel <joro@8bytes.org>
@@ -8470,29 +8471,25 @@ S:      Maintained
 F:     arch/x86/include/asm/svm.h
 F:     arch/x86/kvm/svm.c
 
-KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
+KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:     Christoffer Dall <christoffer.dall@arm.com>
 M:     Marc Zyngier <marc.zyngier@arm.com>
+R:     James Morse <james.morse@arm.com>
+R:     Julien Thierry <julien.thierry@arm.com>
+R:     Suzuki K Poulose <suzuki.poulose@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
 W:     http://systems.cs.columbia.edu/projects/kvm-arm
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
-S:     Supported
+S:     Maintained
 F:     arch/arm/include/uapi/asm/kvm*
 F:     arch/arm/include/asm/kvm*
 F:     arch/arm/kvm/
-F:     virt/kvm/arm/
-F:     include/kvm/arm_*
-
-KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
-M:     Christoffer Dall <christoffer.dall@arm.com>
-M:     Marc Zyngier <marc.zyngier@arm.com>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:     kvmarm@lists.cs.columbia.edu
-S:     Maintained
 F:     arch/arm64/include/uapi/asm/kvm*
 F:     arch/arm64/include/asm/kvm*
 F:     arch/arm64/kvm/
+F:     virt/kvm/arm/
+F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
 M:     James Hogan <jhogan@kernel.org>
index 5085a1eab9fc538adbc7e07f84c7ef89f8195949..054ead960f983a99a9f241ce1427fe0e1cd6cb8a 100644 (file)
@@ -1310,7 +1310,7 @@ config SCHED_SMT
 config HAVE_ARM_SCU
        bool
        help
-         This option enables support for the ARM system coherency unit
+         This option enables support for the ARM snoop control unit
 
 config HAVE_ARM_ARCH_TIMER
        bool "Architected timer support"
@@ -1322,7 +1322,6 @@ config HAVE_ARM_ARCH_TIMER
 
 config HAVE_ARM_TWD
        bool
-       select TIMER_OF if OF
        help
          This options enables support for the ARM timer and watchdog unit
 
index 1168a03c85255fbe7295c0fad7f77fb7d2e9b2af..36c80d3dd93f2fe64f83d21482ba74fe9fe8cf88 100644 (file)
@@ -20,10 +20,12 @@ config DRAM_SIZE
 
 config FLASH_MEM_BASE
        hex 'FLASH Base Address' if SET_MEM_PARAM
+       depends on CPU_ARM740T || CPU_ARM946E || CPU_ARM940T
        default 0x00400000
 
 config FLASH_SIZE
        hex 'FLASH Size' if SET_MEM_PARAM
+       depends on CPU_ARM740T || CPU_ARM946E || CPU_ARM940T
        default 0x00400000
 
 config PROCESSOR_ID
index 00000e91ad652897624e7a39061292999292199c..807a7d06c2a0825bed8f8ea8c1d9d8c7eb5f8be6 100644 (file)
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=-p --no-undefined -X --pic-veneer
+LDFLAGS_vmlinux        := --no-undefined -X --pic-veneer
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux        += --be8
 KBUILD_LDFLAGS_MODULE  += --be8
index 83e1a076a5d64a095d558d214f541f9407e027fa..981a8d03f064c24f47d7fa69294a5e14a3805fa4 100644 (file)
@@ -8,7 +8,7 @@
 
 GCOV_PROFILE   := n
 
-LDFLAGS_bootp  :=-p --no-undefined -X \
+LDFLAGS_bootp  := --no-undefined -X \
                 --defsym initrd_phys=$(INITRD_PHYS) \
                 --defsym params_phys=$(PARAMS_PHYS) -T
 AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\"
index 78b508075161fd4967bdbb3821eab162aa9dd5bc..142927e5f485adfe034855af467013ca0bbe8543 100644 (file)
@@ -44,7 +44,7 @@ _start:               add     lr, pc, #-0x8           @ lr = current load addr
  */
                movne   r10, #0                 @ terminator
                movne   r4, #2                  @ Size of this entry (2 words)
-               stmneia r9, {r4, r5, r10}       @ Size, ATAG_CORE, terminator
+               stmiane r9, {r4, r5, r10}       @ Size, ATAG_CORE, terminator
 
 /*
  * find the end of the tag list, and then add an INITRD tag on the end.
index 6114ae6ea4666ddee8e5429b6074c7b5249af229..9219389bbe612799fd473a36fb10261596325548 100644 (file)
@@ -132,8 +132,6 @@ endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
-# ?
-LDFLAGS_vmlinux += -p
 # Report unresolved symbol references
 LDFLAGS_vmlinux += --no-undefined
 # Delete all temporary local symbols
index 8517c8606b4a7f1bafabd39556b8aa2390910bc1..b1dcdb9f4030e22b65fa1838a4b68cbeb847fa49 100644 (file)
@@ -75,7 +75,7 @@ Lrow4bpplp:
        tst     r1, #7                          @ avoid using r7 directly after
        str     r7, [r0, -r5]!
        subne   r1, r1, #1
-       ldrneb  r7, [r6, r1]
+       ldrbne  r7, [r6, r1]
        bne     Lrow4bpplp
        ldmfd   sp!, {r4 - r7, pc}
 
@@ -103,7 +103,7 @@ Lrow8bpplp:
        sub     r0, r0, r5                      @ avoid ip
        stmia   r0, {r4, ip}
        subne   r1, r1, #1
-       ldrneb  r7, [r6, r1]
+       ldrbne  r7, [r6, r1]
        bne     Lrow8bpplp
        ldmfd   sp!, {r4 - r7, pc}
 
index d3e3622979c5bf9379dd0f3b9cd5ce55048975f9..de48b5808ef6e97efee62f4e7e2c1a257c770970 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "imx28.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 / {
        model = "Crystalfontz CFA-10036 Board";
@@ -96,7 +97,7 @@
                                        pinctrl-names = "default";
                                        pinctrl-0 = <&ssd1306_cfa10036>;
                                        reg = <0x3c>;
-                                       reset-gpios = <&gpio2 7 0>;
+                                       reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
                                        solomon,height = <32>;
                                        solomon,width = <128>;
                                        solomon,page-offset = <0>;
index ad574d20415c219f408bb494a1bec3de6a7fcbac..1b1b82b37ce035f97e17e40be6d81d913ee27e61 100644 (file)
@@ -381,7 +381,7 @@ static int __init nocache_trampoline(unsigned long _arg)
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        phys_reset_t phys_reset;
 
-       mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+       mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
        setup_mm_for_reboot();
 
        __mcpm_cpu_going_down(cpu, cluster);
index f6f485f4744e034679f5b4483daa74d98a3907d3..d15b8c99f1b3c994967ac6840e0c223eb92c1e0c 100644 (file)
@@ -55,7 +55,7 @@
 #define ICH_VTR                                __ACCESS_CP15(c12, 4, c11, 1)
 #define ICH_MISR                       __ACCESS_CP15(c12, 4, c11, 2)
 #define ICH_EISR                       __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELSR                       __ACCESS_CP15(c12, 4, c11, 5)
+#define ICH_ELRSR                      __ACCESS_CP15(c12, 4, c11, 5)
 #define ICH_VMCR                       __ACCESS_CP15(c12, 4, c11, 7)
 
 #define __LR0(x)                       __ACCESS_CP15(c12, 4, c12, x)
@@ -152,7 +152,7 @@ CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
 CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
 CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
 CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
 CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
 CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
 CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
index 28a48e0d4cca04f65b8bc90179c8daf5a3632397..b59921a560da3ea0fb72baa5194a1aced4239c6e 100644 (file)
@@ -376,9 +376,9 @@ THUMB(      orr     \reg , \reg , #PSR_T_BIT        )
        .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
 9999:
        .if     \inc == 1
-       \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+       \instr\()b\t\cond\().w \reg, [\ptr, #\off]
        .elseif \inc == 4
-       \instr\cond\()\t\().w \reg, [\ptr, #\off]
+       \instr\t\cond\().w \reg, [\ptr, #\off]
        .else
        .error  "Unsupported inc macro argument"
        .endif
@@ -417,9 +417,9 @@ THUMB(      orr     \reg , \reg , #PSR_T_BIT        )
        .rept   \rept
 9999:
        .if     \inc == 1
-       \instr\cond\()b\()\t \reg, [\ptr], #\inc
+       \instr\()b\t\cond \reg, [\ptr], #\inc
        .elseif \inc == 4
-       \instr\cond\()\t \reg, [\ptr], #\inc
+       \instr\t\cond \reg, [\ptr], #\inc
        .else
        .error  "Unsupported inc macro argument"
        .endif
@@ -460,7 +460,7 @@ THUMB(      orr     \reg , \reg , #PSR_T_BIT        )
        .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
 #ifndef CONFIG_CPU_USE_DOMAINS
        adds    \tmp, \addr, #\size - 1
-       sbcccs  \tmp, \tmp, \limit
+       sbcscc  \tmp, \tmp, \limit
        bcs     \bad
 #ifdef CONFIG_CPU_SPECTRE
        movcs   \addr, #0
@@ -474,7 +474,7 @@ THUMB(      orr     \reg , \reg , #PSR_T_BIT        )
        sub     \tmp, \limit, #1
        subs    \tmp, \tmp, \addr       @ tmp = limit - 1 - addr
        addhs   \tmp, \tmp, #1          @ if (tmp >= 0) {
-       subhss  \tmp, \tmp, \size       @ tmp = limit - (addr + size) }
+       subshs  \tmp, \tmp, \size       @ tmp = limit - (addr + size) }
        movlo   \addr, #0               @ if (tmp < 0) addr = NULL
        csdb
 #endif
index 69772e742a0acdc16dbf76f2130a8025af41b6c0..83ae97c049d9bd48b474f0127164c71628bf05c0 100644 (file)
@@ -11,6 +11,8 @@
 #define sev()  __asm__ __volatile__ ("sev" : : : "memory")
 #define wfe()  __asm__ __volatile__ ("wfe" : : : "memory")
 #define wfi()  __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe()  do { } while (0)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
index 8c215acd9b573232a6818de2cd847047bdc6bb6e..f7692731e514359a6a8fb66cb229444b9cf9fabe 100644 (file)
                ldr     \tmp, =irq_prio_h
                teq     \irqstat, #0
 #ifdef IOMD_BASE
-               ldreqb  \irqstat, [\base, #IOMD_DMAREQ] @ get dma
+               ldrbeq  \irqstat, [\base, #IOMD_DMAREQ] @ get dma
                addeq   \tmp, \tmp, #256                @ irq_prio_h table size
                teqeq   \irqstat, #0
                bne     2406f
 #endif
-               ldreqb  \irqstat, [\base, #IOMD_IRQREQA]        @ get low priority
+               ldrbeq  \irqstat, [\base, #IOMD_IRQREQA]        @ get low priority
                addeq   \tmp, \tmp, #256                @ irq_prio_d table size
                teqeq   \irqstat, #0
 #ifdef IOMD_IRQREQC
-               ldreqb  \irqstat, [\base, #IOMD_IRQREQC]
+               ldrbeq  \irqstat, [\base, #IOMD_IRQREQC]
                addeq   \tmp, \tmp, #256                @ irq_prio_l table size
                teqeq   \irqstat, #0
 #endif
 #ifdef IOMD_IRQREQD
-               ldreqb  \irqstat, [\base, #IOMD_IRQREQD]
+               ldrbeq  \irqstat, [\base, #IOMD_IRQREQD]
                addeq   \tmp, \tmp, #256                @ irq_prio_lc table size
                teqeq   \irqstat, #0
 #endif
-2406:          ldrneb  \irqnr, [\tmp, \irqstat]        @ get IRQ number
+2406:          ldrbne  \irqnr, [\tmp, \irqstat]        @ get IRQ number
                .endm
 
 /*
index 77121b713bef3adbc4b2c77c559dab1886dec96b..8927cae7c96662a4f4bb3187a8beca641cbe6e5a 100644 (file)
@@ -265,6 +265,14 @@ static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
        }
 }
 
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+       if (kvm_vcpu_trap_is_iabt(vcpu))
+               return false;
+
+       return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
 static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
index 50e89869178a9725f0bb6c8bb2082bc186fcbab7..770d73257ad936d6dea09f11d05b9639bd00b051 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
+#include <asm/smp_plat.h>
 #include <kvm/arm_arch_timer.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -57,10 +58,13 @@ int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
 
-struct kvm_arch {
-       /* VTTBR value associated with below pgd and vmid */
-       u64    vttbr;
+struct kvm_vmid {
+       /* The VMID generation used for the virt. memory system */
+       u64    vmid_gen;
+       u32    vmid;
+};
 
+struct kvm_arch {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
@@ -70,11 +74,11 @@ struct kvm_arch {
         */
 
        /* The VMID generation used for the virt. memory system */
-       u64    vmid_gen;
-       u32    vmid;
+       struct kvm_vmid vmid;
 
        /* Stage-2 page table */
        pgd_t *pgd;
+       phys_addr_t pgd_phys;
 
        /* Interrupt controller */
        struct vgic_dist        vgic;
@@ -148,6 +152,13 @@ struct kvm_cpu_context {
 
 typedef struct kvm_cpu_context kvm_cpu_context_t;
 
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+                                            int cpu)
+{
+       /* The host's MPIDR is immutable, so let's set it up at boot time */
+       cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
+}
+
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
@@ -224,7 +235,35 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-unsigned long kvm_call_hyp(void *hypfn, ...);
+
+unsigned long __kvm_call_hyp(void *hypfn, ...);
+
+/*
+ * The has_vhe() part doesn't get emitted, but is used for type-checking.
+ */
+#define kvm_call_hyp(f, ...)                                           \
+       do {                                                            \
+               if (has_vhe()) {                                        \
+                       f(__VA_ARGS__);                                 \
+               } else {                                                \
+                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+               }                                                       \
+       } while(0)
+
+#define kvm_call_hyp_ret(f, ...)                                       \
+       ({                                                              \
+               typeof(f(__VA_ARGS__)) ret;                             \
+                                                                       \
+               if (has_vhe()) {                                        \
+                       ret = f(__VA_ARGS__);                           \
+               } else {                                                \
+                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
+                                            ##__VA_ARGS__);            \
+               }                                                       \
+                                                                       \
+               ret;                                                    \
+       })
+
 void force_vm_exit(const cpumask_t *mask);
 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
@@ -275,7 +314,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
         * compliant with the PCS!).
         */
 
-       kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+       __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }
 
 static inline void __cpu_init_stage2(void)
index e93a0cac9addc5bf8caf9fa2bcca34223ee1c281..87bcd18df8d58eb83175f54f7220088cbb287691 100644 (file)
@@ -40,6 +40,7 @@
 #define TTBR1          __ACCESS_CP15_64(1, c2)
 #define VTTBR          __ACCESS_CP15_64(6, c2)
 #define PAR            __ACCESS_CP15_64(0, c7)
+#define CNTP_CVAL      __ACCESS_CP15_64(2, c14)
 #define CNTV_CVAL      __ACCESS_CP15_64(3, c14)
 #define CNTVOFF                __ACCESS_CP15_64(4, c14)
 
@@ -85,6 +86,7 @@
 #define TID_PRIV       __ACCESS_CP15(c13, 0, c0, 4)
 #define HTPIDR         __ACCESS_CP15(c13, 4, c0, 2)
 #define CNTKCTL                __ACCESS_CP15(c14, 0, c1, 0)
+#define CNTP_CTL       __ACCESS_CP15(c14, 0, c2, 1)
 #define CNTV_CTL       __ACCESS_CP15(c14, 0, c3, 1)
 #define CNTHCTL                __ACCESS_CP15(c14, 4, c1, 0)
 
@@ -94,6 +96,8 @@
 #define read_sysreg_el0(r)             read_sysreg(r##_el0)
 #define write_sysreg_el0(v, r)         write_sysreg(v, r##_el0)
 
+#define cntp_ctl_el0                   CNTP_CTL
+#define cntp_cval_el0                  CNTP_CVAL
 #define cntv_ctl_el0                   CNTV_CTL
 #define cntv_cval_el0                  CNTV_CVAL
 #define cntvoff_el2                    CNTVOFF
index 3a875fc1b63ca3416c071afa9536262525915025..2de96a180166eb920833b1100159716735f5e206 100644 (file)
@@ -421,9 +421,14 @@ static inline int hyp_map_aux_data(void)
 
 static inline void kvm_set_ipa_limit(void) {}
 
-static inline bool kvm_cpu_has_cnp(void)
+static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
 {
-       return false;
+       struct kvm_vmid *vmid = &kvm->arch.vmid;
+       u64 vmid_field, baddr;
+
+       baddr = kvm->arch.pgd_phys;
+       vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+       return kvm_phys_to_vttbr(baddr) | vmid_field;
 }
 
 #endif /* !__ASSEMBLY__ */
index a757401129f9567cbdebea5249b60e7e9a117e87..48ce1b19069b67d86bb97525cc3bd7e54384f33f 100644 (file)
@@ -125,6 +125,9 @@ extern pgprot_t             pgprot_s2_device;
 #define pgprot_stronglyordered(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_device(prot) \
+       __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
index 120f4c9bbfde2a3fbade0fa5e611aca635bc6b82..57fe73ea0f7258af4315ea6b7fcecea7566d838e 100644 (file)
@@ -89,7 +89,11 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax()                    smp_mb()
+#define cpu_relax()                                            \
+       do {                                                    \
+               smp_mb();                                       \
+               __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;");      \
+       } while (0)
 #else
 #define cpu_relax()                    barrier()
 #endif
index 709a55989cb0641f7006f5a1765898aa9fb972c3..451ae684aaf48ff0822bafd30849d9c82cacf6f7 100644 (file)
@@ -67,7 +67,6 @@ struct secondary_data {
        void *stack;
 };
 extern struct secondary_data secondary_data;
-extern volatile int pen_release;
 extern void secondary_startup(void);
 extern void secondary_startup_arm(void);
 
index 312784ee9936ae4fe6da6459d9ca452ada438d07..c729d2113a2457e32a32ca989fa05404c52625e0 100644 (file)
 #define TWD_TIMER_CONTROL_PERIODIC     (1 << 1)
 #define TWD_TIMER_CONTROL_IT_ENABLE    (1 << 2)
 
-#include <linux/ioport.h>
-
-struct twd_local_timer {
-       struct resource res[2];
-};
-
-#define DEFINE_TWD_LOCAL_TIMER(name,base,irq)  \
-struct twd_local_timer name __initdata = {     \
-       .res    = {                             \
-               DEFINE_RES_MEM(base, 0x10),     \
-               DEFINE_RES_IRQ(irq),            \
-       },                                      \
-};
-
-int twd_local_timer_register(struct twd_local_timer *);
-
 #endif
index 099c78fcf62d43cd0a123b4d520d44a5d853a813..8f009e788ad401766b5b9456b6ac9f1ec75e84fe 100644 (file)
@@ -210,11 +210,12 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 
        prefetchw(&rw->lock);
        __asm__ __volatile__(
+"      .syntax unified\n"
 "1:    ldrex   %0, [%2]\n"
 "      adds    %0, %0, #1\n"
 "      strexpl %1, %0, [%2]\n"
        WFE("mi")
-"      rsbpls  %0, %1, #0\n"
+"      rsbspl  %0, %1, #0\n"
 "      bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
index 452bbdcbcc835fc838b4c957f2da3e6dafd2fdc9..506314265c6f1a24e50641dbbea1d41367d322d5 100644 (file)
@@ -10,6 +10,7 @@ struct sleep_save_sp {
 };
 
 extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
 extern void cpu_resume_arm(void);
 extern int cpu_suspend(unsigned long, int (*)(unsigned long));
 
index ae5a0df5316e5780121bcc8de5f78b847222e9e7..dff49845eb87628a007776e695fc4103db1a7a58 100644 (file)
@@ -85,7 +85,8 @@ static inline void set_fs(mm_segment_t fs)
 #define __range_ok(addr, size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr);   \
-       __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+       __asm__(".syntax unified\n" \
+               "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
index 187ccf6496ad61c222dc6e53102ec8a5b6ccf881..2cb00d15831b93e9e10164134d1f72cdb64c4bb0 100644 (file)
@@ -49,7 +49,7 @@
  * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
  */
 #define EXC_RET_STACK_MASK                     0x00000004
-#define EXC_RET_THREADMODE_PROCESSSTACK                0xfffffffd
+#define EXC_RET_THREADMODE_PROCESSSTACK                (3 << 2)
 
 /* Cache related definitions */
 
index ef5dfedacd8d642bdfe27147e37bdec0a6f055bb..628c336e8e3b20c5918f58d9d8f5139b10723953 100644 (file)
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
        tst     \tmp, #HWCAP_VFPD32
-       ldcnel  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
+       ldclne  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
        addeq   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
        and     \tmp, \tmp, #MVFR0_A_SIMD_MASK      @ A_SIMD field
        cmp     \tmp, #2                            @ 32 x 64bit registers?
-       ldceql  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
+       ldcleq  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
        addne   \base, \base, #32*4                 @ step over unused register space
 #endif
 #endif
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
        tst     \tmp, #HWCAP_VFPD32
-       stcnel  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
+       stclne  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
        addeq   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
        and     \tmp, \tmp, #MVFR0_A_SIMD_MASK      @ A_SIMD field
        cmp     \tmp, #2                            @ 32 x 64bit registers?
-       stceql  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
+       stcleq  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
        addne   \base, \base, #32*4                 @ step over unused register space
 #endif
 #endif
index 3bc80599c02256a8e2f6681a039a98fe67fef82a..4a5a645c76e2f9629c8f888ffeacfa9b3b077591 100644 (file)
 
                .macro  senduart, rd, rx
                cmp     \rx, #0
-               strneb  \rd, [\rx, #UART_TX << UART_SHIFT]
+               strbne  \rd, [\rx, #UART_TX << UART_SHIFT]
 1001:
                .endm
 
index b795dc2408c05a65fb3e28ec2001ebb04eaf50df..b9f94e03d916a9919654ce87284f451e7ee2c488 100644 (file)
@@ -86,7 +86,7 @@ hexbuf_rel:   .long   hexbuf_addr - .
 ENTRY(printascii)
                addruart_current r3, r1, r2
 1:             teq     r0, #0
-               ldrneb  r1, [r0], #1
+               ldrbne  r1, [r0], #1
                teqne   r1, #0
                reteq   lr
 2:             teq     r1, #'\n'
index e85a3af9ddeb5694b793363f8245ba1ad5f99899..ce4aea57130aff81ac1ec01279777a22df6ee815 100644 (file)
@@ -636,7 +636,7 @@ call_fpe:
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
-       movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+       movscs  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
 #endif
  ARM(  add     pc, pc, r8, lsr #6      )
@@ -872,7 +872,7 @@ __kuser_cmpxchg64:                          @ 0xffff0f60
        smp_dmb arm
 1:     ldrexd  r0, r1, [r2]                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
-       eoreqs  r3, r1, r5                      @ compare with oldval (2)
+       eorseq  r3, r1, r5                      @ compare with oldval (2)
        strexdeq r3, r6, r7, [r2]               @ store newval if eq
        teqeq   r3, #1                          @ success?
        beq     1b                              @ if no then retry
@@ -896,8 +896,8 @@ __kuser_cmpxchg64:                          @ 0xffff0f60
        ldmia   r1, {r6, lr}                    @ load new val
 1:     ldmia   r2, {r0, r1}                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
-       eoreqs  r3, r1, r5                      @ compare with oldval (2)
-2:     stmeqia r2, {r6, lr}                    @ store newval if eq
+       eorseq  r3, r1, r5                      @ compare with oldval (2)
+2:     stmiaeq r2, {r6, lr}                    @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        ldmfd   sp!, {r4, r5, r6, pc}
 
@@ -911,7 +911,7 @@ kuser_cmpxchg64_fixup:
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
        subs    r8, r4, r7
-       rsbcss  r8, r8, #(2b - 1b)
+       rsbscs  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
 #if __LINUX_ARM_ARCH__ < 6
        bcc     kuser_cmpxchg32_fixup
@@ -969,7 +969,7 @@ kuser_cmpxchg32_fixup:
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r4, r7
-       rsbcss  r8, r8, #(2b - 1b)
+       rsbscs  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        ret     lr
        .previous
index 0465d65d23de5786ef5df32738d0830ca492353a..f7649adef505ebffc02c3891af9a59cb8bea5792 100644 (file)
@@ -373,7 +373,7 @@ sys_syscall:
                movhs   scno, #0
                csdb
 #endif
-               stmloia sp, {r5, r6}            @ shuffle args
+               stmialo sp, {r5, r6}            @ shuffle args
                movlo   r0, r1
                movlo   r1, r2
                movlo   r2, r3
index 773424843d6efcc2ebeb0ec0cfa88d67643213cc..32051ec5b33fa3dc41eb9c26e063cba7d502c048 100644 (file)
          */
        .macro  v7m_exception_slow_exit ret_r0
        cpsid   i
-       ldr     lr, =EXC_RET_THREADMODE_PROCESSSTACK
+       ldr     lr, =exc_ret
+       ldr     lr, [lr]
 
        @ read original r12, sp, lr, pc and xPSR
        add     r12, sp, #S_IP
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
-       ldmccia r1, {r0 - r6}                   @ reload r0-r6
-       stmccia sp, {r4, r5}                    @ update stack arguments
+       ldmiacc r1, {r0 - r6}                   @ reload r0-r6
+       stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
 #else
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
-       ldmccia r1, {r0 - r6}                   @ reload r0-r6
-       stmccia sp, {r4, r5}                    @ update stack arguments
+       ldmiacc r1, {r0 - r6}                   @ reload r0-r6
+       stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
 #endif
index abcf4784852593397daf3b1e6cf5d70cf47660e0..19d2dcd6530dc351188bd6c7785705e36e9e64d7 100644 (file)
@@ -146,3 +146,7 @@ ENTRY(vector_table)
        .rept   CONFIG_CPU_V7M_NUM_IRQ
        .long   __irq_entry             @ External Interrupts
        .endr
+       .align  2
+       .globl  exc_ret
+exc_ret:
+       .space  4
index ec29de2500764e11ad839bda165c135f12b6a6e5..c08d2d890f7b918981c472c155c6df368a1b30b3 100644 (file)
@@ -439,8 +439,8 @@ M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
        str     r5, [r12, #PMSAv8_RBAR_A(0)]
        str     r6, [r12, #PMSAv8_RLAR_A(0)]
 #else
-       mcr     p15, 0, r5, c6, c10, 1                  @ PRBAR4
-       mcr     p15, 0, r6, c6, c10, 2                  @ PRLAR4
+       mcr     p15, 0, r5, c6, c10, 0                  @ PRBAR4
+       mcr     p15, 0, r6, c6, c10, 1                  @ PRLAR4
 #endif
 #endif
        ret     lr
index 60146e32619a5912bf12b5277397f2e19213b2a8..82a942894fc04142b1aaf6eaeb646b6e6552aab6 100644 (file)
@@ -180,8 +180,8 @@ ARM_BE8(orr r7, r7, #(1 << 25))     @ HSCTLR.EE
        @ Check whether GICv3 system registers are available
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
        ubfx    r7, r7, #28, #4
-       cmp     r7, #1
-       bne     2f
+       teq     r7, #0
+       beq     2f
 
        @ Enable system register accesses
        mrc     p15, 4, r7, c12, c9, 5  @ ICC_HSRE
index dd2eb5f76b9f0a7d64f50169dd0d04a402b2ae67..76300f3813e89bc48a76d83dc3076d9f7b79ee84 100644 (file)
@@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
 
        set_cpu_online(smp_processor_id(), false);
        atomic_dec(&waiting_for_crash_ipi);
-       while (1)
+
+       while (1) {
                cpu_relax();
+               wfe();
+       }
 }
 
 void crash_smp_send_stop(void)
index a50dc00d79a273fac9e5d5c3f8be75f37231f766..d0a05a3bdb9652450ea4a6e1cdc6036c945ab42a 100644 (file)
@@ -16,7 +16,7 @@ struct patch {
        unsigned int insn;
 };
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
        __acquires(&patch_lock)
@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
                return addr;
 
        if (flags)
-               spin_lock_irqsave(&patch_lock, *flags);
+               raw_spin_lock_irqsave(&patch_lock, *flags);
        else
                __acquire(&patch_lock);
 
@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
        clear_fixmap(fixmap);
 
        if (flags)
-               spin_unlock_irqrestore(&patch_lock, *flags);
+               raw_spin_unlock_irqrestore(&patch_lock, *flags);
        else
                __release(&patch_lock);
 }
index a8257fc9cf2a908c4dd2b5f19c819b4c7fc40588..5dc8b80bb69383643eddec5ba62164e0458b4512 100644 (file)
@@ -120,6 +120,14 @@ ENDPROC(cpu_resume_after_mmu)
        .text
        .align
 
+#ifdef CONFIG_MCPM
+       .arm
+THUMB( .thumb                  )
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be)                     @ ensure we are in BE mode
+       b       no_hyp
+#endif
+
 #ifdef CONFIG_MMU
        .arm
 ENTRY(cpu_resume_arm)
@@ -135,6 +143,7 @@ ARM_BE8(setend be)                  @ ensure we are in BE mode
        bl      __hyp_stub_install_secondary
 #endif
        safe_svcmode_maskall r1
+no_hyp:
        mov     r1, #0
        ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
        ALT_UP_B(1f)
@@ -163,6 +172,9 @@ ENDPROC(cpu_resume)
 
 #ifdef CONFIG_MMU
 ENDPROC(cpu_resume_arm)
+#endif
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
 #endif
 
        .align 2
index 1d6f5ea522f49184c53a7d996769104107b4de8e..facd4240ca02c776716a2e1e14c803c359967cc8 100644 (file)
  */
 struct secondary_data secondary_data;
 
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
-
 enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
@@ -604,8 +598,10 @@ static void ipi_cpu_stop(unsigned int cpu)
        local_fiq_disable();
        local_irq_disable();
 
-       while (1)
+       while (1) {
                cpu_relax();
+               wfe();
+       }
 }
 
 static DEFINE_PER_CPU(struct completion *, cpu_completion);
index b30eafeef09633d24b1f55e9cf9b4f14314f0fee..3cdc399b9fc32064fd7c419962ad84f42c4dff53 100644 (file)
@@ -100,8 +100,6 @@ static void twd_timer_stop(void)
        disable_percpu_irq(clk->irq);
 }
 
-#ifdef CONFIG_COMMON_CLK
-
 /*
  * Updates clockevent frequency when the cpu frequency changes.
  * Called on the cpu that is changing frequency with interrupts disabled.
@@ -143,54 +141,6 @@ static int twd_clk_init(void)
 }
 core_initcall(twd_clk_init);
 
-#elif defined (CONFIG_CPU_FREQ)
-
-#include <linux/cpufreq.h>
-
-/*
- * Updates clockevent frequency when the cpu frequency changes.
- * Called on the cpu that is changing frequency with interrupts disabled.
- */
-static void twd_update_frequency(void *data)
-{
-       twd_timer_rate = clk_get_rate(twd_clk);
-
-       clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
-}
-
-static int twd_cpufreq_transition(struct notifier_block *nb,
-       unsigned long state, void *data)
-{
-       struct cpufreq_freqs *freqs = data;
-
-       /*
-        * The twd clock events must be reprogrammed to account for the new
-        * frequency.  The timer is local to a cpu, so cross-call to the
-        * changing cpu.
-        */
-       if (state == CPUFREQ_POSTCHANGE)
-               smp_call_function_single(freqs->cpu, twd_update_frequency,
-                       NULL, 1);
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block twd_cpufreq_nb = {
-       .notifier_call = twd_cpufreq_transition,
-};
-
-static int twd_cpufreq_init(void)
-{
-       if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
-               return cpufreq_register_notifier(&twd_cpufreq_nb,
-                       CPUFREQ_TRANSITION_NOTIFIER);
-
-       return 0;
-}
-core_initcall(twd_cpufreq_init);
-
-#endif
-
 static void twd_calibrate_rate(void)
 {
        unsigned long count;
@@ -366,21 +316,6 @@ out_free:
        return err;
 }
 
-int __init twd_local_timer_register(struct twd_local_timer *tlt)
-{
-       if (twd_base || twd_evt)
-               return -EBUSY;
-
-       twd_ppi = tlt->res[1].start;
-
-       twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
-       if (!twd_base)
-               return -ENOMEM;
-
-       return twd_local_timer_common_register(NULL);
-}
-
-#ifdef CONFIG_OF
 static int __init twd_local_timer_of_register(struct device_node *np)
 {
        int err;
@@ -406,4 +341,3 @@ out:
 TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
 TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
 TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
-#endif
index 0bee233fef9a30bc92df73da7beee4e3f2966f2f..314cfb232a6353165dc899f35e5747a27a7ef617 100644 (file)
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
 static const struct unwind_idx *__origin_unwind_idx;
 extern const struct unwind_idx __stop_unwind_idx[];
 
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 
 /* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
                /* module unwind tables */
                struct unwind_table *table;
 
-               spin_lock_irqsave(&unwind_lock, flags);
+               raw_spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
                                break;
                        }
                }
-               spin_unlock_irqrestore(&unwind_lock, flags);
+               raw_spin_unlock_irqrestore(&unwind_lock, flags);
        }
 
        pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;
 
-       spin_lock_irqsave(&unwind_lock, flags);
+       raw_spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
-       spin_unlock_irqrestore(&unwind_lock, flags);
+       raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
        return tab;
 }
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
        if (!tab)
                return;
 
-       spin_lock_irqsave(&unwind_lock, flags);
+       raw_spin_lock_irqsave(&unwind_lock, flags);
        list_del(&tab->list);
-       spin_unlock_irqrestore(&unwind_lock, flags);
+       raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
        kfree(tab);
 }
index 48de846f22464637be95c64e0a1ff9357b6e5a65..531e59f5be9c8f77370b926f5b77f2f8189e77ca 100644 (file)
@@ -8,9 +8,8 @@ ifeq ($(plus_virt),+virt)
        plus_virt_def := -DREQUIRES_VIRT=1
 endif
 
-ccflags-y += -Iarch/arm/kvm -Ivirt/kvm/arm/vgic
-CFLAGS_arm.o := -I. $(plus_virt_def)
-CFLAGS_mmu.o := -I.
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
+CFLAGS_arm.o := $(plus_virt_def)
 
 AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
 AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
index e8bd288fd5be909dad8ec74561330fea3a972ff7..14915c78bd99b6bdeed2ebb4ac02b006e14c714c 100644 (file)
@@ -293,15 +293,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
                             const struct coproc_params *p,
                             const struct coproc_reg *r)
 {
-       u64 now = kvm_phys_timer_read();
-       u64 val;
+       u32 val;
 
        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val + now);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_TVAL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
-               *vcpu_reg(vcpu, p->Rt1) = val - now;
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_TVAL);
+               *vcpu_reg(vcpu, p->Rt1) = val;
        }
 
        return true;
@@ -315,9 +316,11 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
 
        if (p->is_write) {
                val = *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, val);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_CTL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_CTL);
                *vcpu_reg(vcpu, p->Rt1) = val;
        }
 
@@ -333,9 +336,11 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
        if (p->is_write) {
                val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
                val |= *vcpu_reg(vcpu, p->Rt1);
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, val);
+               kvm_arm_timer_write_sysreg(vcpu,
+                                          TIMER_PTIMER, TIMER_REG_CVAL, val);
        } else {
-               val = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+               val = kvm_arm_timer_read_sysreg(vcpu,
+                                               TIMER_PTIMER, TIMER_REG_CVAL);
                *vcpu_reg(vcpu, p->Rt1) = val;
                *vcpu_reg(vcpu, p->Rt2) = val >> 32;
        }
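
The coproc.c hunks above stop open-coding the TVAL arithmetic (compare value relative to the current counter) and route all physical-timer accesses through kvm_arm_timer_read_sysreg()/kvm_arm_timer_write_sysreg(). The underlying architectural relationship, as a small illustrative sketch:

    /* TVAL is the signed 32-bit distance from the current counter to the
     * compare value; this is what the old code computed by hand. */
    #include <stdint.h>

    static int32_t tval_from_cval(uint64_t cval, uint64_t cnt)
    {
            return (int32_t)(cval - cnt);   /* truncated to 32 bits, as the hardware does */
    }

    static uint64_t cval_from_tval(int32_t tval, uint64_t cnt)
    {
            return cnt + tval;              /* what writing TVAL programs into CVAL */
    }
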
index c4782812714cf4cbdbd532c8b975cd5cd20ee4cf..8bf895ec6e04231f9849ef3ca4bf88d45eb3f74f 100644 (file)
@@ -27,7 +27,6 @@ static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
 
 void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
 {
-       ctxt->cp15[c0_MPIDR]            = read_sysreg(VMPIDR);
        ctxt->cp15[c0_CSSELR]           = read_sysreg(CSSELR);
        ctxt->cp15[c1_SCTLR]            = read_sysreg(SCTLR);
        ctxt->cp15[c1_CPACR]            = read_sysreg(CPACR);
index aa3f9a9837acafb43e8d97acdac7fac03a37d2b2..6ed3cf23fe8900c7bca998468c22ff55d2c61ae3 100644 (file)
@@ -176,7 +176,7 @@ THUMB(      orr     lr, lr, #PSR_T_BIT      )
        msr     spsr_cxsf, lr
        ldr     lr, =panic
        msr     ELR_hyp, lr
-       ldr     lr, =kvm_call_hyp
+       ldr     lr, =__kvm_call_hyp
        clrex
        eret
 ENDPROC(__hyp_do_panic)
index acf1c37fa49c218234a3927617eb65476ef21d06..3b058a5d7c5f145e411e426bfad4606406307762 100644 (file)
@@ -77,7 +77,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-       write_sysreg(kvm->arch.vttbr, VTTBR);
+       write_sysreg(kvm_get_vttbr(kvm), VTTBR);
        write_sysreg(vcpu->arch.midr, VPIDR);
 }
 
index c0edd450e10459612e37cc292ad8585494d12773..8e4afba7363571df59e755bacb84f8dda407e2f9 100644 (file)
@@ -41,7 +41,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       write_sysreg(kvm->arch.vttbr, VTTBR);
+       write_sysreg(kvm_get_vttbr(kvm), VTTBR);
        isb();
 
        write_sysreg(0, TLBIALLIS);
@@ -61,7 +61,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
        /* Switch to requested VMID */
-       write_sysreg(kvm->arch.vttbr, VTTBR);
+       write_sysreg(kvm_get_vttbr(kvm), VTTBR);
        isb();
 
        write_sysreg(0, TLBIALL);
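
Both the world-switch and TLB-flush paths above now load VTTBR through kvm_get_vttbr() rather than a value cached in kvm->arch.vttbr, presumably so the register is composed from the current VMID and stage-2 pgd at the point of use. A rough illustration of what a VTTBR value packs together (field positions per the ARMv7 virtualization extensions; this is not the kernel helper):

    #include <stdint.h>

    /* An ARMv7/LPAE VTTBR combines the stage-2 pgd base address with the
     * 8-bit VMID in bits [55:48]. */
    static uint64_t make_vttbr(uint64_t pgd_phys, uint8_t vmid)
    {
            uint64_t baddr = pgd_phys & ((1ULL << 48) - 1);

            return baddr | ((uint64_t)vmid << 48);
    }
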
index 80a1d6cd261cecf25df08f0e00e8b66c4a539e1d..a08e6419ebe90c53a79da18c091070d0aef7d703 100644 (file)
@@ -42,7 +42,7 @@
  *   r12:     caller save
  *   rest:    callee save
  */
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
        hvc     #0
        bx      lr
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
index ad25fd1872c7d7dd5c6c837e9f579b284dec2c15..0bff0176db2c4f1bb31dd9cdaa3b0eceaa26dcd9 100644 (file)
@@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o:     $(obj)/csumpartialcopygeneric.S
 $(obj)/csumpartialcopyuser.o:  $(obj)/csumpartialcopygeneric.S
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
-  NEON_FLAGS                   := -mfloat-abi=softfp -mfpu=neon
+  NEON_FLAGS                   := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
   CFLAGS_xor-neon.o            += $(NEON_FLAGS)
   obj-$(CONFIG_XOR_BLOCKS)     += xor-neon.o
 endif
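
Adding -march=armv7-a to NEON_FLAGS (echoed by the xor-neon.c #error text later in this series) matters because -mfpu=neon on its own does not force a NEON-capable architecture; with an older default -march some toolchains may never define __ARM_NEON__ and the build trips the guard. A small illustrative sketch of code that depends on that guard (not the kernel xor code):

    #ifdef __ARM_NEON__
    #include <stdint.h>
    #include <arm_neon.h>

    /* XOR 16 bytes with NEON intrinsics; only compiles when the target
     * architecture actually has NEON, which -march=armv7-a guarantees. */
    static void xor_16_bytes(uint8_t *dst, const uint8_t *src)
    {
            uint8x16_t a = vld1q_u8(dst);
            uint8x16_t b = vld1q_u8(src);

            vst1q_u8(dst, veorq_u8(a, b));
    }
    #endif
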
index 93cddab73072cc716b07c0353ec79bdbaec3757a..95bd359912889a5d31ceaefeaefb7597deaf5c14 100644 (file)
@@ -7,7 +7,7 @@
 ENTRY( \name           )
 UNWIND(        .fnstart        )
        ands    ip, r1, #3
-       strneb  r1, [ip]                @ assert word-aligned
+       strbne  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
@@ -32,7 +32,7 @@ ENDPROC(\name         )
 ENTRY( \name           )
 UNWIND(        .fnstart        )
        ands    ip, r1, #3
-       strneb  r1, [ip]                @ assert word-aligned
+       strbne  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
@@ -62,7 +62,7 @@ ENDPROC(\name         )
 ENTRY( \name           )
 UNWIND(        .fnstart        )
        ands    ip, r1, #3
-       strneb  r1, [ip]                @ assert word-aligned
+       strbne  r1, [ip]                @ assert word-aligned
        and     r2, r0, #31
        mov     r0, r0, lsr #5
        mov     r3, #1
@@ -89,7 +89,7 @@ ENDPROC(\name         )
 ENTRY( \name           )
 UNWIND(        .fnstart        )
        ands    ip, r1, #3
-       strneb  r1, [ip]                @ assert word-aligned
+       strbne  r1, [ip]                @ assert word-aligned
        and     r3, r0, #31
        mov     r0, r0, lsr #5
        save_and_disable_irqs ip
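
The long run of assembly hunks that follows (bitops, checksum, io, the memcpy/memmove/memset templates, copypage, cache-v6, lib1funcs and so on) is a mechanical conversion to unified assembler syntax: the condition code goes after the operation and size suffix ("strbne", "ldmiane", "ldmfdeq") rather than in the middle ("strneb", "ldmneia", "ldmeqfd"), and the flag-setting "s" attaches to the base mnemonic with the condition last ("subscs", "movsne"). A minimal standalone C-with-inline-asm sketch of the new spelling (assumed to be built with a UAL-capable assembler; not kernel code):

    /* Conditionally store a byte; "strbne" is the unified-syntax spelling of
     * the old "strneb".  The "it ne" is required in Thumb-2 and harmlessly
     * accepted in ARM mode. */
    static void store_byte_if_nonzero(int flag, unsigned char *dst, unsigned char val)
    {
            asm volatile(
                    ".syntax unified\n\t"
                    "cmp    %1, #0\n\t"
                    "it     ne\n\t"
                    "strbne %2, [%0]\n\t"
                    :
                    : "r" (dst), "r" (flag), "r" (val)
                    : "cc", "memory");
    }
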
index e936352ccb0013e040fcd9b22bda1c583cfff361..55946e3fa2ba8407a8108e664101481a3a7eff7d 100644 (file)
@@ -44,7 +44,7 @@ UNWIND(.save {r1, lr})
                strusr  r2, r0, 1, ne, rept=2
                tst     r1, #1                  @ x1 x0 x1 x0 x1 x0 x1
                it      ne                      @ explicit IT needed for the label
-USER(          strnebt r2, [r0])
+USER(          strbtne r2, [r0])
                mov     r0, #0
                ldmfd   sp!, {r1, pc}
 UNWIND(.fnend)
index 0d4c189c7f4f00ca4795ae7aa78917838697580b..6a3419e2c6d86e69dcea30f5921a95bab89f7c31 100644 (file)
@@ -91,7 +91,7 @@
        .endm
 
        .macro str1b ptr reg cond=al abort
-       str\cond\()b \reg, [\ptr], #1
+       strb\cond \reg, [\ptr], #1
        .endm
 
        .macro enter reg1 reg2
index 6ee2f6706f869b03c95f30d2b1a1cf7adf9086d9..b84ce17920439e0c45d7712faf461a796d490c27 100644 (file)
@@ -39,9 +39,9 @@ ENTRY(copy_page)
        .endr
                subs    r2, r2, #1                      @       1
                stmia   r0!, {r3, r4, ip, lr}           @       4
-               ldmgtia r1!, {r3, r4, ip, lr}           @       4
+               ldmiagt r1!, {r3, r4, ip, lr}           @       4
                bgt     1b                              @       1
-       PLD(    ldmeqia r1!, {r3, r4, ip, lr}   )
+       PLD(    ldmiaeq r1!, {r3, r4, ip, lr}   )
        PLD(    beq     2b                      )
                ldmfd   sp!, {r4, pc}                   @       3
 ENDPROC(copy_page)
index 652e4d98cd47b7c56fefbbc055451ace6d3e99e1..a11f2c25e03a7a130e8f40c881aedc1cb49e582c 100644 (file)
@@ -99,7 +99,7 @@
 
        CALGN(  ands    ip, r0, #31             )
        CALGN(  rsb     r3, ip, #32             )
-       CALGN(  sbcnes  r4, r3, r2              )  @ C is always set here
+       CALGN(  sbcsne  r4, r3, r2              )  @ C is always set here
        CALGN(  bcs     2f                      )
        CALGN(  adr     r4, 6f                  )
        CALGN(  subs    r2, r2, r3              )  @ C gets set
 
        CALGN(  ands    ip, r0, #31             )
        CALGN(  rsb     ip, ip, #32             )
-       CALGN(  sbcnes  r4, ip, r2              )  @ C is always set here
+       CALGN(  sbcsne  r4, ip, r2              )  @ C is always set here
        CALGN(  subcc   r2, r2, ip              )
        CALGN(  bcc     15f                     )
 
                orr     r9, r9, ip, lspush #\push
                mov     ip, ip, lspull #\pull
                orr     ip, ip, lr, lspush #\push
-               str8w   r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
+               str8w   r0, r3, r4, r5, r6, r7, r8, r9, ip, abort=19f
                bge     12b
        PLD(    cmn     r2, #96                 )
        PLD(    bge     13b                     )
index 97a6ff4b7e3cab0bd4501498bf8a7433dc9ca08a..c7d08096e35453652b043323f54b69f4a5812842 100644 (file)
@@ -49,7 +49,7 @@
        .endm
 
        .macro ldr1b ptr reg cond=al abort
-       ldr\cond\()b \reg, [\ptr], #1
+       ldrb\cond \reg, [\ptr], #1
        .endm
 
 #ifdef CONFIG_CPU_USE_DOMAINS
index 984e0f29d548b456884e643d9f9337e4cd42fc31..bd84e2db353b17f763d0d876360e6494ff3d15fa 100644 (file)
@@ -40,9 +40,9 @@ td3   .req    lr
                /* we must have at least one byte. */
                tst     buf, #1                 @ odd address?
                movne   sum, sum, ror #8
-               ldrneb  td0, [buf], #1
+               ldrbne  td0, [buf], #1
                subne   len, len, #1
-               adcnes  sum, sum, td0, put_byte_1
+               adcsne  sum, sum, td0, put_byte_1
 
 .Lless4:               tst     len, #6
                beq     .Lless8_byte
@@ -68,8 +68,8 @@ td3   .req    lr
                bne     .Lless8_wordlp
 
 .Lless8_byte:  tst     len, #1                 @ odd number of bytes
-               ldrneb  td0, [buf], #1          @ include last byte
-               adcnes  sum, sum, td0, put_byte_0       @ update checksum
+               ldrbne  td0, [buf], #1          @ include last byte
+               adcsne  sum, sum, td0, put_byte_0       @ update checksum
 
 .Ldone:                adc     r0, sum, #0             @ collect up the last carry
                ldr     td0, [sp], #4
@@ -78,17 +78,17 @@ td3 .req    lr
                ldr     pc, [sp], #4            @ return
 
 .Lnot_aligned: tst     buf, #1                 @ odd address
-               ldrneb  td0, [buf], #1          @ make even
+               ldrbne  td0, [buf], #1          @ make even
                subne   len, len, #1
-               adcnes  sum, sum, td0, put_byte_1       @ update checksum
+               adcsne  sum, sum, td0, put_byte_1       @ update checksum
 
                tst     buf, #2                 @ 32-bit aligned?
 #if __LINUX_ARM_ARCH__ >= 4
-               ldrneh  td0, [buf], #2          @ make 32-bit aligned
+               ldrhne  td0, [buf], #2          @ make 32-bit aligned
                subne   len, len, #2
 #else
-               ldrneb  td0, [buf], #1
-               ldrneb  ip, [buf], #1
+               ldrbne  td0, [buf], #1
+               ldrbne  ip, [buf], #1
                subne   len, len, #2
 #ifndef __ARMEB__
                orrne   td0, td0, ip, lsl #8
@@ -96,7 +96,7 @@ td3   .req    lr
                orrne   td0, ip, td0, lsl #8
 #endif
 #endif
-               adcnes  sum, sum, td0           @ update checksum
+               adcsne  sum, sum, td0           @ update checksum
                ret     lr
 
 ENTRY(csum_partial)
index 10b45909610ca6f4ca6f6f8bdc664b79c2f2bd6f..08e17758cbea9fb08d8293d122494367e9df449d 100644 (file)
@@ -148,9 +148,9 @@ FN_ENTRY
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_2
 .Lexit:                tst     len, #1
-               strneb  r5, [dst], #1
+               strbne  r5, [dst], #1
                andne   r5, r5, #255
-               adcnes  sum, sum, r5, put_byte_0
+               adcsne  sum, sum, r5, put_byte_0
 
                /*
                 * If the dst pointer was not 16-bit aligned, we
index b83fdc06286a64ece150fb7e419bc587e47c3e34..f4716d98e0b4afcce0c1d696cc775cad04b16556 100644 (file)
@@ -95,7 +95,7 @@
                add     r2, r2, r1
                mov     r0, #0                  @ zero the buffer
 9002:          teq     r2, r1
-               strneb  r0, [r1], #1
+               strbne  r0, [r1], #1
                bne     9002b
                load_regs
                .popsection
index a9eafe4981eb847e2f07e0e245aa8e1f1747fa59..4d80f690c48bf1b55e95355053691c5c14126651 100644 (file)
@@ -88,8 +88,8 @@ UNWIND(.fnstart)
        @ Break out early if dividend reaches 0.
 2:     cmp     xh, yl
        orrcs   yh, yh, ip
-       subcss  xh, xh, yl
-       movnes  ip, ip, lsr #1
+       subscs  xh, xh, yl
+       movsne  ip, ip, lsr #1
        mov     yl, yl, lsr #1
        bne     2b
 
index 617150b1baef06e8de8e822852b3dd6d0cec0a2d..de68d3b343e30a47aaa3963ccf4f981b2e557598 100644 (file)
@@ -14,8 +14,8 @@
                .global floppy_fiqin_end
 ENTRY(floppy_fiqin_start)
                subs    r9, r9, #1
-               ldrgtb  r12, [r11, #-4]
-               ldrleb  r12, [r11], #0
+               ldrbgt  r12, [r11, #-4]
+               ldrble  r12, [r11], #0
                strb    r12, [r10], #1
                subs    pc, lr, #4
 floppy_fiqin_end:
@@ -23,10 +23,10 @@ floppy_fiqin_end:
                .global floppy_fiqout_end
 ENTRY(floppy_fiqout_start)
                subs    r9, r9, #1
-               ldrgeb  r12, [r10], #1
+               ldrbge  r12, [r10], #1
                movlt   r12, #0
-               strleb  r12, [r11], #0
-               subles  pc, lr, #4
+               strble  r12, [r11], #0
+               subsle  pc, lr, #4
                strb    r12, [r11, #-4]
                subs    pc, lr, #4
 floppy_fiqout_end:
index c31b2f3153f171fd09602aed2ea9cb8c97797f4d..91038a0a77b57f3d0d5345773d6c72b08dd86f5d 100644 (file)
                cmp     ip, #2
                ldrb    r3, [r0]
                strb    r3, [r1], #1
-               ldrgeb  r3, [r0]
-               strgeb  r3, [r1], #1
-               ldrgtb  r3, [r0]
-               strgtb  r3, [r1], #1
+               ldrbge  r3, [r0]
+               strbge  r3, [r1], #1
+               ldrbgt  r3, [r0]
+               strbgt  r3, [r1], #1
                subs    r2, r2, ip
                bne     .Linsb_aligned
 
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
                bpl     .Linsb_16_lp
 
                tst     r2, #15
-               ldmeqfd sp!, {r4 - r6, pc}
+               ldmfdeq sp!, {r4 - r6, pc}
 
 .Linsb_no_16:  tst     r2, #8
                beq     .Linsb_no_8
@@ -109,15 +109,15 @@ ENTRY(__raw_readsb)
                str     r3, [r1], #4
 
 .Linsb_no_4:   ands    r2, r2, #3
-               ldmeqfd sp!, {r4 - r6, pc}
+               ldmfdeq sp!, {r4 - r6, pc}
 
                cmp     r2, #2
                ldrb    r3, [r0]
                strb    r3, [r1], #1
-               ldrgeb  r3, [r0]
-               strgeb  r3, [r1], #1
-               ldrgtb  r3, [r0]
-               strgtb  r3, [r1]
+               ldrbge  r3, [r0]
+               strbge  r3, [r1], #1
+               ldrbgt  r3, [r0]
+               strbgt  r3, [r1]
 
                ldmfd   sp!, {r4 - r6, pc}
 ENDPROC(__raw_readsb)
index 2ed86fa5465f70cdcb92a46a167d9aa81edad68f..f2e2064318d2142d43464d298d69b146006dc2e5 100644 (file)
@@ -30,7 +30,7 @@ ENTRY(__raw_readsl)
 2:             movs    r2, r2, lsl #31
                ldrcs   r3, [r0, #0]
                ldrcs   ip, [r0, #0]
-               stmcsia r1!, {r3, ip}
+               stmiacs r1!, {r3, ip}
                ldrne   r3, [r0, #0]
                strne   r3, [r1, #0]
                ret     lr
index 413da99145292f3e535b618fee2a5c9c96e114b4..8b25b69c516e79f4ef0580994b744bbfc5b39d58 100644 (file)
@@ -68,7 +68,7 @@ ENTRY(__raw_readsw)
                bpl     .Linsw_8_lp
 
                tst     r2, #7
-               ldmeqfd sp!, {r4, r5, r6, pc}
+               ldmfdeq sp!, {r4, r5, r6, pc}
 
 .Lno_insw_8:   tst     r2, #4
                beq     .Lno_insw_4
@@ -97,9 +97,9 @@ ENTRY(__raw_readsw)
 
 .Lno_insw_2:   tst     r2, #1
                ldrne   r3, [r0]
-               strneb  r3, [r1], #1
+               strbne  r3, [r1], #1
                movne   r3, r3, lsr #8
-               strneb  r3, [r1]
+               strbne  r3, [r1]
 
                ldmfd   sp!, {r4, r5, r6, pc}
 
index d9a45e9692aee3ad1de5dea37653a65cd8c18da4..5efdd66f5dcd695e88b5673264f064d35a615c25 100644 (file)
@@ -76,8 +76,8 @@ ENTRY(__raw_readsw)
                pack    r3, r3, ip
                str     r3, [r1], #4
 
-.Lno_insw_2:   ldrneh  r3, [r0]
-               strneh  r3, [r1]
+.Lno_insw_2:   ldrhne  r3, [r0]
+               strhne  r3, [r1]
 
                ldmfd   sp!, {r4, r5, pc}
 
@@ -94,7 +94,7 @@ ENTRY(__raw_readsw)
 #endif
 
 .Linsw_noalign:        stmfd   sp!, {r4, lr}
-               ldrccb  ip, [r1, #-1]!
+               ldrbcc  ip, [r1, #-1]!
                bcc     1f
 
                ldrh    ip, [r0]
@@ -121,11 +121,11 @@ ENTRY(__raw_readsw)
 
 3:             tst     r2, #1
                strb    ip, [r1], #1
-               ldrneh  ip, [r0]
+               ldrhne  ip, [r0]
    _BE_ONLY_(  movne   ip, ip, ror #8          )
-               strneb  ip, [r1], #1
+               strbne  ip, [r1], #1
    _LE_ONLY_(  movne   ip, ip, lsr #8          )
    _BE_ONLY_(  movne   ip, ip, lsr #24         )
-               strneb  ip, [r1]
+               strbne  ip, [r1]
                ldmfd   sp!, {r4, pc}
 ENDPROC(__raw_readsw)
index a46bbc9b168b45f7016096244eb4933a911d4ac0..7d2881a2381eb01e32336a39384b9bf8edef8db2 100644 (file)
                cmp     ip, #2
                ldrb    r3, [r1], #1
                strb    r3, [r0]
-               ldrgeb  r3, [r1], #1
-               strgeb  r3, [r0]
-               ldrgtb  r3, [r1], #1
-               strgtb  r3, [r0]
+               ldrbge  r3, [r1], #1
+               strbge  r3, [r0]
+               ldrbgt  r3, [r1], #1
+               strbgt  r3, [r0]
                subs    r2, r2, ip
                bne     .Loutsb_aligned
 
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
                bpl     .Loutsb_16_lp
 
                tst     r2, #15
-               ldmeqfd sp!, {r4, r5, pc}
+               ldmfdeq sp!, {r4, r5, pc}
 
 .Loutsb_no_16: tst     r2, #8
                beq     .Loutsb_no_8
@@ -80,15 +80,15 @@ ENTRY(__raw_writesb)
                outword r3
 
 .Loutsb_no_4:  ands    r2, r2, #3
-               ldmeqfd sp!, {r4, r5, pc}
+               ldmfdeq sp!, {r4, r5, pc}
 
                cmp     r2, #2
                ldrb    r3, [r1], #1
                strb    r3, [r0]
-               ldrgeb  r3, [r1], #1
-               strgeb  r3, [r0]
-               ldrgtb  r3, [r1]
-               strgtb  r3, [r0]
+               ldrbge  r3, [r1], #1
+               strbge  r3, [r0]
+               ldrbgt  r3, [r1]
+               strbgt  r3, [r0]
 
                ldmfd   sp!, {r4, r5, pc}
 ENDPROC(__raw_writesb)
index 4ea2435988c1f75d8fddac8ac63a499067d02cae..7596ac0c90b05d4559be1fd344395e5af6b912fd 100644 (file)
@@ -28,7 +28,7 @@ ENTRY(__raw_writesl)
                bpl     1b
                ldmfd   sp!, {r4, lr}
 2:             movs    r2, r2, lsl #31
-               ldmcsia r1!, {r3, ip}
+               ldmiacs r1!, {r3, ip}
                strcs   r3, [r0, #0]
                ldrne   r3, [r1, #0]
                strcs   ip, [r0, #0]
index 121789eb680235f9dad2c8f1492960d2f26fded1..cb94b9b4940569f6c81f5fa316223388ff00939b 100644 (file)
@@ -79,7 +79,7 @@ ENTRY(__raw_writesw)
                bpl     .Loutsw_8_lp
 
                tst     r2, #7
-               ldmeqfd sp!, {r4, r5, r6, pc}
+               ldmfdeq sp!, {r4, r5, r6, pc}
 
 .Lno_outsw_8:  tst     r2, #4
                beq     .Lno_outsw_4
index 269f90c51ad279c63bf4dd9f8bfed8c6827a75d2..e6645b2f249ef225fcb3a57df97a710e5ed8461d 100644 (file)
@@ -61,8 +61,8 @@ ENTRY(__raw_writesw)
                ldr     r3, [r1], #4
                outword r3
 
-.Lno_outsw_2:  ldrneh  r3, [r1]
-               strneh  r3, [r0]
+.Lno_outsw_2:  ldrhne  r3, [r1]
+               strhne  r3, [r0]
 
                ldmfd   sp!, {r4, r5, pc}
 
@@ -95,6 +95,6 @@ ENTRY(__raw_writesw)
 
                tst     r2, #1
 3:             movne   ip, r3, lsr #8
-               strneh  ip, [r0]
+               strhne  ip, [r0]
                ret     lr
 ENDPROC(__raw_writesw)
index 9397b2e532afa3d863930b4e29a663c166ae475e..c23f9d9e29704be4c834185a22d8ca9eefef7013 100644 (file)
@@ -96,7 +96,7 @@ Boston, MA 02111-1307, USA.  */
        subhs   \dividend, \dividend, \divisor, lsr #3
        orrhs   \result,   \result,   \curbit,  lsr #3
        cmp     \dividend, #0                   @ Early termination?
-       movnes  \curbit,   \curbit,  lsr #4     @ No, any more bits to do?
+       movsne  \curbit,   \curbit,  lsr #4     @ No, any more bits to do?
        movne   \divisor,  \divisor, lsr #4
        bne     1b
 
@@ -182,7 +182,7 @@ Boston, MA 02111-1307, USA.  */
        subhs   \dividend, \dividend, \divisor, lsr #3
        cmp     \dividend, #1
        mov     \divisor, \divisor, lsr #4
-       subges  \order, \order, #4
+       subsge  \order, \order, #4
        bge     1b
 
        tst     \order, #3
index 64111bd4440b1aa3702c469ce349b303a0244ebd..4a6997bb4404316a98268c4394b236aa395a2721 100644 (file)
@@ -30,7 +30,7 @@
        .endm
 
        .macro ldr1b ptr reg cond=al abort
-       ldr\cond\()b \reg, [\ptr], #1
+       ldrb\cond \reg, [\ptr], #1
        .endm
 
        .macro str1w ptr reg abort
@@ -42,7 +42,7 @@
        .endm
 
        .macro str1b ptr reg cond=al abort
-       str\cond\()b \reg, [\ptr], #1
+       strb\cond \reg, [\ptr], #1
        .endm
 
        .macro enter reg1 reg2
index 69a9d47fc5abdcb9f1801cbfe249eaed99b00d99..d70304cb2cd0ddc36a521fbc75713d05210f28f0 100644 (file)
@@ -59,7 +59,7 @@ ENTRY(memmove)
                blt     5f
 
        CALGN(  ands    ip, r0, #31             )
-       CALGN(  sbcnes  r4, ip, r2              )  @ C is always set here
+       CALGN(  sbcsne  r4, ip, r2              )  @ C is always set here
        CALGN(  bcs     2f                      )
        CALGN(  adr     r4, 6f                  )
        CALGN(  subs    r2, r2, ip              )  @ C is set here
@@ -114,20 +114,20 @@ ENTRY(memmove)
        UNWIND( .save   {r0, r4, lr}            ) @ still in first stmfd block
 
 8:             movs    r2, r2, lsl #31
-               ldrneb  r3, [r1, #-1]!
-               ldrcsb  r4, [r1, #-1]!
-               ldrcsb  ip, [r1, #-1]
-               strneb  r3, [r0, #-1]!
-               strcsb  r4, [r0, #-1]!
-               strcsb  ip, [r0, #-1]
+               ldrbne  r3, [r1, #-1]!
+               ldrbcs  r4, [r1, #-1]!
+               ldrbcs  ip, [r1, #-1]
+               strbne  r3, [r0, #-1]!
+               strbcs  r4, [r0, #-1]!
+               strbcs  ip, [r0, #-1]
                ldmfd   sp!, {r0, r4, pc}
 
 9:             cmp     ip, #2
-               ldrgtb  r3, [r1, #-1]!
-               ldrgeb  r4, [r1, #-1]!
+               ldrbgt  r3, [r1, #-1]!
+               ldrbge  r4, [r1, #-1]!
                ldrb    lr, [r1, #-1]!
-               strgtb  r3, [r0, #-1]!
-               strgeb  r4, [r0, #-1]!
+               strbgt  r3, [r0, #-1]!
+               strbge  r4, [r0, #-1]!
                subs    r2, r2, ip
                strb    lr, [r0, #-1]!
                blt     8b
@@ -150,7 +150,7 @@ ENTRY(memmove)
                blt     14f
 
        CALGN(  ands    ip, r0, #31             )
-       CALGN(  sbcnes  r4, ip, r2              )  @ C is always set here
+       CALGN(  sbcsne  r4, ip, r2              )  @ C is always set here
        CALGN(  subcc   r2, r2, ip              )
        CALGN(  bcc     15f                     )
 
index ed6d35d9cdb5a6288f70d116e6914f803455006e..5593a45e0a8c69a54c31bcc00056554a4d65b145 100644 (file)
@@ -44,20 +44,20 @@ UNWIND( .save {r8, lr}      )
        mov     lr, r3
 
 2:     subs    r2, r2, #64
-       stmgeia ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
-       stmgeia ip!, {r1, r3, r8, lr}
-       stmgeia ip!, {r1, r3, r8, lr}
-       stmgeia ip!, {r1, r3, r8, lr}
+       stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
+       stmiage ip!, {r1, r3, r8, lr}
+       stmiage ip!, {r1, r3, r8, lr}
+       stmiage ip!, {r1, r3, r8, lr}
        bgt     2b
-       ldmeqfd sp!, {r8, pc}           @ Now <64 bytes to go.
+       ldmfdeq sp!, {r8, pc}           @ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
        tst     r2, #32
-       stmneia ip!, {r1, r3, r8, lr}
-       stmneia ip!, {r1, r3, r8, lr}
+       stmiane ip!, {r1, r3, r8, lr}
+       stmiane ip!, {r1, r3, r8, lr}
        tst     r2, #16
-       stmneia ip!, {r1, r3, r8, lr}
+       stmiane ip!, {r1, r3, r8, lr}
        ldmfd   sp!, {r8, lr}
 UNWIND( .fnend              )
 
@@ -87,22 +87,22 @@ UNWIND( .save {r4-r8, lr}      )
        rsb     r8, r8, #32
        sub     r2, r2, r8
        movs    r8, r8, lsl #(32 - 4)
-       stmcsia ip!, {r4, r5, r6, r7}
-       stmmiia ip!, {r4, r5}
+       stmiacs ip!, {r4, r5, r6, r7}
+       stmiami ip!, {r4, r5}
        tst     r8, #(1 << 30)
        mov     r8, r1
        strne   r1, [ip], #4
 
 3:     subs    r2, r2, #64
-       stmgeia ip!, {r1, r3-r8, lr}
-       stmgeia ip!, {r1, r3-r8, lr}
+       stmiage ip!, {r1, r3-r8, lr}
+       stmiage ip!, {r1, r3-r8, lr}
        bgt     3b
-       ldmeqfd sp!, {r4-r8, pc}
+       ldmfdeq sp!, {r4-r8, pc}
 
        tst     r2, #32
-       stmneia ip!, {r1, r3-r8, lr}
+       stmiane ip!, {r1, r3-r8, lr}
        tst     r2, #16
-       stmneia ip!, {r4-r7}
+       stmiane ip!, {r4-r7}
        ldmfd   sp!, {r4-r8, lr}
 UNWIND( .fnend                 )
 
@@ -110,7 +110,7 @@ UNWIND( .fnend                 )
 
 UNWIND( .fnstart            )
 4:     tst     r2, #8
-       stmneia ip!, {r1, r3}
+       stmiane ip!, {r1, r3}
        tst     r2, #4
        strne   r1, [ip], #4
 /*
@@ -118,17 +118,17 @@ UNWIND( .fnstart            )
  * may have an unaligned pointer as well.
  */
 5:     tst     r2, #2
-       strneb  r1, [ip], #1
-       strneb  r1, [ip], #1
+       strbne  r1, [ip], #1
+       strbne  r1, [ip], #1
        tst     r2, #1
-       strneb  r1, [ip], #1
+       strbne  r1, [ip], #1
        ret     lr
 
 6:     subs    r2, r2, #4              @ 1 do we have enough
        blt     5b                      @ 1 bytes to align with?
        cmp     r3, #2                  @ 1
-       strltb  r1, [ip], #1            @ 1
-       strleb  r1, [ip], #1            @ 1
+       strblt  r1, [ip], #1            @ 1
+       strble  r1, [ip], #1            @ 1
        strb    r1, [ip], #1            @ 1
        add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
        b       1b
index 2c40aeab3eaae8cb038a283b6fa2dc422d744d08..c691b901092f55a8f251c186a6938ba19d79f6ec 100644 (file)
@@ -14,7 +14,7 @@
 MODULE_LICENSE("GPL");
 
 #ifndef __ARM_NEON__
-#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
 #endif
 
 /*
index 3efaa10efc43929010c20aa31a453de58a633fbd..4fd479c948e670429449fe1bd170bc448f4bc27f 100644 (file)
@@ -39,10 +39,6 @@ static void __iomem *sps_base_addr;
 static void __iomem *timer_base_addr;
 static int ncores;
 
-static DEFINE_SPINLOCK(boot_lock);
-
-void owl_secondary_startup(void);
-
 static int s500_wakeup_secondary(unsigned int cpu)
 {
        int ret;
@@ -84,7 +80,6 @@ static int s500_wakeup_secondary(unsigned int cpu)
 
 static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       unsigned long timeout;
        int ret;
 
        ret = s500_wakeup_secondary(cpu);
@@ -93,21 +88,11 @@ static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
        udelay(10);
 
-       spin_lock(&boot_lock);
-
        smp_send_reschedule(cpu);
 
-       timeout = jiffies + (1 * HZ);
-       while (time_before(jiffies, timeout)) {
-               if (pen_release == -1)
-                       break;
-       }
-
        writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
        writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
 
-       spin_unlock(&boot_lock);
-
        return 0;
 }
 
index 005695c9bf4006130719acc36e0ee2f4b7ae7797..0ac2cb9a735568613c3dc7cdd52b599945e0134c 100644 (file)
@@ -36,4 +36,4 @@ ENDPROC(exynos4_secondary_startup)
 
        .align 2
 1:     .long   .
-       .long   pen_release
+       .long   exynos_pen_release
index b6da7edbbd2fd1a7d27dbf62e4b9c73a79747e31..abcac616423319bf0634bce260a85c3fe9134b60 100644 (file)
@@ -28,6 +28,9 @@
 
 extern void exynos4_secondary_startup(void);
 
+/* XXX exynos_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int exynos_pen_release = -1;
+
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void cpu_leave_lowpower(u32 core_id)
 {
@@ -57,7 +60,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 
                wfi();
 
-               if (pen_release == core_id) {
+               if (exynos_pen_release == core_id) {
                        /*
                         * OK, proper wakeup, we're done
                         */
@@ -228,15 +231,17 @@ void exynos_core_restart(u32 core_id)
 }
 
 /*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * XXX CARGO CULTED CODE - DO NOT COPY XXX
+ *
+ * Write exynos_pen_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void exynos_write_pen_release(int val)
 {
-       pen_release = val;
+       exynos_pen_release = val;
        smp_wmb();
-       sync_cache_w(&pen_release);
+       sync_cache_w(&exynos_pen_release);
 }
 
 static DEFINE_SPINLOCK(boot_lock);
@@ -247,7 +252,7 @@ static void exynos_secondary_init(unsigned int cpu)
         * let the primary processor know we're out of the
         * pen, then head off into the C entry point
         */
-       write_pen_release(-1);
+       exynos_write_pen_release(-1);
 
        /*
         * Synchronise with the boot thread.
@@ -322,12 +327,12 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
        /*
         * The secondary processor is waiting to be released from
         * the holding pen - release it, then wait for it to flag
-        * that it has been released by resetting pen_release.
+        * that it has been released by resetting exynos_pen_release.
         *
-        * Note that "pen_release" is the hardware CPU core ID, whereas
+        * Note that "exynos_pen_release" is the hardware CPU core ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       write_pen_release(core_id);
+       exynos_write_pen_release(core_id);
 
        if (!exynos_cpu_power_state(core_id)) {
                exynos_cpu_power_up(core_id);
@@ -376,13 +381,13 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
                else
                        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
-               if (pen_release == -1)
+               if (exynos_pen_release == -1)
                        break;
 
                udelay(10);
        }
 
-       if (pen_release != -1)
+       if (exynos_pen_release != -1)
                ret = -ETIMEDOUT;
 
        /*
@@ -392,7 +397,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 fail:
        spin_unlock(&boot_lock);
 
-       return pen_release != -1 ? ret : 0;
+       return exynos_pen_release != -1 ? ret : 0;
 }
 
 static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
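
From here on the series removes the old global pen_release from core SMP code, and the few platforms that still rely on a boot-time holding pen (exynos, prima2, spear) carry their own explicitly cargo-cult-labelled copy. A hedged, two-sided sketch of the handshake those variables implement (illustrative only, in the spirit of the in-tree "DO NOT COPY" warning):

    /* publish() runs on the boot CPU, wait_in_pen() on the secondary before
     * it enters the kernel proper; the real code adds smp_wmb()/sync_cache_w()
     * and sits in wfi/wfe between polls. */
    static volatile int pen_release_example = -1;

    static void publish(int hw_cpu_id)
    {
            pen_release_example = hw_cpu_id;        /* release exactly this core */
    }

    static void wait_in_pen(int my_hw_id)
    {
            while (pen_release_example != my_hw_id)
                    ;                               /* spin (wfi in the real code) */

            pen_release_example = -1;               /* tell the boot CPU we are out */
    }
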
index 8315b34f32ff00923738c48bf9d654a37b3c5f43..7ff812cb010bb5e53f7cca7c1cb7552c1a2974a2 100644 (file)
@@ -42,6 +42,6 @@
                moveq   \irqstat, \irqstat, lsr #2
                addeq   \irqnr, \irqnr, #2
                tst     \irqstat, #0x01
-               addeqs  \irqnr, \irqnr, #1
+               addseq  \irqnr, \irqnr, #1
 1001:
        .endm
index 058a37e6d11c34955ab37f4df9833cdb0166fb6c..fd6e0671f957342e06e0a1601837f221969a01af 100644 (file)
@@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
 
        prm_ll_data->reset_system();
 
-       while (1)
+       while (1) {
                cpu_relax();
+               wfe();
+       }
 }
 
 /**
index b625906a99702d7ac36941401ef4c67aa475b067..61a34e1c0f2217f3a8eb7da0e09909dff832d2c4 100644 (file)
@@ -1,2 +1 @@
 obj-$(CONFIG_SMP)              += platsmp.o headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU)      += hotplug.o
diff --git a/arch/arm/mach-oxnas/hotplug.c b/arch/arm/mach-oxnas/hotplug.c
deleted file mode 100644 (file)
index 854f29b..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  Copyright (C) 2002 ARM Ltd.
- *  All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-
-#include <asm/cp15.h>
-#include <asm/smp_plat.h>
-
-static inline void cpu_enter_lowpower(void)
-{
-       unsigned int v;
-
-       asm volatile(
-       "       mcr     p15, 0, %1, c7, c5, 0\n"
-       "       mcr     p15, 0, %1, c7, c10, 4\n"
-       /*
-        * Turn off coherency
-        */
-       "       mrc     p15, 0, %0, c1, c0, 1\n"
-       "       bic     %0, %0, #0x20\n"
-       "       mcr     p15, 0, %0, c1, c0, 1\n"
-       "       mrc     p15, 0, %0, c1, c0, 0\n"
-       "       bic     %0, %0, %2\n"
-       "       mcr     p15, 0, %0, c1, c0, 0\n"
-         : "=&r" (v)
-         : "r" (0), "Ir" (CR_C)
-         : "cc");
-}
-
-static inline void cpu_leave_lowpower(void)
-{
-       unsigned int v;
-
-       asm volatile(   "mrc    p15, 0, %0, c1, c0, 0\n"
-       "       orr     %0, %0, %1\n"
-       "       mcr     p15, 0, %0, c1, c0, 0\n"
-       "       mrc     p15, 0, %0, c1, c0, 1\n"
-       "       orr     %0, %0, #0x20\n"
-       "       mcr     p15, 0, %0, c1, c0, 1\n"
-         : "=&r" (v)
-         : "Ir" (CR_C)
-         : "cc");
-}
-
-static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
-{
-       /*
-        * there is no power-control hardware on this platform, so all
-        * we can do is put the core into WFI; this is safe as the calling
-        * code will have already disabled interrupts
-        */
-       for (;;) {
-               /*
-                * here's the WFI
-                */
-               asm(".word      0xe320f003\n"
-                   :
-                   :
-                   : "memory", "cc");
-
-               if (pen_release == cpu_logical_map(cpu)) {
-                       /*
-                        * OK, proper wakeup, we're done
-                        */
-                       break;
-               }
-
-               /*
-                * Getting here, means that we have come out of WFI without
-                * having been woken up - this shouldn't happen
-                *
-                * Just note it happening - when we're woken, we can report
-                * its occurrence.
-                */
-               (*spurious)++;
-       }
-}
-
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
-void ox820_cpu_die(unsigned int cpu)
-{
-       int spurious = 0;
-
-       /*
-        * we're ready for shutdown now, so do it
-        */
-       cpu_enter_lowpower();
-       platform_do_lowpower(cpu, &spurious);
-
-       /*
-        * bring this CPU back into the world of cache
-        * coherency, and then restore interrupts
-        */
-       cpu_leave_lowpower();
-
-       if (spurious)
-               pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
-}
index 442cc8a2f7dc81e43a88c2ef6510ba177218ac09..735141c0e3a377275f3c241f8dc181edee2c2c98 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/smp_scu.h>
 
 extern void ox820_secondary_startup(void);
-extern void ox820_cpu_die(unsigned int cpu);
 
 static void __iomem *cpu_ctrl;
 static void __iomem *gic_cpu_ctrl;
@@ -94,9 +93,6 @@ unmap_scu:
 static const struct smp_operations ox820_smp_ops __initconst = {
        .smp_prepare_cpus       = ox820_smp_prepare_cpus,
        .smp_boot_secondary     = ox820_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
-       .cpu_die                = ox820_cpu_die,
-#endif
 };
 
 CPU_METHOD_OF_DECLARE(ox820_smp, "oxsemi,ox820-smp", &ox820_smp_ops);
index 6d77b622d168502978369df3613abf0763e0563f..457eb7b1816007edde0307f4f80b5c5537438b1f 100644 (file)
@@ -15,6 +15,8 @@
 #include <asm/mach/time.h>
 #include <asm/exception.h>
 
+extern volatile int prima2_pen_release;
+
 extern const struct smp_operations sirfsoc_smp_ops;
 extern void sirfsoc_secondary_startup(void);
 extern void sirfsoc_cpu_die(unsigned int cpu);
index 209d9fc5c16cf49909434ac243c1f794f3d22f81..6cf4fc60347b5fdad94708f739c811c39777c80e 100644 (file)
@@ -34,4 +34,4 @@ ENDPROC(sirfsoc_secondary_startup)
 
         .align
 1:      .long   .
-        .long   pen_release
+        .long   prima2_pen_release
index a728c78b996f7fa0e1050f9e79c775cfa14b42da..b6cf1527e3309ce3ee0a18f20189c1be0099bd39 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 
 #include <asm/smp_plat.h>
+#include "common.h"
 
 static inline void platform_do_lowpower(unsigned int cpu)
 {
@@ -18,7 +19,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
        for (;;) {
                __asm__ __volatile__("dsb\n\t" "wfi\n\t"
                        : : : "memory");
-               if (pen_release == cpu_logical_map(cpu)) {
+               if (prima2_pen_release == cpu_logical_map(cpu)) {
                        /*
                         * OK, proper wakeup, we're done
                         */
index 75ef5d4be554ce9f8564f347e52da1e6766bf5ac..d1f8b5168083c345ec6e4481a65b69be68661a99 100644 (file)
@@ -24,13 +24,16 @@ static void __iomem *clk_base;
 
 static DEFINE_SPINLOCK(boot_lock);
 
+/* XXX prima2_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int prima2_pen_release = -1;
+
 static void sirfsoc_secondary_init(unsigned int cpu)
 {
        /*
         * let the primary processor know we're out of the
         * pen, then head off into the C entry point
         */
-       pen_release = -1;
+       prima2_pen_release = -1;
        smp_wmb();
 
        /*
@@ -80,13 +83,13 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
        /*
         * The secondary processor is waiting to be released from
         * the holding pen - release it, then wait for it to flag
-        * that it has been released by resetting pen_release.
+        * that it has been released by resetting prima2_pen_release.
         *
-        * Note that "pen_release" is the hardware CPU ID, whereas
+        * Note that "prima2_pen_release" is the hardware CPU ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       pen_release = cpu_logical_map(cpu);
-       sync_cache_w(&pen_release);
+       prima2_pen_release = cpu_logical_map(cpu);
+       sync_cache_w(&prima2_pen_release);
 
        /*
         * Send the secondary CPU SEV, thereby causing the boot monitor to read
@@ -97,7 +100,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
        timeout = jiffies + (1 * HZ);
        while (time_before(jiffies, timeout)) {
                smp_rmb();
-               if (pen_release == -1)
+               if (prima2_pen_release == -1)
                        break;
 
                udelay(10);
@@ -109,7 +112,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
         */
        spin_unlock(&boot_lock);
 
-       return pen_release != -1 ? -ENOSYS : 0;
+       return prima2_pen_release != -1 ? -ENOSYS : 0;
 }
 
 const struct smp_operations sirfsoc_smp_ops __initconst = {
index 5494c9e0c909b549ec696a9482852c339a4039a9..99a6a5e809e0e953545c552001d7ffbb44fb6ad8 100644 (file)
@@ -46,8 +46,6 @@
 
 extern void secondary_startup_arm(void);
 
-static DEFINE_SPINLOCK(boot_lock);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static void qcom_cpu_die(unsigned int cpu)
 {
@@ -55,15 +53,6 @@ static void qcom_cpu_die(unsigned int cpu)
 }
 #endif
 
-static void qcom_secondary_init(unsigned int cpu)
-{
-       /*
-        * Synchronise with the boot thread.
-        */
-       spin_lock(&boot_lock);
-       spin_unlock(&boot_lock);
-}
-
 static int scss_release_secondary(unsigned int cpu)
 {
        struct device_node *node;
@@ -280,12 +269,6 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
                        per_cpu(cold_boot_done, cpu) = true;
        }
 
-       /*
-        * set synchronisation state between this boot processor
-        * and the secondary one
-        */
-       spin_lock(&boot_lock);
-
        /*
         * Send the secondary CPU a soft interrupt, thereby causing
         * the boot monitor to read the system wide flags register,
@@ -293,12 +276,6 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
         */
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
-       /*
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
-       spin_unlock(&boot_lock);
-
        return ret;
 }
 
@@ -334,7 +311,6 @@ static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
 
 static const struct smp_operations smp_msm8660_ops __initconst = {
        .smp_prepare_cpus       = qcom_smp_prepare_cpus,
-       .smp_secondary_init     = qcom_secondary_init,
        .smp_boot_secondary     = msm8660_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = qcom_cpu_die,
@@ -344,7 +320,6 @@ CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops);
 
 static const struct smp_operations qcom_smp_kpssv1_ops __initconst = {
        .smp_prepare_cpus       = qcom_smp_prepare_cpus,
-       .smp_secondary_init     = qcom_secondary_init,
        .smp_boot_secondary     = kpssv1_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = qcom_cpu_die,
@@ -354,7 +329,6 @@ CPU_METHOD_OF_DECLARE(qcom_smp_kpssv1, "qcom,kpss-acc-v1", &qcom_smp_kpssv1_ops)
 
 static const struct smp_operations qcom_smp_kpssv2_ops __initconst = {
        .smp_prepare_cpus       = qcom_smp_prepare_cpus,
-       .smp_secondary_init     = qcom_secondary_init,
        .smp_boot_secondary     = kpssv2_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = qcom_cpu_die,
index 909b97c0b23719010d4950532bcfada3f5ea1c30..25b4c5e66e39610e34de8ba0128e1b43ac18ebb0 100644 (file)
@@ -20,6 +20,8 @@
 
 #include <asm/mach/time.h>
 
+extern volatile int spear_pen_release;
+
 extern void spear13xx_timer_init(void);
 extern void spear3xx_timer_init(void);
 extern struct pl022_ssp_controller pl022_plat_data;
index c52192dc3d9f9e3040bf5f5c49c7ce8d644276d6..6e250b6c0aa230815e41e37c4090152107d1cfd5 100644 (file)
@@ -43,5 +43,5 @@ pen:  ldr     r7, [r6]
 
        .align
 1:     .long   .
-       .long   pen_release
+       .long   spear_pen_release
 ENDPROC(spear13xx_secondary_startup)
index 12edd1cf8a12f11a2a07851f59bc5747e925cd36..0dd84f609627ac7194679ea49a4259f6620d13dc 100644 (file)
@@ -16,6 +16,8 @@
 #include <asm/cp15.h>
 #include <asm/smp_plat.h>
 
+#include "generic.h"
+
 static inline void cpu_enter_lowpower(void)
 {
        unsigned int v;
@@ -57,7 +59,7 @@ static inline void spear13xx_do_lowpower(unsigned int cpu, int *spurious)
        for (;;) {
                wfi();
 
-               if (pen_release == cpu) {
+               if (spear_pen_release == cpu) {
                        /*
                         * OK, proper wakeup, we're done
                         */
index 39038a03836acb8f3288488f063a99d5ef0f814c..b1ff4bb86f6d8aaeaa6c662b07a05a5a26847ed8 100644 (file)
 #include <mach/spear.h>
 #include "generic.h"
 
+/* XXX spear_pen_release is cargo culted code - DO NOT COPY XXX */
+volatile int spear_pen_release = -1;
+
 /*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * XXX CARGO CULTED CODE - DO NOT COPY XXX
+ *
+ * Write spear_pen_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void spear_write_pen_release(int val)
 {
-       pen_release = val;
+       spear_pen_release = val;
        smp_wmb();
-       sync_cache_w(&pen_release);
+       sync_cache_w(&spear_pen_release);
 }
 
 static DEFINE_SPINLOCK(boot_lock);
@@ -42,7 +47,7 @@ static void spear13xx_secondary_init(unsigned int cpu)
         * let the primary processor know we're out of the
         * pen, then head off into the C entry point
         */
-       write_pen_release(-1);
+       spear_write_pen_release(-1);
 
        /*
         * Synchronise with the boot thread.
@@ -64,17 +69,17 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
        /*
         * The secondary processor is waiting to be released from
         * the holding pen - release it, then wait for it to flag
-        * that it has been released by resetting pen_release.
+        * that it has been released by resetting spear_pen_release.
         *
-        * Note that "pen_release" is the hardware CPU ID, whereas
+        * Note that "spear_pen_release" is the hardware CPU ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       write_pen_release(cpu);
+       spear_write_pen_release(cpu);
 
        timeout = jiffies + (1 * HZ);
        while (time_before(jiffies, timeout)) {
                smp_rmb();
-               if (pen_release == -1)
+               if (spear_pen_release == -1)
                        break;
 
                udelay(10);
@@ -86,7 +91,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
         */
        spin_unlock(&boot_lock);
 
-       return pen_release != -1 ? -ENOSYS : 0;
+       return spear_pen_release != -1 ? -ENOSYS : 0;
 }
 
 /*
index 805f306fa6f707f055878a31f00a2f412a89f9c5..e22ccf87eded394ff99df3187ddf5309f886fdc4 100644 (file)
@@ -172,7 +172,7 @@ after_errata:
        mov32   r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
        mov     r0, #CPU_NOT_RESETTABLE
        cmp     r10, #0
-       strneb  r0, [r5, #__tegra20_cpu1_resettable_status_offset]
+       strbne  r0, [r5, #__tegra20_cpu1_resettable_status_offset]
 1:
 #endif
 
index 24659952c2784de64a53dc2e889ab616bd19b12b..be68d62566c7cd86f5ffd3aabfd9b1bd1e9a70de 100644 (file)
@@ -215,8 +215,8 @@ v6_dma_inv_range:
 #endif
        tst     r1, #D_CACHE_LINE_SIZE - 1
 #ifdef CONFIG_DMA_CACHE_RWFO
-       ldrneb  r2, [r1, #-1]                   @ read for ownership
-       strneb  r2, [r1, #-1]                   @ write for ownership
+       ldrbne  r2, [r1, #-1]                   @ read for ownership
+       strbne  r2, [r1, #-1]                   @ write for ownership
 #endif
        bic     r1, r1, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -284,8 +284,8 @@ ENTRY(v6_dma_flush_range)
        add     r0, r0, #D_CACHE_LINE_SIZE
        cmp     r0, r1
 #ifdef CONFIG_DMA_CACHE_RWFO
-       ldrlob  r2, [r0]                        @ read for ownership
-       strlob  r2, [r0]                        @ write for ownership
+       ldrblo  r2, [r0]                        @ read for ownership
+       strblo  r2, [r0]                        @ write for ownership
 #endif
        blo     1b
        mov     r0, #0
index b03202cddddb2d07bf2fcfe3ee2d9d118066f846..f74cdce6d4dad47fd51ab18212e2d7979fbfc34f 100644 (file)
@@ -45,6 +45,7 @@ static void mc_copy_user_page(void *from, void *to)
        int tmp;
 
        asm volatile ("\
+       .syntax unified\n\
        ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
 1:     mcr     p15, 0, %1, c7, c6, 1           @ 1   invalidate D line\n\
        stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
@@ -56,7 +57,7 @@ static void mc_copy_user_page(void *from, void *to)
        ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
        subs    %2, %2, #1                      @ 1\n\
        stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
-       ldmneia %0!, {r2, r3, ip, lr}           @ 4\n\
+       ldmiane %0!, {r2, r3, ip, lr}           @ 4\n\
        bne     1b                              @ "
        : "+&r" (from), "+&r" (to), "=&r" (tmp)
        : "2" (PAGE_SIZE / 64)
index cd3e165afeedeb400c19b1dbb1b578e10b0d2400..6d336740aae49374c37946dc292cf5270dc5629a 100644 (file)
@@ -27,6 +27,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
        int tmp;
 
        asm volatile ("\
+       .syntax unified\n\
        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
 1:     mcr     p15, 0, %0, c7, c6, 1           @ 1   invalidate D line\n\
        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
@@ -38,7 +39,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        subs    %2, %2, #1                      @ 1\n\
        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
+       ldmiane %1!, {r3, r4, ip, lr}           @ 4\n\
        bne     1b                              @ 1\n\
        mcr     p15, 0, %1, c7, c10, 4          @ 1   drain WB"
        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
index 8614572e1296ba904a018fd07b2dfe66843a5272..3851bb39644286bd49122cc0cd16b3df58ba2d07 100644 (file)
@@ -25,6 +25,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
        int tmp;
 
        asm volatile ("\
+       .syntax unified\n\
        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
 1:     stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
        ldmia   %1!, {r3, r4, ip, lr}           @ 4+1\n\
@@ -34,7 +35,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
        subs    %2, %2, #1                      @ 1\n\
        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
+       ldmiane %1!, {r3, r4, ip, lr}           @ 4\n\
        bne     1b                              @ 1\n\
        mcr     p15, 0, %2, c7, c7, 0           @ flush ID cache"
        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
index c6aab9c36ff189b98373586cf619324b0ebb6160..43f46aa7ef3351e6cc278d42ea8ab7d0dc861dcb 100644 (file)
@@ -2279,7 +2279,7 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
  * @dev: valid struct device pointer
  *
  * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
  */
 void arm_iommu_detach_device(struct device *dev)
 {
index 1d1edd0641995490b520b690f2ecbe3410d6524a..a033f6134a6499030252585eb933505569ecaed1 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <asm/cputype.h>
 #include <asm/idmap.h>
+#include <asm/hwcap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
@@ -110,7 +111,8 @@ static int __init init_static_idmap(void)
                             __idmap_text_end, 0);
 
        /* Flush L1 for the hardware to see this page table content */
-       flush_cache_louis();
+       if (!(elf_hwcap & HWCAP_LPAE))
+               flush_cache_louis();
 
        return 0;
 }
index 15dddfe43319547d2e3c81a80a33f0315a3e2c55..c2daabbe0af05da23a469efe0ea2ed1a6eb2eb7b 100644 (file)
@@ -282,15 +282,12 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
 
 void __init bootmem_init(void)
 {
-       unsigned long min, max_low, max_high;
-
        memblock_allow_resize();
-       max_low = max_high = 0;
 
-       find_limits(&min, &max_low, &max_high);
+       find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
 
-       early_memtest((phys_addr_t)min << PAGE_SHIFT,
-                     (phys_addr_t)max_low << PAGE_SHIFT);
+       early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+                     (phys_addr_t)max_low_pfn << PAGE_SHIFT);
 
        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
@@ -308,16 +305,7 @@ void __init bootmem_init(void)
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
-       zone_sizes_init(min, max_low, max_high);
-
-       /*
-        * This doesn't seem to be used by the Linux memory manager any
-        * more, but is used by ll_rw_block.  If we can get rid of it, we
-        * also get rid of some of the stuff above as well.
-        */
-       min_low_pfn = min;
-       max_low_pfn = max_low;
-       max_pfn = max_high;
+       zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
 }
 
 /*
@@ -498,55 +486,6 @@ void __init mem_init(void)
 
        mem_init_print_info(NULL);
 
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
-       pr_notice("Virtual kernel memory layout:\n"
-                       "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#ifdef CONFIG_HAVE_TCM
-                       "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-                       "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-#endif
-                       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
-                       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-                       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
-                       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
-                       "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
-#endif
-                       "      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
-                       "      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
-                       "      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
-                       "       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",
-
-                       MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
-#ifdef CONFIG_HAVE_TCM
-                       MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
-                       MLK(ITCM_OFFSET, (unsigned long) itcm_end),
-#endif
-                       MLK(FIXADDR_START, FIXADDR_END),
-                       MLM(VMALLOC_START, VMALLOC_END),
-                       MLM(PAGE_OFFSET, (unsigned long)high_memory),
-#ifdef CONFIG_HIGHMEM
-                       MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
-                               (PAGE_SIZE)),
-#endif
-#ifdef CONFIG_MODULES
-                       MLM(MODULES_VADDR, MODULES_END),
-#endif
-
-                       MLK_ROUNDUP(_text, _etext),
-                       MLK_ROUNDUP(__init_begin, __init_end),
-                       MLK_ROUNDUP(_sdata, _edata),
-                       MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
index 617a83def88a9f8e5d907b7a9965544a26ba1b40..0d7d5fb59247d42038e0c69dcec3d89499c77cd7 100644 (file)
@@ -165,7 +165,7 @@ static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_a
                return -EINVAL;
 
        bar = start;
-       lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+       lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
 
        bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
@@ -181,7 +181,7 @@ static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_ad
                return -EINVAL;
 
        bar = start;
-       lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+       lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
 
        bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
        lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
index 47a5acc644333f7f995293ef6b3dc6fb3527270a..acd5a66dfc23bb2dbe2b95e03453cbd3df2a999b 100644 (file)
@@ -139,6 +139,9 @@ __v7m_setup_cont:
        cpsie   i
        svc     #0
 1:     cpsid   i
+       ldr     r0, =exc_ret
+       orr     lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
+       str     lr, [r0]
        ldmia   sp, {r0-r3, r12}
        str     r5, [r12, #11 * 4]      @ restore the original SVC vector entry
        mov     lr, r6                  @ restore LR
@@ -149,10 +152,10 @@ __v7m_setup_cont:
 
        @ Configure caches (if implemented)
        teq     r8, #0
-       stmneia sp, {r0-r6, lr}         @ v7m_invalidate_l1 touches r0-r6
+       stmiane sp, {r0-r6, lr}         @ v7m_invalidate_l1 touches r0-r6
        blne    v7m_invalidate_l1
        teq     r8, #0                  @ re-evaluate condition
-       ldmneia sp, {r0-r6, lr}
+       ldmiane sp, {r0-r6, lr}
 
        @ Configure the System Control Register to ensure 8-byte stack alignment
        @ Note the STKALIGN bit is either RW or RAO.
index 506386a3eddecd2e12decd7a19ab203932e2293c..d3842791e1c42a2a9bc8b62238b1658c1e650193 100644 (file)
@@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;
+
+       if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+           vcpu_el1_is_32bit(vcpu))
+               vcpu->arch.hcr_el2 |= HCR_TID2;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
        return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+       if (kvm_vcpu_trap_is_iabt(vcpu))
+               return false;
+
+       return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
index 222af1d2c3e4ac50695b053d6c3994c1e6047b25..a01fe087e022882d63f50e6fb766b13217a38208 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/smp_plat.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -58,16 +59,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
-struct kvm_arch {
+struct kvm_vmid {
        /* The VMID generation used for the virt. memory system */
        u64    vmid_gen;
        u32    vmid;
+};
+
+struct kvm_arch {
+       struct kvm_vmid vmid;
 
        /* stage2 entry level table */
        pgd_t *pgd;
+       phys_addr_t pgd_phys;
 
-       /* VTTBR value associated with above pgd and vmid */
-       u64    vttbr;
        /* VTCR_EL2 value for this VM */
        u64    vtcr;
 
@@ -382,7 +386,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
+/*
+ * The couple of isb() below are there to guarantee the same behaviour
+ * on VHE as on !VHE, where the eret to EL1 acts as a context
+ * synchronization event.
+ */
+#define kvm_call_hyp(f, ...)                                           \
+       do {                                                            \
+               if (has_vhe()) {                                        \
+                       f(__VA_ARGS__);                                 \
+                       isb();                                          \
+               } else {                                                \
+                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+               }                                                       \
+       } while(0)
+
+#define kvm_call_hyp_ret(f, ...)                                       \
+       ({                                                              \
+               typeof(f(__VA_ARGS__)) ret;                             \
+                                                                       \
+               if (has_vhe()) {                                        \
+                       ret = f(__VA_ARGS__);                           \
+                       isb();                                          \
+               } else {                                                \
+                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
+                                            ##__VA_ARGS__);            \
+               }                                                       \
+                                                                       \
+               ret;                                                    \
+       })
 
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
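
With VHE the host kernel already runs at EL2, so the hyp helpers can be
called directly as ordinary C functions and only the non-VHE path still goes
through the HVC trampoline; the isb() stands in for the context
synchronization that the exception return used to provide. kvm_call_hyp_ret()
relies on the GCC/Clang statement-expression extension to stay type-generic.
A toy, self-contained sketch of that pattern (the names are made up, not
kernel symbols):

    #include <stdio.h>

    static int direct_fn(int x) { return x + 1; }
    static int via_trampoline(int (*f)(int), int x) { return f(x); }

    #define CALL_RET(vhe, f, x)                                \
            ({                                                 \
                    typeof(f(x)) __ret;                        \
                    if (vhe)                                   \
                            __ret = f(x);                      \
                    else                                       \
                            __ret = via_trampoline(f, x);      \
                    __ret;                                     \
            })

    int main(void)
    {
            printf("%d\n", CALL_RET(1, direct_fn, 41));   /* prints 42 */
            return 0;
    }
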
@@ -401,6 +434,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+                                            int cpu)
+{
+       /* The host's MPIDR is immutable, so let's set it up at boot time */
+       cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+}
+
 void __kvm_enable_ssbs(void);
 
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
index a80a7ef573252a048401a20789683eb507ace060..4da765f2cca589a6ba0761b5002e876269b6a21f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/alternative.h>
+#include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>
 
 #define __hyp_text __section(.hyp.text) notrace
@@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...);
 static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
 {
        write_sysreg(kvm->arch.vtcr, vtcr_el2);
-       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+       write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
 
        /*
         * ARM erratum 1165522 requires the actual execution of the above
index 8af4b1befa421338fc4c40e5824f58f938c60e9c..b0742a16c6c9e43ca73888c2c9778042174328a2 100644 (file)
@@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
        })
 
 /*
- * We currently only support a 40bit IPA.
+ * We currently support using a VM-specified IPA size. For backward
+ * compatibility, the default IPA size is fixed to 40bits.
  */
 #define KVM_PHYS_SHIFT (40)
 
@@ -591,9 +592,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
        return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }
 
-static inline bool kvm_cpu_has_cnp(void)
+static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
 {
-       return system_supports_cnp();
+       struct kvm_vmid *vmid = &kvm->arch.vmid;
+       u64 vmid_field, baddr;
+       u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
+
+       baddr = kvm->arch.pgd_phys;
+       vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+       return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
 #endif /* __ASSEMBLY__ */
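
Instead of caching a ready-made VTTBR in struct kvm_arch, the value is now
rebuilt from kvm->arch.pgd_phys, the current VMID and the optional CnP bit
each time stage-2 is loaded. A stand-alone sketch of the same composition,
assuming the usual VTTBR_EL2 layout (VMID in bits [63:48], CnP in bit 0):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t make_vttbr(uint64_t pgd_phys, uint16_t vmid, int cnp)
    {
            return pgd_phys | ((uint64_t)vmid << 48) | (cnp ? 1ULL : 0ULL);
    }

    int main(void)
    {
            /* page-table base 0x40000000, VMID 7, CnP enabled */
            printf("%#llx\n",
                   (unsigned long long)make_vttbr(0x40000000ULL, 7, 1));
            return 0;
    }
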
index 72dc4c011014c208108ab878b4f9f65da2d56740..5b267dec6194e9675bb48f710a0d16b58eca3d64 100644 (file)
 
 #define SYS_CNTKCTL_EL1                        sys_reg(3, 0, 14, 1, 0)
 
+#define SYS_CCSIDR_EL1                 sys_reg(3, 1, 0, 0, 0)
 #define SYS_CLIDR_EL1                  sys_reg(3, 1, 0, 0, 1)
 #define SYS_AIDR_EL1                   sys_reg(3, 1, 0, 0, 7)
 
 #define SYS_CNTP_CTL_EL0               sys_reg(3, 3, 14, 2, 1)
 #define SYS_CNTP_CVAL_EL0              sys_reg(3, 3, 14, 2, 2)
 
+#define SYS_AARCH32_CNTP_TVAL          sys_reg(0, 0, 14, 2, 0)
+#define SYS_AARCH32_CNTP_CTL           sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTP_CVAL          sys_reg(0, 2, 0, 14, 0)
+
 #define __PMEV_op2(n)                  ((n) & 0x7)
 #define __CNTR_CRm(n)                  (0x8 | (((n) >> 3) & 0x3))
 #define SYS_PMEVCNTRn_EL0(n)           sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
 #define SYS_ICH_VTR_EL2                        sys_reg(3, 4, 12, 11, 1)
 #define SYS_ICH_MISR_EL2               sys_reg(3, 4, 12, 11, 2)
 #define SYS_ICH_EISR_EL2               sys_reg(3, 4, 12, 11, 3)
-#define SYS_ICH_ELSR_EL2               sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_ELRSR_EL2              sys_reg(3, 4, 12, 11, 5)
 #define SYS_ICH_VMCR_EL2               sys_reg(3, 4, 12, 11, 7)
 
 #define __SYS__LR0_EL2(x)              sys_reg(3, 4, 12, 12, x)
index 0f2a135ba15bbe5bd66d148325d3ed227b1fe072..690e033a91c000513281a45f38ce39e680720e8b 100644 (file)
@@ -3,9 +3,7 @@
 # Makefile for Kernel-based Virtual Machine module
 #
 
-ccflags-y += -Iarch/arm64/kvm -Ivirt/kvm/arm/vgic
-CFLAGS_arm.o := -I.
-CFLAGS_mmu.o := -I.
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
 
 KVM=../../../virt/kvm
 
index f39801e4136cd0e27c3ba718a461d476de805992..fd917d6d12afb4060725e8342289390ea5461a2c 100644 (file)
@@ -76,7 +76,7 @@ static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
 
 void kvm_arm_init_debug(void)
 {
-       __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+       __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
 }
 
 /**
index 952f6cb9cf72051ec1415ad5d63954985544ba53..2845aa680841ea9623e517ebb1050524544a1fc6 100644 (file)
@@ -40,9 +40,6 @@
  * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(__kvm_call_hyp)
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        hvc     #0
        ret
-alternative_else_nop_endif
-       b       __vhe_hyp_call
 ENDPROC(__kvm_call_hyp)
index 73c1b483ec3963817aca5a8c650766eb6d3d9508..2b1e686772bfd6786378ac4e4ee5afa3ce415fa8 100644 (file)
        ldr     lr, [sp], #16
 .endm
 
-ENTRY(__vhe_hyp_call)
-       do_el2_call
-       /*
-        * We used to rely on having an exception return to get
-        * an implicit isb. In the E2H case, we don't have it anymore.
-        * rather than changing all the leaf functions, just do it here
-        * before returning to the rest of the kernel.
-        */
-       isb
-       ret
-ENDPROC(__vhe_hyp_call)
-
 el1_sync:                              // Guest trapped into EL2
 
        mrs     x0, esr_el2
index b426e2cf973cfe01a90ae40545abb2ee46c66bca..c52a8451637c483f949b40f931e3a0ba3a99351e 100644 (file)
@@ -53,7 +53,6 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 
 static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
-       ctxt->sys_regs[MPIDR_EL1]       = read_sysreg(vmpidr_el2);
        ctxt->sys_regs[CSSELR_EL1]      = read_sysreg(csselr_el1);
        ctxt->sys_regs[SCTLR_EL1]       = read_sysreg_el1(sctlr);
        ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
index c936aa40c3f4a0393d03ee66e4b8316c35fa0566..539feecda5b8123eed039c0dcda7221695f339b5 100644 (file)
@@ -982,6 +982,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        return true;
 }
 
+#define reg_to_encoding(x)                                             \
+       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
@@ -1003,44 +1007,38 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
          access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 
-static bool access_cntp_tval(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
+static bool access_arch_timer(struct kvm_vcpu *vcpu,
+                             struct sys_reg_params *p,
+                             const struct sys_reg_desc *r)
 {
-       u64 now = kvm_phys_timer_read();
-       u64 cval;
+       enum kvm_arch_timers tmr;
+       enum kvm_arch_timer_regs treg;
+       u64 reg = reg_to_encoding(r);
 
-       if (p->is_write) {
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
-                                     p->regval + now);
-       } else {
-               cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
-               p->regval = cval - now;
+       switch (reg) {
+       case SYS_CNTP_TVAL_EL0:
+       case SYS_AARCH32_CNTP_TVAL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_TVAL;
+               break;
+       case SYS_CNTP_CTL_EL0:
+       case SYS_AARCH32_CNTP_CTL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_CTL;
+               break;
+       case SYS_CNTP_CVAL_EL0:
+       case SYS_AARCH32_CNTP_CVAL:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_CVAL;
+               break;
+       default:
+               BUG();
        }
 
-       return true;
-}
-
-static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
-{
-       if (p->is_write)
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
-       else
-               p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
-
-       return true;
-}
-
-static bool access_cntp_cval(struct kvm_vcpu *vcpu,
-               struct sys_reg_params *p,
-               const struct sys_reg_desc *r)
-{
        if (p->is_write)
-               kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
+               kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
        else
-               p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+               p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
 
        return true;
 }
@@ -1160,6 +1158,64 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return __set_id_reg(rd, uaddr, true);
 }
 
+static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                      const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+
+       p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       return true;
+}
+
+static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+
+       p->regval = read_sysreg(clidr_el1);
+       return true;
+}
+
+static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+       else
+               p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+       return true;
+}
+
+static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+{
+       u32 csselr;
+
+       if (p->is_write)
+               return write_to_read_only(vcpu, p, r);
+
+       csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
+       p->regval = get_ccsidr(csselr);
+
+       /*
+        * Guests should not be doing cache operations by set/way at all, and
+        * for this reason, we trap them and attempt to infer the intent, so
+        * that we can flush the entire guest's address space at the appropriate
+        * time.
+        * To prevent this trapping from causing performance problems, let's
+        * expose the geometry of all data and unified caches (which are
+        * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
+        * [If guests should attempt to infer aliasing properties from the
+        * geometry (which is not permitted by the architecture), they would
+        * only do so for virtually indexed caches.]
+        */
+       if (!(csselr & 1)) // data or unified cache
+               p->regval &= ~GENMASK(27, 3);
+       return true;
+}
+
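
For a data or unified cache (CSSELR bit 0 clear) the handler masks out CCSIDR
bits [27:3], which in the classic (pre-ARMv8.3) CCSIDR_EL1 format hold NumSets
and Associativity, so the guest always sees a 1-set, 1-way geometry. A small
stand-alone illustration of that masking (the example value and the GENMASK64
helper are not from the tree):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK64(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
            uint64_t ccsidr = 0x701fe00aULL;  /* arbitrary example value */
            uint32_t csselr = 0;              /* level 1, data/unified cache */

            if (!(csselr & 1))                    /* not the instruction cache */
                    ccsidr &= ~GENMASK64(27, 3);  /* report 1 set, 1 way */

            printf("%#llx\n", (unsigned long long)ccsidr);  /* 0x70000002 */
            return 0;
    }
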
 /* sys_reg_desc initialiser for known cpufeature ID registers */
 #define ID_SANITISED(name) {                   \
        SYS_DESC(SYS_##name),                   \
@@ -1377,7 +1433,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
 
-       { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
+       { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
+       { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+       { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
+       { SYS_DESC(SYS_CTR_EL0), access_ctr },
 
        { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
@@ -1400,9 +1459,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
 
-       { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
-       { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
-       { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
+       { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
+       { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
+       { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
 
        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),
@@ -1476,7 +1535,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
-       { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
+       { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
 };
 
 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
@@ -1677,6 +1736,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
+       { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -1723,10 +1783,9 @@ static const struct sys_reg_desc cp15_regs[] = {
 
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
-       /* CNTP_TVAL */
-       { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
-       /* CNTP_CTL */
-       { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },
+       /* Arch Timers */
+       { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
+       { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
 
        /* PMEVCNTRn */
        PMU_PMEVCNTR(0),
@@ -1794,6 +1853,10 @@ static const struct sys_reg_desc cp15_regs[] = {
        PMU_PMEVTYPER(30),
        /* PMCCFILTR */
        { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
+
+       { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
+       { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
+       { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
@@ -1803,7 +1866,7 @@ static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
-       { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
+       { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
 };
 
 /* Target specific emulation tables */
@@ -1832,30 +1895,19 @@ static const struct sys_reg_desc *get_target_table(unsigned target,
        }
 }
 
-#define reg_to_match_value(x)                                          \
-       ({                                                              \
-               unsigned long val;                                      \
-               val  = (x)->Op0 << 14;                                  \
-               val |= (x)->Op1 << 11;                                  \
-               val |= (x)->CRn << 7;                                   \
-               val |= (x)->CRm << 3;                                   \
-               val |= (x)->Op2;                                        \
-               val;                                                    \
-        })
-
 static int match_sys_reg(const void *key, const void *elt)
 {
        const unsigned long pval = (unsigned long)key;
        const struct sys_reg_desc *r = elt;
 
-       return pval - reg_to_match_value(r);
+       return pval - reg_to_encoding(r);
 }
 
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
                                         const struct sys_reg_desc table[],
                                         unsigned int num)
 {
-       unsigned long pval = reg_to_match_value(params);
+       unsigned long pval = reg_to_encoding(params);
 
        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
 }
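
find_reg() now binary-searches the descriptor tables on the packed
Op0/Op1/CRn/CRm/Op2 encoding, so each table has to stay sorted by that key.
A self-contained illustration of the same idea with libc bsearch(); the
19/16/12/8/5 shifts mirror the usual arm64 sys_reg() packing but are an
assumption here, as are the table entries:

    #include <stdio.h>
    #include <stdlib.h>

    struct desc { unsigned long key; const char *name; };

    static unsigned long enc(int op0, int op1, int crn, int crm, int op2)
    {
            return ((unsigned long)op0 << 19) | (op1 << 16) |
                   (crn << 12) | (crm << 8) | (op2 << 5);
    }

    static int cmp(const void *key, const void *elt)
    {
            unsigned long k = *(const unsigned long *)key;
            const struct desc *d = elt;

            return (k > d->key) - (k < d->key);
    }

    int main(void)
    {
            struct desc table[] = {                   /* sorted by key */
                    { 0,                   "some low encoding" },
                    { enc(3, 3, 14, 2, 1), "CNTP_CTL_EL0" },
            };
            unsigned long k = enc(3, 3, 14, 2, 1);
            struct desc *d = bsearch(&k, table, 2, sizeof(table[0]), cmp);

            printf("%s\n", d ? d->name : "not found");
            return 0;
    }
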
@@ -2218,11 +2270,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
        }
 
 FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(ctr_el0)
 FUNCTION_INVARIANT(revidr_el1)
 FUNCTION_INVARIANT(clidr_el1)
 FUNCTION_INVARIANT(aidr_el1)
 
+static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
+{
+       ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
+}
+
 /* ->val is filled in by kvm_sys_reg_table_init() */
 static struct sys_reg_desc invariant_sys_regs[] = {
        { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
index d2abd98471e860ca2e42cf2576c5902cc82e1360..41204a49cf95eaa8ef2735468ebbb062d23a457a 100644 (file)
@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
                struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
index 0f98f00da2ea3b7027e29efbf0bf3c4b8a7116be..e6b5bb012ccb962fa5dcf7fdcd2db8962da1a57e 100644 (file)
@@ -99,6 +99,8 @@ struct kvm_nested_guest;
 
 struct kvm_vm_stat {
        ulong remote_tlb_flush;
+       ulong num_2M_pages;
+       ulong num_1G_pages;
 };
 
 struct kvm_vcpu_stat {
@@ -377,6 +379,7 @@ struct kvmppc_mmu {
        void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
        u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
        u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+       int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
        void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
        void (*slbia)(struct kvm_vcpu *vcpu);
        /* book3s */
@@ -837,7 +840,7 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
index a6c8548ed9faa6c9188d496950ab5ac268756ceb..ac22b28ae78d4bc52223c94b478b83fc1c5ce48e 100644 (file)
@@ -36,6 +36,8 @@
 #endif
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 #include <asm/paca.h>
+#include <asm/xive.h>
+#include <asm/cpu_has_feature.h>
 #endif
 
 /*
@@ -617,6 +619,18 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
 static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
 #endif /* CONFIG_KVM_XIVE */
 
+#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
+static inline bool xics_on_xive(void)
+{
+       return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool xics_on_xive(void)
+{
+       return false;
+}
+#endif
+
 /*
  * Prototypes for functions called only from assembler code.
  * Having prototypes reduces sparse errors.
index 8c876c166ef27b2c6fa754781fdbb103f2addc54..26ca425f4c2c39515bccee31029b3cada4c73639 100644 (file)
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
 #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
 #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
 #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST    (1ull << 54)
 
 #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
 #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
 #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE    (1ull << 58)
 
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
index 9a7dadbe1f1733a8f7cf60a08363b447bea1bf51..10c5579d20cec64152946f2f703a79e2da055154 100644 (file)
@@ -39,6 +39,7 @@
 #include "book3s.h"
 #include "trace.h"
 
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
 /* #define EXIT_DEBUG */
@@ -71,6 +72,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pthru_all",       VCPU_STAT(pthru_all) },
        { "pthru_host",      VCPU_STAT(pthru_host) },
        { "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
+       { "largepages_2M",    VM_STAT(num_2M_pages) },
+       { "largepages_1G",    VM_STAT(num_1G_pages) },
        { NULL }
 };
 
@@ -642,7 +645,7 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                                r = -ENXIO;
                                break;
                        }
-                       if (xive_enabled())
+                       if (xics_on_xive())
                                *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
                        else
                                *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
@@ -715,7 +718,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
                                r = -ENXIO;
                                break;
                        }
-                       if (xive_enabled())
+                       if (xics_on_xive())
                                r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
                        else
                                r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
@@ -991,7 +994,7 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
 {
-       if (xive_enabled())
+       if (xics_on_xive())
                return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
                                           line_status);
        else
@@ -1044,7 +1047,7 @@ static int kvmppc_book3s_init(void)
 
 #ifdef CONFIG_KVM_XICS
 #ifdef CONFIG_KVM_XIVE
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                kvmppc_xive_init_module();
                kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
        } else
@@ -1057,7 +1060,7 @@ static int kvmppc_book3s_init(void)
 static void kvmppc_book3s_exit(void)
 {
 #ifdef CONFIG_KVM_XICS
-       if (xive_enabled())
+       if (xics_on_xive())
                kvmppc_xive_exit_module();
 #endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
index 612169988a3d8a15262665e03d3cc9cfddb4a87e..6f789f674048a4c2a534ad338f3162e33c4068cb 100644 (file)
@@ -425,6 +425,7 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
        mmu->slbmte = NULL;
        mmu->slbmfee = NULL;
        mmu->slbmfev = NULL;
+       mmu->slbfee = NULL;
        mmu->slbie = NULL;
        mmu->slbia = NULL;
 }
index c92dd25bed237bcd9ac488401c80ed4b3d304aed..d4b967f0e8d4bd1de83f40a40b11c9b02ef97d89 100644 (file)
@@ -435,6 +435,19 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
 }
 
+static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
+                                      ulong *ret_slb)
+{
+       struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
+
+       if (slbe) {
+               *ret_slb = slbe->origv;
+               return 0;
+       }
+       *ret_slb = 0;
+       return -ENOENT;
+}
+
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
        struct kvmppc_slb *slbe;
@@ -670,6 +683,7 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+       mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
index bd2dcfbf00cdb1cc6a20a766bb68bf17d9f5e3dc..be7bc070eae5fc701251d7d53ce7979195051f7d 100644 (file)
@@ -441,6 +441,24 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
        u32 last_inst;
 
+       /*
+        * Fast path - check if the guest physical address corresponds to a
+        * device on the FAST_MMIO_BUS; if so, we can avoid loading the
+        * instruction altogether and just handle it and return.
+        */
+       if (is_store) {
+               int idx, ret;
+
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
+                                      NULL);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+               if (!ret) {
+                       kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+                       return RESUME_GUEST;
+               }
+       }
+
        /*
         * If we fail, we just return to the guest and try executing it again.
         */
index 1b821c6efdefba002f3c104208857d7b0694b478..f55ef071883f13166662ca006e10d314d70a135c 100644 (file)
@@ -403,8 +403,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
                if (!memslot)
                        return;
        }
-       if (shift)
+       if (shift) { /* 1GB or 2MB page */
                page_size = 1ul << shift;
+               if (shift == PMD_SHIFT)
+                       kvm->stat.num_2M_pages--;
+               else if (shift == PUD_SHIFT)
+                       kvm->stat.num_1G_pages--;
+       }
 
        gpa &= ~(page_size - 1);
        hpa = old & PTE_RPN_MASK;
@@ -878,6 +883,14 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                put_page(page);
        }
 
+       /* Increment number of large pages if we (successfully) inserted one */
+       if (!ret) {
+               if (level == 1)
+                       kvm->stat.num_2M_pages++;
+               else if (level == 2)
+                       kvm->stat.num_1G_pages++;
+       }
+
        return ret;
 }
 
index 532ab79734c7a08c356666a968c025a432c2f482..f02b049737109c670b1af440f9f5704bbdf0afc0 100644 (file)
@@ -133,7 +133,6 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                                        continue;
 
                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
-                               return;
                        }
                }
        }
@@ -338,14 +337,15 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                }
        }
 
+       kvm_get_kvm(kvm);
        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);
 
-       if (ret >= 0) {
+       if (ret >= 0)
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
-               kvm_get_kvm(kvm);
-       }
+       else
+               kvm_put_kvm(kvm);
 
        mutex_unlock(&kvm->lock);
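
The reference on kvm is now taken before the file descriptor is created and
dropped again if creation fails, so the fd never exists without the reference
its release path will later put. A generic, stand-alone sketch of that
take-then-publish-or-undo pattern (obj_get/obj_put/failing_publish are made-up
stand-ins, not kernel APIs):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { atomic_int refcount; };

    static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcount, 1); }
    static void obj_put(struct obj *o) { atomic_fetch_sub(&o->refcount, 1); }

    static int failing_publish(struct obj *o) { (void)o; return -1; }

    static int register_and_publish(struct obj *o, int (*publish)(struct obj *))
    {
            int fd;

            obj_get(o);          /* reference owned by the fd about to be created */
            fd = publish(o);     /* may fail, like anon_inode_getfd() */
            if (fd < 0)
                    obj_put(o);  /* nothing was published, give the reference back */
            return fd;
    }

    int main(void)
    {
            struct obj o = { 1 };

            register_and_publish(&o, failing_publish);
            printf("refcount after failed publish: %d\n",
                   atomic_load(&o.refcount));   /* still 1 */
            return 0;
    }
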
 
index 8c7e933e942e5b56c42faf9eacdf08d5a183cde4..6ef7c5f00a49c0ac503a2d3eb2cf02f4ead324a9 100644 (file)
@@ -47,6 +47,7 @@
 #define OP_31_XOP_SLBMFEV      851
 #define OP_31_XOP_EIOIO                854
 #define OP_31_XOP_SLBMFEE      915
+#define OP_31_XOP_SLBFEE       979
 
 #define OP_31_XOP_TBEGIN       654
 #define OP_31_XOP_TABORT       910
@@ -416,6 +417,23 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                        vcpu->arch.mmu.slbia(vcpu);
                        break;
+               case OP_31_XOP_SLBFEE:
+                       if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
+                               return EMULATE_FAIL;
+                       } else {
+                               ulong b, t;
+                               ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
+
+                               b = kvmppc_get_gpr(vcpu, rb);
+                               if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
+                                       cr |= 2 << CR0_SHIFT;
+                               kvmppc_set_gpr(vcpu, rt, t);
+                               /* copy XER[SO] bit to CR0[SO] */
+                               cr |= (vcpu->arch.regs.xer & 0x80000000) >>
+                                       (31 - CR0_SHIFT);
+                               kvmppc_set_cr(vcpu, cr);
+                       }
+                       break;
                case OP_31_XOP_SLBMFEE:
                        if (!vcpu->arch.mmu.slbmfee) {
                                emulated = EMULATE_FAIL;
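
slbfee. is a record-form lookup: on a hit the matching SLB entry is returned
in rt and CR0.EQ is set, and XER[SO] is mirrored into CR0.SO as for other
record-form instructions. A small sketch of how that CR0 field is assembled,
assuming CR0 sits in bits [31:28] with LT=8, GT=4, EQ=2, SO=1:

    #include <stdint.h>
    #include <stdio.h>

    #define CR0_SHIFT 28

    static uint32_t cr0_for_slbfee(uint32_t cr, int found, uint32_t xer)
    {
            cr &= ~(0xfU << CR0_SHIFT);                      /* clear CR0 */
            if (found)
                    cr |= 2U << CR0_SHIFT;                   /* set EQ */
            cr |= (xer & 0x80000000U) >> (31 - CR0_SHIFT);   /* copy XER[SO] */
            return cr;
    }

    int main(void)
    {
            /* hit, with XER[SO] set: EQ and SO end up set in CR0 */
            printf("%#x\n", cr0_for_slbfee(0, 1, 0x80000000U));  /* 0x30000000 */
            return 0;
    }
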
index a3d5318f5d1e9a9e2654525cfa65059a232982b8..06964350b97a94118d065d90a257c882b5280136 100644 (file)
@@ -922,7 +922,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_IPOLL:
        case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu)) {
-                       if (xive_enabled()) {
+                       if (xics_on_xive()) {
                                ret = H_NOT_AVAILABLE;
                                return RESUME_GUEST;
                        }
@@ -937,6 +937,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
                                                kvmppc_get_gpr(vcpu, 5));
                break;
+#ifdef CONFIG_SPAPR_TCE_IOMMU
        case H_GET_TCE:
                ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
                                                kvmppc_get_gpr(vcpu, 5));
@@ -966,6 +967,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (ret == H_TOO_HARD)
                        return RESUME_HOST;
                break;
+#endif
        case H_RANDOM:
                if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
                        ret = H_HARDWARE;
@@ -1445,7 +1447,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                vcpu->arch.trap = 0;
                r = RESUME_GUEST;
-               if (!xive_enabled())
+               if (!xics_on_xive())
                        kvmppc_xics_rm_complete(vcpu, 0);
                break;
        default:
@@ -3648,11 +3650,12 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 
 static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
 {
-       /* 10us base */
-       if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
-               vc->halt_poll_ns = 10000;
-       else
-               vc->halt_poll_ns *= halt_poll_ns_grow;
+       if (!halt_poll_ns_grow)
+               return;
+
+       vc->halt_poll_ns *= halt_poll_ns_grow;
+       if (vc->halt_poll_ns < halt_poll_ns_grow_start)
+               vc->halt_poll_ns = halt_poll_ns_grow_start;
 }
 
 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
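
The growth rule no longer hard-codes the 10us base: when the grow factor is 0
polling never grows, otherwise the window is multiplied and pulled up to at
least halt_poll_ns_grow_start on the first grow. A small sketch of that rule;
the factor 2 and the 10000ns start value used below are assumptions, not
necessarily the module defaults:

    #include <stdio.h>

    static unsigned long grow_poll_ns(unsigned long ns,
                                      unsigned int grow, unsigned int grow_start)
    {
            if (!grow)
                    return ns;
            ns *= grow;
            if (ns < grow_start)
                    ns = grow_start;
            return ns;
    }

    int main(void)
    {
            unsigned long ns = 0;

            for (int i = 0; i < 4; i++) {
                    ns = grow_poll_ns(ns, 2, 10000);
                    printf("%lu\n", ns);    /* 10000, 20000, 40000, 80000 */
            }
            return 0;
    }
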
@@ -3666,7 +3669,7 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
 #ifdef CONFIG_KVM_XICS
 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
 {
-       if (!xive_enabled())
+       if (!xics_on_xive())
                return false;
        return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
                vcpu->arch.xive_saved_state.cppr;
@@ -4226,7 +4229,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                        srcu_read_unlock(&kvm->srcu, srcu_idx);
                } else if (r == RESUME_PASSTHROUGH) {
-                       if (WARN_ON(xive_enabled()))
+                       if (WARN_ON(xics_on_xive()))
                                r = H_SUCCESS;
                        else
                                r = kvmppc_xics_rm_complete(vcpu, 0);
@@ -4750,7 +4753,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
                 * If xive is enabled, we route 0x500 interrupts directly
                 * to the guest.
                 */
-               if (xive_enabled())
+               if (xics_on_xive())
                        lpcr |= LPCR_LPES;
        }
 
@@ -4986,7 +4989,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
        if (i == pimap->n_mapped)
                pimap->n_mapped++;
 
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
        else
                kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
@@ -5027,7 +5030,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
                return -ENODEV;
        }
 
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
        else
                kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
@@ -5359,13 +5362,11 @@ static int kvm_init_subcore_bitmap(void)
                        continue;
 
                sibling_subcore_state =
-                       kmalloc_node(sizeof(struct sibling_subcore_state),
+                       kzalloc_node(sizeof(struct sibling_subcore_state),
                                                        GFP_KERNEL, node);
                if (!sibling_subcore_state)
                        return -ENOMEM;
 
-               memset(sibling_subcore_state, 0,
-                               sizeof(struct sibling_subcore_state));
 
                for (j = 0; j < threads_per_core; j++) {
                        int cpu = first_cpu + j;
@@ -5406,7 +5407,7 @@ static int kvmppc_book3s_init_hv(void)
         * indirectly, via OPAL.
         */
 #ifdef CONFIG_SMP
-       if (!xive_enabled() && !kvmhv_on_pseries() &&
+       if (!xics_on_xive() && !kvmhv_on_pseries() &&
            !local_paca->kvm_hstate.xics_phys) {
                struct device_node *np;
 
index a71e2fc00a4e899be931d16ae08dd6042927bdde..b0cf22477e879b74ce4c0fa771d0deabb6c54af7 100644 (file)
@@ -257,7 +257,7 @@ void kvmhv_rm_send_ipi(int cpu)
        }
 
        /* We should never reach this */
-       if (WARN_ON_ONCE(xive_enabled()))
+       if (WARN_ON_ONCE(xics_on_xive()))
            return;
 
        /* Else poke the target with an IPI */
@@ -577,7 +577,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
@@ -592,7 +592,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        vcpu->arch.regs.gpr[5] = get_tb();
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
@@ -606,7 +606,7 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 {
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipoll(vcpu, server);
                if (unlikely(!__xive_vm_h_ipoll))
@@ -621,7 +621,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 {
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipi(vcpu, server, mfrr);
                if (unlikely(!__xive_vm_h_ipi))
@@ -635,7 +635,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_cppr(vcpu, cppr);
                if (unlikely(!__xive_vm_h_cppr))
@@ -649,7 +649,7 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
-       if (xive_enabled()) {
+       if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_eoi(vcpu, xirr);
                if (unlikely(!__xive_vm_h_eoi))
index b3f5786b20dcf33098280cdc1af37792b6182cbd..3b9662a4207e06125d108a2cd13724dbf665632a 100644 (file)
@@ -144,6 +144,13 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                return;
        }
 
+       if (xive_enabled() && kvmhv_on_pseries()) {
+               /* No XICS access or hypercalls available, too hard */
+               this_icp->rm_action |= XICS_RM_KICK_VCPU;
+               this_icp->rm_kick_target = vcpu;
+               return;
+       }
+
        /*
         * Check if the core is loaded,
         * if not, find an available host core to post to wake the VCPU,
index 25043b50cb30a4b7d5dcde8e45ba61bc3b3e547f..3a5e719ef032bcdc7097f840b1932c47408bafc3 100644 (file)
@@ -2272,8 +2272,13 @@ hcall_real_table:
        .long   DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
        .long   DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
+#ifdef CONFIG_SPAPR_TCE_IOMMU
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
+#else
+       .long   0               /* 0x1c */
+       .long   0               /* 0x20 */
+#endif
        .long   0               /* 0x24 - H_SET_SPRG0 */
        .long   DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
        .long   0               /* 0x2c */
@@ -2351,8 +2356,13 @@ hcall_real_table:
        .long   0               /* 0x12c */
        .long   0               /* 0x130 */
        .long   DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
+#ifdef CONFIG_SPAPR_TCE_IOMMU
        .long   DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
+#else
+       .long   0               /* 0x138 */
+       .long   0               /* 0x13c */
+#endif
        .long   0               /* 0x140 */
        .long   0               /* 0x144 */
        .long   0               /* 0x148 */
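
hcall_real_table holds one .long per hcall number (the numbers step by 4), so
when the TCE handlers are configured out their slots must become zero words
rather than disappear, or every later entry would shift to the wrong hcall.
A C analogue of keeping placeholder slots so the indexing stays fixed (the
handler names and the number/4 indexing are illustrative):

    #include <stdio.h>

    typedef long (*hcall_fn)(void);

    static long h_enter(void)   { return 1; }
    static long h_put_tce(void) { return 2; }

    #define CONFIG_SPAPR_TCE_IOMMU 1

    static hcall_fn hcall_table[] = {
            h_enter,        /* hcall 0x00 */
    #ifdef CONFIG_SPAPR_TCE_IOMMU
            h_put_tce,      /* hcall 0x04 */
    #else
            NULL,           /* hcall 0x04: slot kept so later entries don't shift */
    #endif
            h_enter,        /* hcall 0x08 */
    };

    int main(void)
    {
            unsigned int nr = 0x08;
            hcall_fn fn = hcall_table[nr / 4];

            printf("%ld\n", fn ? fn() : -1);    /* 1 */
            return 0;
    }
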
index 2d3b2b1cc272b0989858bfb4e945567ddef96369..4e178c4c1ea5074d638bbbb3eaa3315f39b2bfe0 100644 (file)
@@ -33,7 +33,7 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        server = be32_to_cpu(args->args[1]);
        priority = be32_to_cpu(args->args[2]);
 
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
        else
                rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
@@ -56,7 +56,7 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        irq = be32_to_cpu(args->args[0]);
 
        server = priority = 0;
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
        else
                rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -83,7 +83,7 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 
        irq = be32_to_cpu(args->args[0]);
 
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_int_off(vcpu->kvm, irq);
        else
                rc = kvmppc_xics_int_off(vcpu->kvm, irq);
@@ -105,7 +105,7 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 
        irq = be32_to_cpu(args->args[0]);
 
-       if (xive_enabled())
+       if (xics_on_xive())
                rc = kvmppc_xive_int_on(vcpu->kvm, irq);
        else
                rc = kvmppc_xics_int_on(vcpu->kvm, irq);
index b90a7d154180032d97efdb9150752d678453f9b3..8885377ec3e0c611b3ec3f14b8565e2f7ffde4aa 100644 (file)
@@ -748,7 +748,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
-               if (xive_enabled())
+               if (xics_on_xive())
                        kvmppc_xive_cleanup_vcpu(vcpu);
                else
                        kvmppc_xics_free_icp(vcpu);
@@ -1931,7 +1931,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev) {
-                       if (xive_enabled())
+                       if (xics_on_xive())
                                r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
                        else
                                r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
@@ -2189,10 +2189,12 @@ static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
                        KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
                        KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
                        KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
-                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+                       KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
                cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
                        KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+                       KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
        }
        return 0;
 }
@@ -2251,12 +2253,16 @@ static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
                if (have_fw_feat(fw_features, "enabled",
                                 "fw-count-cache-disabled"))
                        cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-count-cache-flush-bcctr2,0,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
                cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
                        KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
                        KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
                        KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
                        KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
-                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
+                       KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
 
                if (have_fw_feat(fw_features, "enabled",
                                 "speculation-policy-favor-security"))
@@ -2267,9 +2273,13 @@ static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
                if (!have_fw_feat(fw_features, "disabled",
                                  "needs-spec-barrier-for-bound-checks"))
                        cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+               if (have_fw_feat(fw_features, "enabled",
+                                "needs-count-cache-flush-on-context-switch"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
                cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
                        KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
-                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
+                       KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
 
                of_node_put(fw_features);
        }
index 22566765206925ac7d427896031fe0775bddec6d..1727180e8ca17124c97e039e9f3368d00680a66b 100644 (file)
@@ -331,5 +331,6 @@ extern void css_schedule_reprobe(void);
 /* Function from drivers/s390/cio/chsc.c */
 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
 int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_sgib(u32 origin);
 
 #endif
index 2f7f27e5493f6b28c2f0cb79f840ed546c0660ac..afaf5e3c57fd8b66b59b220699131c4ee70018cb 100644 (file)
@@ -62,6 +62,7 @@ enum interruption_class {
        IRQIO_MSI,
        IRQIO_VIR,
        IRQIO_VAI,
+       IRQIO_GAL,
        NMI_NMI,
        CPU_RST,
        NR_ARCH_IRQS
index 6cb9e2ed05b6c1b211f9d36fcb7e512399c61339..b2cc1ec78d062052e3289682ae3cbac1670a48f7 100644 (file)
@@ -21,6 +21,7 @@
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC       /* I/O subchannel in qdio mode */
 #define PCI_ISC 2                      /* PCI I/O subchannels */
+#define GAL_ISC 5                      /* GIB alert */
 #define AP_ISC 6                       /* adjunct processor (crypto) devices */
 
 /* Functions for registration of I/O interruption subclasses */
index d5d24889c3bcf44b6acea740164d6144e82c0e0f..c47e22bba87fac58b08ecf28c498fe54074ede5d 100644 (file)
@@ -591,7 +591,6 @@ struct kvm_s390_float_interrupt {
        struct kvm_s390_mchk_info mchk;
        struct kvm_s390_ext_info srv_signal;
        int next_rr_cpu;
-       unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
        struct mutex ais_lock;
        u8 simm;
        u8 nimm;
@@ -712,6 +711,7 @@ struct s390_io_adapter {
 struct kvm_s390_cpu_model {
        /* facility mask supported by kvm & hosting machine */
        __u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+       struct kvm_s390_vm_cpu_subfunc subfuncs;
        /* facility list requested by guest (in dma page) */
        __u64 *fac_list;
        u64 cpuid;
@@ -782,9 +782,21 @@ struct kvm_s390_gisa {
                        u8  reserved03[11];
                        u32 airq_count;
                } g1;
+               struct {
+                       u64 word[4];
+               } u64;
        };
 };
 
+struct kvm_s390_gib {
+       u32 alert_list_origin;
+       u32 reserved01;
+       u8:5;
+       u8  nisc:3;
+       u8  reserved03[3];
+       u32 reserved04[5];
+};
+
 /*
  * sie_page2 has to be allocated as DMA because fac_list, crycb and
  * gisa need 31bit addresses in the sie control block.
@@ -793,7 +805,8 @@ struct sie_page2 {
        __u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];    /* 0x0000 */
        struct kvm_s390_crypto_cb crycb;                /* 0x0800 */
        struct kvm_s390_gisa gisa;                      /* 0x0900 */
-       u8 reserved920[0x1000 - 0x920];                 /* 0x0920 */
+       struct kvm *kvm;                                /* 0x0920 */
+       u8 reserved928[0x1000 - 0x928];                 /* 0x0928 */
 };
 
 struct kvm_s390_vsie {
@@ -804,6 +817,20 @@ struct kvm_s390_vsie {
        struct page *pages[KVM_MAX_VCPUS];
 };
 
+struct kvm_s390_gisa_iam {
+       u8 mask;
+       spinlock_t ref_lock;
+       u32 ref_count[MAX_ISC + 1];
+};
+
+struct kvm_s390_gisa_interrupt {
+       struct kvm_s390_gisa *origin;
+       struct kvm_s390_gisa_iam alert;
+       struct hrtimer timer;
+       u64 expires;
+       DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
+};
+
 struct kvm_arch{
        void *sca;
        int use_esca;
@@ -837,7 +864,8 @@ struct kvm_arch{
        atomic64_t cmma_dirty_pages;
        /* subset of available cpu features enabled by user space */
        DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
-       struct kvm_s390_gisa *gisa;
+       DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+       struct kvm_s390_gisa_interrupt gisa_int;
 };
 
 #define KVM_HVA_ERR_BAD                (-1UL)
@@ -871,6 +899,9 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
 
+extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
+extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_check_processor_compat(void *rtn) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -878,7 +909,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
                struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
 static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                struct kvm_memory_slot *slot) {}
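
The u64 overlay added to struct kvm_s390_gisa above exists so that the alert-list
link, the IPM and the IAM, which all sit in the first doubleword of the (big-endian)
GISA, can be read and updated together with a single cmpxchg. A reduced sketch of
that layout, using illustrative macro names rather than kernel identifiers, is:

/* First GISA doubleword on s390 (big endian), as used by the new helpers:
 * bytes 0-3: next_alert (alert-list link), byte 4: IPM, byte 7: IAM.
 * The macro names below are illustrative only.
 */
#define GISA_WORD0_NEXT_ALERT(w)	((u32)((w) >> 32))	/* alert-list membership check */
#define GISA_WORD0_IPM(w)		((u8)((w) >> 24))	/* interruption pending mask   */
#define GISA_WORD0_IAM(w)		((u8)((w) & 0xffUL))	/* interruption alert mask     */
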
index 0e8d68bac82c29356886e24b24088d0463c50880..0cd5a5f96729dad40540016bf2d32acc1ff18893 100644 (file)
@@ -88,6 +88,7 @@ static const struct irq_class irqclass_sub_desc[] = {
        {.irq = IRQIO_MSI,  .name = "MSI", .desc = "[I/O] MSI Interrupt" },
        {.irq = IRQIO_VIR,  .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
        {.irq = IRQIO_VAI,  .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+       {.irq = IRQIO_GAL,  .name = "GAL", .desc = "[I/O] GIB Alert"},
        {.irq = NMI_NMI,    .name = "NMI", .desc = "[NMI] Machine Check"},
        {.irq = CPU_RST,    .name = "RST", .desc = "[CPU] CPU Restart"},
 };
index fcb55b02990ef96e20148472828de2e324c6a56f..82162867f378d225ede29ff32adee1983072a7ac 100644 (file)
@@ -7,6 +7,9 @@
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "kvm-s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
@@ -23,6 +26,7 @@
 #include <asm/gmap.h>
 #include <asm/switch_to.h>
 #include <asm/nmi.h>
+#include <asm/airq.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -31,6 +35,8 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
 
+static struct kvm_s390_gib *gib;
+
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
@@ -217,22 +223,100 @@ static inline u8 int_word_to_isc(u32 int_word)
  */
 #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
 
-static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+/**
+ * gisa_set_iam - change the GISA interruption alert mask
+ *
+ * @gisa: gisa to operate on
+ * @iam: new IAM value to use
+ *
+ * Change the IAM atomically with the next alert address and the IPM
+ * of the GISA if the GISA is not part of the GIB alert list. All three
+ * fields are located in the first long word of the GISA.
+ *
+ * Returns: 0 on success
+ *          -EBUSY in case the gisa is part of the alert list
+ */
+static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
+{
+       u64 word, _word;
+
+       do {
+               word = READ_ONCE(gisa->u64.word[0]);
+               if ((u64)gisa != word >> 32)
+                       return -EBUSY;
+               _word = (word & ~0xffUL) | iam;
+       } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+
+       return 0;
+}
+
+/**
+ * gisa_clear_ipm - clear the GISA interruption pending mask
+ *
+ * @gisa: gisa to operate on
+ *
+ * Clear the IPM atomically with the next alert address and the IAM
+ * of the GISA unconditionally. All three fields are located in the
+ * first long word of the GISA.
+ */
+static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
+{
+       u64 word, _word;
+
+       do {
+               word = READ_ONCE(gisa->u64.word[0]);
+               _word = word & ~(0xffUL << 24);
+       } while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+}
+
+/**
+ * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
+ *
+ * @gi: gisa interrupt struct to work on
+ *
+ * Atomically restores the interruption alert mask if none of the
+ * relevant ISCs are pending and returns the IPM.
+ *
+ * Returns: the relevant pending ISCs
+ */
+static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
+{
+       u8 pending_mask, alert_mask;
+       u64 word, _word;
+
+       do {
+               word = READ_ONCE(gi->origin->u64.word[0]);
+               alert_mask = READ_ONCE(gi->alert.mask);
+               pending_mask = (u8)(word >> 24) & alert_mask;
+               if (pending_mask)
+                       return pending_mask;
+               _word = (word & ~0xffUL) | alert_mask;
+       } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
+
+       return 0;
+}
+
+static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
+{
+       return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+}
+
+static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
 {
        return READ_ONCE(gisa->ipm);
 }
 
-static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
        clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
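
The new word[0] helpers above (gisa_set_iam(), gisa_clear_ipm() and
gisa_get_ipm_or_restore_iam()) share the same lock-free pattern: snapshot the
doubleword, compute the desired new contents, and retry the cmpxchg until no
concurrent update got in between. A generic sketch of that loop, with a
hypothetical modify() callback standing in for the per-helper logic, looks like:

/* Generic sketch of the read/modify/cmpxchg retry loop used by the GISA
 * word[0] helpers; modify() is purely illustrative.
 */
static inline void gisa_update_word0(struct kvm_s390_gisa *gisa,
				     u64 (*modify)(u64 old))
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);	/* current snapshot */
		_word = modify(word);			/* new contents     */
		/* retry if another CPU or the machine changed word[0] meanwhile */
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}
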
@@ -245,8 +329,13 @@ static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
 
 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
 {
-       return pending_irqs_no_gisa(vcpu) |
-               kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
+       struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
+       unsigned long pending_mask;
+
+       pending_mask = pending_irqs_no_gisa(vcpu);
+       if (gi->origin)
+               pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
+       return pending_mask;
 }
 
 static inline int isc_to_irq_type(unsigned long isc)
@@ -318,13 +407,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-       set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+       set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-       clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);