Associate an eventfd with an AFU interrupt so that the user process
can be notified when the AFU sends an interrupt.
+OCXL_IOCTL_GET_METADATA:
+
+ Obtains configuration information from the card, such as the size of
+ MMIO areas, the AFU version, and the PASID for the current context.
+
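For illustration, here is a minimal user-space sketch of this ioctl. It
assumes the uapi header misc/ocxl.h and the field names of struct
ocxl_ioctl_metadata as of this kernel; the device path is hypothetical
and depends on the AFU.

	/* Hedged sketch: query AFU metadata through the ocxl ioctl. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <misc/ocxl.h>

	int main(void)
	{
		struct ocxl_ioctl_metadata md = { 0 };
		/* Hypothetical device path; the real name depends on the AFU. */
		int fd = open("/dev/ocxl/IBM,AFU0.0004:00:00.1.0", O_RDWR);

		if (fd < 0 || ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) < 0) {
			perror("OCXL_IOCTL_GET_METADATA");
			return 1;
		}
		printf("AFU version %u.%u, PASID %u, per-PASID MMIO size %llu\n",
		       md.afu_version_major, md.afu_version_minor, md.pasid,
		       (unsigned long long)md.pp_mmio_size);
		close(fd);
		return 0;
	}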
mmap
----
interrupts.
Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock; in that case, the
+  name must be "core" for the first clock and "reg" for the second
+  one
+
Example:
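A node using both clocks might look like the sketch below; the unit
address, clock phandles and msi-parent are illustrative, and only the
clocks/clock-names pairing is the point here.

	xor@6a0000 {
		compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
		reg = <0x6a0000 0x1000>,
		      <0x6b0000 0x1000>;
		clocks = <&ap_clk 0>, <&ap_clk 1>;
		clock-names = "core", "reg";
		msi-parent = <&gic_v2m0>;
		dma-coherent;
	};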
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
from sphinx.ext.autodoc import AutodocReporter
__version__ = '1.0'
VERSION = 4
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Fearless Coyote
# *DOCUMENTATION*
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if (res.a0)
+ if ((int)res.a0 < 0)
return 0;
cb = call_hvc_arch_workaround_1;
smccc_start = __smccc_workaround_1_hvc_start;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if (res.a0)
+ if ((int)res.a0 < 0)
return 0;
cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start;
* The following mapping attributes may be updated in live
* kernel mappings without the need for break-before-make.
*/
- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
if (old == 0 || new == 0)
if ((old | new) & PTE_CONT)
return false;
- /* Transitioning from Global to Non-Global is safe */
- if (((old ^ new) == PTE_NG) && (new & PTE_NG))
- return true;
+ /* Transitioning from Non-Global to Global is unsafe */
+ if (old & ~new & PTE_NG)
+ return false;
return ((old ^ new) & ~mask) == 0;
}
}
board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+ if (!board_data)
+ goto error;
ath25_board.config = (struct ath25_boarddata *)board_data;
memcpy_fromio(board_data, bcfg, 0x100);
if (broken_boarddata) {
}
host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
raw_spin_lock_init(&host_data->lock);
addr = of_get_address(ciu_node, 0, NULL, NULL);
return;
}
- if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi0", NULL))
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
- if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
- "smp_ipi1", NULL))
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}
config LEMOTE_FULOONG2E
bool "Lemote Fuloong(2e) mini-PC"
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
select CEVT_R4K
select CSRC_R4K
select SYS_HAS_CPU_LOONGSON2E
config LEMOTE_MACH2F
bool "Lemote Loongson 2F family machines"
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
select BOARD_SCACHE
select BOOT_ELF32
select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
config LOONGSON_MACH3X
bool "Generic Loongson 3 family machines"
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select BOOT_ELF32
select BOARD_SCACHE
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
.mmu = 0,
.hash_ext = 0,
.radix_ext = 0,
- .byte22 = 0,
},
/* option vector 6: IBM PAPR hints */
kmem_cache_free(kvm_pte_cache, ptep);
}
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
unsigned int level, unsigned long mmu_seq)
{
else
new_pmd = pmd_alloc_one(kvm->mm, gpa);
- if (level == 0 && !(pmd && pmd_present(*pmd)))
+ if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
new_ptep = kvmppc_pte_alloc();
/* Check if we might have been invalidated; let the guest retry if so */
new_pmd = NULL;
}
pmd = pmd_offset(pud, gpa);
- if (pmd_large(*pmd)) {
- /* Someone else has instantiated a large page here; retry */
- ret = -EAGAIN;
- goto out_unlock;
- }
- if (level == 1 && !pmd_none(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
+ unsigned long lgpa = gpa & PMD_MASK;
+
+ /*
+ * If we raced with another CPU which has just put
+ * a 2MB pte in after we saw a pte page, try again.
+ */
+ if (level == 0 && !new_ptep) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+ /* Valid 2MB page here already, remove it */
+ old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+ ~0UL, 0, lgpa, PMD_SHIFT);
+ kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+ if (old & _PAGE_DIRTY) {
+ unsigned long gfn = lgpa >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (memslot && memslot->dirty_bitmap)
+ kvmppc_update_dirty_map(memslot,
+ gfn, PMD_SIZE);
+ }
+ } else if (level == 1 && !pmd_none(*pmd)) {
/*
* There's a page table page here, but we wanted
* to install a large page. Tell the caller and let
} else {
page = pages[0];
pfn = page_to_pfn(page);
- if (PageHuge(page)) {
- page = compound_head(page);
- pte_size <<= compound_order(page);
+ if (PageCompound(page)) {
+ pte_size <<= compound_order(compound_head(page));
/* See if we can insert a 2MB large-page PTE here */
if (pte_size >= PMD_SIZE &&
- (gpa & PMD_MASK & PAGE_MASK) ==
- (hva & PMD_MASK & PAGE_MASK)) {
+ (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+ (hva & (PMD_SIZE - PAGE_SIZE))) {
level = 1;
pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
}
}
/* See if we can provide write access */
if (writing) {
- /*
- * We assume gup_fast has set dirty on the host PTE.
- */
pgflags |= _PAGE_WRITE;
} else {
local_irq_save(flags);
ptep = find_current_mm_pte(current->mm->pgd,
hva, NULL, NULL);
- if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+ if (ptep && pte_write(*ptep))
pgflags |= _PAGE_WRITE;
local_irq_restore(flags);
}
pte = pfn_pte(pfn, __pgprot(pgflags));
ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
}
- if (ret == 0 || ret == -EAGAIN)
- ret = RESUME_GUEST;
if (page) {
- /*
- * We drop pages[0] here, not page because page might
- * have been set to the head page of a compound, but
- * we have to drop the reference on the correct tail
- * page to match the get inside gup()
- */
- put_page(pages[0]);
+ if (!ret && (pgflags & _PAGE_WRITE))
+ set_page_dirty_lock(page);
+ put_page(page);
}
+
+ if (ret == 0 || ret == -EAGAIN)
+ ret = RESUME_GUEST;
return ret;
}
continue;
pmd = pmd_offset(pud, 0);
for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
- if (pmd_huge(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
pmd_clear(pmd);
continue;
}
*/
trace_hardirqs_on();
- guest_enter();
+ guest_enter_irqoff();
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
- guest_exit();
-
trace_hardirqs_off();
set_irq_happened(trap);
kvmppc_set_host_core(pcpu);
local_irq_enable();
+ guest_exit();
/* Let secondaries go back to the offline loop */
for (i = 0; i < controlled_threads; ++i) {
goto up_out;
psize = vma_kernel_pagesize(vma);
- porder = __ilog2(psize);
up_read(&current->mm->mmap_sem);
/* We can handle 4k, 64k or 16M pages in the VRMA */
- err = -EINVAL;
- if (!(psize == 0x1000 || psize == 0x10000 ||
- psize == 0x1000000))
- goto out_srcu;
+ if (psize >= 0x1000000)
+ psize = 0x1000000;
+ else if (psize >= 0x10000)
+ psize = 0x10000;
+ else
+ psize = 0x1000;
+ porder = __ilog2(psize);
senc = slb_pgsize_encoding(psize);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, int is_default_endian)
{
- enum emulation_result emulated;
+ enum emulation_result emulated = EMULATE_DONE;
while (vcpu->arch.mmio_vmx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
kvm_sigset_deactivate(vcpu);
+#ifdef CONFIG_ALTIVEC
out:
+#endif
vcpu_put(vcpu);
return r;
}
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
{ "instruction_gs", VCPU_STAT(instruction_gs) },
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
it can be used to assist security vulnerability exploitation.
This setting can be changed at boot time via the kernel command
- line parameter vsyscall=[native|emulate|none].
+ line parameter vsyscall=[emulate|none].
On a system with recent enough glibc (2.14 or newer) and no
static binaries, you can say None without a performance penalty
If unsure, select "Emulate".
- config LEGACY_VSYSCALL_NATIVE
- bool "Native"
- help
- Actual executable code is located in the fixed vsyscall
- address mapping, implementing time() efficiently. Since
- this makes the mapping executable, it can be used during
- security vulnerability exploitation (traditionally as
- ROP gadgets). This configuration is not recommended.
-
config LEGACY_VSYSCALL_EMULATE
bool "Emulate"
help
pushq 2*8(%rdi) /* regs->ip */
pushq 1*8(%rdi) /* regs->orig_ax */
- movq (%rdi), %rdi /* restore %rdi */
-
- pushq %rdi /* pt_regs->di */
+ pushq (%rdi) /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
- /*
- * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
- * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
- *
- * The native 64-bit kernel's sys_clone() implements the latter,
- * so we need to swap arguments here before calling it:
- */
- xchg %r8, %rcx
- jmp sys_clone
-ENDPROC(stub32_clone)
#
0 i386 restart_syscall sys_restart_syscall
1 i386 exit sys_exit
-2 i386 fork sys_fork sys_fork
+2 i386 fork sys_fork
3 i386 read sys_read
4 i386 write sys_write
5 i386 open sys_open compat_sys_open
6 i386 close sys_close
-7 i386 waitpid sys_waitpid sys32_waitpid
+7 i386 waitpid sys_waitpid compat_sys_x86_waitpid
8 i386 creat sys_creat
9 i386 link sys_link
10 i386 unlink sys_unlink
69 i386 ssetmask sys_ssetmask
70 i386 setreuid sys_setreuid16
71 i386 setregid sys_setregid16
-72 i386 sigsuspend sys_sigsuspend sys_sigsuspend
+72 i386 sigsuspend sys_sigsuspend
73 i386 sigpending sys_sigpending compat_sys_sigpending
74 i386 sethostname sys_sethostname
75 i386 setrlimit sys_setrlimit compat_sys_setrlimit
87 i386 swapon sys_swapon
88 i386 reboot sys_reboot
89 i386 readdir sys_old_readdir compat_sys_old_readdir
-90 i386 mmap sys_old_mmap sys32_mmap
+90 i386 mmap sys_old_mmap compat_sys_x86_mmap
91 i386 munmap sys_munmap
92 i386 truncate sys_truncate compat_sys_truncate
93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
117 i386 ipc sys_ipc compat_sys_ipc
118 i386 fsync sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
-120 i386 clone sys_clone stub32_clone
+120 i386 clone sys_clone compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname
122 i386 uname sys_newuname
123 i386 modify_ldt sys_modify_ldt
177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
179 i386 rt_sigsuspend sys_rt_sigsuspend
-180 i386 pread64 sys_pread64 sys32_pread
-181 i386 pwrite64 sys_pwrite64 sys32_pwrite
+180 i386 pread64 sys_pread64 compat_sys_x86_pread
+181 i386 pwrite64 sys_pwrite64 compat_sys_x86_pwrite
182 i386 chown sys_chown16
183 i386 getcwd sys_getcwd
184 i386 capget sys_capget
187 i386 sendfile sys_sendfile compat_sys_sendfile
188 i386 getpmsg
189 i386 putpmsg
-190 i386 vfork sys_vfork sys_vfork
+190 i386 vfork sys_vfork
191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit
192 i386 mmap2 sys_mmap_pgoff
-193 i386 truncate64 sys_truncate64 sys32_truncate64
-194 i386 ftruncate64 sys_ftruncate64 sys32_ftruncate64
-195 i386 stat64 sys_stat64 sys32_stat64
-196 i386 lstat64 sys_lstat64 sys32_lstat64
-197 i386 fstat64 sys_fstat64 sys32_fstat64
+193 i386 truncate64 sys_truncate64 compat_sys_x86_truncate64
+194 i386 ftruncate64 sys_ftruncate64 compat_sys_x86_ftruncate64
+195 i386 stat64 sys_stat64 compat_sys_x86_stat64
+196 i386 lstat64 sys_lstat64 compat_sys_x86_lstat64
+197 i386 fstat64 sys_fstat64 compat_sys_x86_fstat64
198 i386 lchown32 sys_lchown
199 i386 getuid32 sys_getuid
200 i386 getgid32 sys_getgid
# 222 is unused
# 223 is unused
224 i386 gettid sys_gettid
-225 i386 readahead sys_readahead sys32_readahead
+225 i386 readahead sys_readahead compat_sys_x86_readahead
226 i386 setxattr sys_setxattr
227 i386 lsetxattr sys_lsetxattr
228 i386 fsetxattr sys_fsetxattr
247 i386 io_getevents sys_io_getevents compat_sys_io_getevents
248 i386 io_submit sys_io_submit compat_sys_io_submit
249 i386 io_cancel sys_io_cancel
-250 i386 fadvise64 sys_fadvise64 sys32_fadvise64
+250 i386 fadvise64 sys_fadvise64 compat_sys_x86_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
252 i386 exit_group sys_exit_group
253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
270 i386 tgkill sys_tgkill
271 i386 utimes sys_utimes compat_sys_utimes
-272 i386 fadvise64_64 sys_fadvise64_64 sys32_fadvise64_64
+272 i386 fadvise64_64 sys_fadvise64_64 compat_sys_x86_fadvise64_64
273 i386 vserver
274 i386 mbind sys_mbind
275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
297 i386 mknodat sys_mknodat
298 i386 fchownat sys_fchownat
299 i386 futimesat sys_futimesat compat_sys_futimesat
-300 i386 fstatat64 sys_fstatat64 sys32_fstatat
+300 i386 fstatat64 sys_fstatat64 compat_sys_x86_fstatat
301 i386 unlinkat sys_unlinkat
302 i386 renameat sys_renameat
303 i386 linkat sys_linkat
311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list
312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list
313 i386 splice sys_splice
-314 i386 sync_file_range sys_sync_file_range sys32_sync_file_range
+314 i386 sync_file_range sys_sync_file_range compat_sys_x86_sync_file_range
315 i386 tee sys_tee
316 i386 vmsplice sys_vmsplice compat_sys_vmsplice
317 i386 move_pages sys_move_pages compat_sys_move_pages
321 i386 signalfd sys_signalfd compat_sys_signalfd
322 i386 timerfd_create sys_timerfd_create
323 i386 eventfd sys_eventfd
-324 i386 fallocate sys_fallocate sys32_fallocate
+324 i386 fallocate sys_fallocate compat_sys_x86_fallocate
325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
- NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
NONE;
#else
EMULATE;
if (str) {
if (!strcmp("emulate", str))
vsyscall_mode = EMULATE;
- else if (!strcmp("native", str))
- vsyscall_mode = NATIVE;
else if (!strcmp("none", str))
vsyscall_mode = NONE;
else
WARN_ON_ONCE(address != regs->ip);
- /* This should be unreachable in NATIVE mode. */
- if (WARN_ON(vsyscall_mode == NATIVE))
- return false;
-
if (vsyscall_mode == NONE) {
warn_bad_vsyscall(KERN_INFO, regs,
"vsyscall attempted with vsyscall=none");
if (vsyscall_mode != NONE) {
__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
- vsyscall_mode == NATIVE
- ? PAGE_KERNEL_VSYSCALL
- : PAGE_KERNEL_VVAR);
+ PAGE_KERNEL_VVAR);
set_vsyscall_pgtable_user_bits(swapper_pg_dir);
}
};
static struct attribute *skx_upi_uncore_formats_attr[] = {
- &format_attr_event_ext.attr,
+ &format_attr_event.attr,
&format_attr_umask_ext.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,
#define AA(__x) ((unsigned long)(__x))
-asmlinkage long sys32_truncate64(const char __user *filename,
- unsigned long offset_low,
- unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+ unsigned long, offset_low, unsigned long, offset_high)
{
return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
}
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
- unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+ unsigned long, offset_low, unsigned long, offset_high)
{
return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}
return 0;
}
-asmlinkage long sys32_stat64(const char __user *filename,
- struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+ struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_stat(filename, &stat);
return ret;
}
-asmlinkage long sys32_lstat64(const char __user *filename,
- struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+ struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_lstat(filename, &stat);
return ret;
}
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+ struct stat64 __user *, statbuf)
{
struct kstat stat;
int ret = vfs_fstat(fd, &stat);
return ret;
}
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
- struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+ const char __user *, filename,
+ struct stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
int error;
unsigned int offset;
};
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
{
struct mmap_arg_struct32 a;
a.offset>>PAGE_SHIFT);
}
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
- int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+ stat_addr, int, options)
{
return compat_sys_wait4(pid, stat_addr, options, NULL);
}
/* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
- u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+ u32, count, u32, poslo, u32, poshi)
{
return sys_pread64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
- u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+ u32, count, u32, poslo, u32, poshi)
{
return sys_pwrite64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
- __u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+ __u32, offset_high, __u32, len_low, __u32, len_high,
+ int, advice)
{
return sys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
advice);
}
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
- size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+ unsigned int, off_hi, size_t, count)
{
return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
- unsigned n_low, unsigned n_hi, int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+ unsigned int, off_hi, unsigned int, n_low,
+ unsigned int, n_hi, int, flags)
{
return sys_sync_file_range(fd,
((u64)off_hi << 32) | off_low,
((u64)n_hi << 32) | n_low, flags);
}
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
- size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+ unsigned int, offset_hi, size_t, len, int, advice)
{
return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
len, advice);
}
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
- unsigned offset_hi, unsigned len_lo,
- unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+ unsigned int, offset_lo, unsigned int, offset_hi,
+ unsigned int, len_lo, unsigned int, len_hi)
{
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS: tls_val is passed
+ * before child_tidptr, while the native 64-bit sys_clone() takes
+ * child_tidptr before tls_val, so the wrapper below reorders them.
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+ unsigned long, newsp, int __user *, parent_tidptr,
+ unsigned long, tls_val, int __user *, child_tidptr)
+{
+ return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+ tls_val);
+}
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
#endif
#endif /* _ASM_X86_SECTIONS_H */
#include <asm/ia32.h>
/* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+ unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+ unsigned long);
-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+ struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+ struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
struct stat64 __user *, int);
struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+ int);
-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+ u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+ u32, u32);
-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+ int);
-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
- unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
- unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+ size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+ unsigned int, unsigned int,
+ int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+ size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+ unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+ unsigned long, int __user *);
/* ia32/ia32_signal.c */
asmlinkage long sys32_sigreturn(void);
__u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
__u64 ppin; /* Protected Processor Inventory Number */
+ __u32 microcode;/* Microcode revision */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
{
int i;
+ /*
+ * We know that hypervisors lie to us about the microcode version, so
+ * we may as well hope that they are running the correct version.
+ */
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
static DEFINE_MUTEX(mce_log_mutex);
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
rdmsrl(MSR_PPIN, m->ppin);
+
+ m->microcode = boot_cpu_data.microcode;
}
DEFINE_PER_CPU(struct mce, injectm);
*/
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
- cpu_data(m->extcpu).microcode);
+ m->microcode);
}
static void print_mce(struct mce *m)
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
+ mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
+ mutex_unlock(&mce_sysfs_mutex);
+
return size;
}
struct device_attribute *attr,
const char *buf, size_t size)
{
- ssize_t ret = device_store_int(s, attr, buf, size);
+ unsigned long old_check_interval = check_interval;
+ ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+ if (check_interval == old_check_interval)
+ return ret;
+
+ if (check_interval < 1)
+ check_interval = 1;
+
+ mutex_lock(&mce_sysfs_mutex);
mce_restart();
+ mutex_unlock(&mce_sysfs_mutex);
+
return ret;
}
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
+#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>
*/
static DEFINE_MUTEX(microcode_mutex);
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
return ret;
}
-struct apply_microcode_ctx {
- enum ucode_state err;
-};
-
static void apply_microcode_local(void *arg)
{
- struct apply_microcode_ctx *ctx = arg;
+ enum ucode_state *err = arg;
- ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+ *err = microcode_ops->apply_microcode(smp_processor_id());
}
static int apply_microcode_on_target(int cpu)
{
- struct apply_microcode_ctx ctx = { .err = 0 };
+ enum ucode_state err;
int ret;
- ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
- if (!ret)
- ret = ctx.err;
-
+ ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+ if (!ret) {
+ if (err == UCODE_ERROR)
+ ret = 1;
+ }
return ret;
}
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stop_machine() effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ * is loading microcode in order to avoid any negative interactions caused by
+ * the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ * requirement can be relaxed in the future. Right now, this is conservative
+ * and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- enum ucode_state ustate;
+ if (num_online_cpus() == num_present_cpus())
+ return 0;
- if (!uci->valid)
- return UCODE_OK;
+ pr_err("Not all CPUs online, aborting microcode update.\n");
+
+ return -EINVAL;
+}
+
+static atomic_t late_cpus;
+
+/*
+ * Returns:
+ * < 0 - on error
+ * 0 - no update done
+ * 1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+ unsigned int timeout = NSEC_PER_SEC;
+ int all_cpus = num_online_cpus();
+ int cpu = smp_processor_id();
+ enum ucode_state err;
+ int ret = 0;
+
+ atomic_dec(&late_cpus);
+
+ /*
+ * Wait for all CPUs to arrive. A load will not be attempted unless all
+ * CPUs show up.
+ */
+ while (atomic_read(&late_cpus)) {
+ if (timeout < SPINUNIT) {
+ pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+ atomic_read(&late_cpus));
+ return -1;
+ }
- ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
- if (ustate != UCODE_OK)
- return ustate;
+ ndelay(SPINUNIT);
+ timeout -= SPINUNIT;
- return apply_microcode_on_target(cpu);
+ touch_nmi_watchdog();
+ }
+
+ spin_lock(&update_lock);
+ apply_microcode_local(&err);
+ spin_unlock(&update_lock);
+
+ if (err > UCODE_NFOUND) {
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+ ret = -1;
+ } else if (err == UCODE_UPDATED) {
+ ret = 1;
+ }
+
+ atomic_inc(&late_cpus);
+
+ while (atomic_read(&late_cpus) != all_cpus)
+ cpu_relax();
+
+ return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait up to a second for them
+ * all to gather.
+ */
+static int microcode_reload_late(void)
+{
+ int ret;
+
+ atomic_set(&late_cpus, num_online_cpus());
+
+ ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ microcode_check();
+
+ return ret;
}
static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
enum ucode_state tmp_ret = UCODE_OK;
- bool do_callback = false;
+ int bsp = boot_cpu_data.cpu_index;
unsigned long val;
ssize_t ret = 0;
- int cpu;
ret = kstrtoul(buf, 0, &val);
if (ret)
if (val != 1)
return size;
- get_online_cpus();
- mutex_lock(&microcode_mutex);
- for_each_online_cpu(cpu) {
- tmp_ret = reload_for_cpu(cpu);
- if (tmp_ret > UCODE_NFOUND) {
- pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
- /* set retval for the first encountered reload error */
- if (!ret)
- ret = -EINVAL;
- }
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+ if (tmp_ret != UCODE_OK)
+ return size;
- if (tmp_ret == UCODE_UPDATED)
- do_callback = true;
- }
+ get_online_cpus();
- if (!ret && do_callback)
- microcode_check();
+ ret = check_online_cpus();
+ if (ret)
+ goto put;
+ mutex_lock(&microcode_mutex);
+ ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
+
+put:
put_online_cpus();
- if (!ret)
+ if (ret >= 0)
ret = size;
return ret;
if (!mc)
return 0;
+ /*
+ * Save us the MSR write below - which is a particularly expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = rev;
+ return UCODE_OK;
+ }
+
+ /*
+ * Writeback and invalidate caches before updating microcode to avoid
+ * internal issues depending on what the microcode is updating.
+ */
+ native_wbinvd();
+
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
static enum ucode_state apply_microcode_intel(int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
- struct ucode_cpu_info *uci;
- struct cpuinfo_x86 *c;
static int prev_rev;
u32 rev;
if (WARN_ON(raw_smp_processor_id() != cpu))
return UCODE_ERROR;
- uci = ucode_cpu_info + cpu;
- mc = uci->mc;
+ /* Look for a newer patch in our cache: */
+ mc = find_patch(uci);
if (!mc) {
- /* Look for a newer patch in our cache: */
- mc = find_patch(uci);
+ mc = uci->mc;
if (!mc)
return UCODE_NFOUND;
}
+ /*
+ * Save us the MSR write below - which is a particularly expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+ return UCODE_OK;
+ }
+
+ /*
+ * Writeback and invalidate caches before updating microcode to avoid
+ * internal issues depending on what the microcode is updating.
+ */
+ native_wbinvd();
+
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
prev_rev = rev;
}
- c = &cpu_data(cpu);
-
uci->cpu_sig.rev = rev;
c->microcode = rev;
/*
* this changes the io permissions bitmap in the current task.
*/
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
struct thread_struct *t = &current->thread;
struct tss_struct *tss;
bool arch_within_kprobe_blacklist(unsigned long addr)
{
+ bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+ is_in_entry_trampoline_section =
+ (addr >= (unsigned long)__entry_trampoline_start &&
+ addr < (unsigned long)__entry_trampoline_end);
+#endif
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end);
+ addr < (unsigned long)__entry_text_end) ||
+ is_in_entry_trampoline_section;
}
int __init arch_init_kprobes(void)
#ifdef CONFIG_X86_64
. = ALIGN(PAGE_SIZE);
+ VMLINUX_SYMBOL(__entry_trampoline_start) = .;
_entry_trampoline = .;
*(.entry_trampoline)
. = ALIGN(PAGE_SIZE);
+ VMLINUX_SYMBOL(__entry_trampoline_end) = .;
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
#endif
}
/*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
*/
static void __init pti_setup_espfix64(void)
{
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+ iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned int i, max_page_order;
unsigned int ring_page_order;
+ if (!info)
+ return -ENODEV;
+
max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
"max-ring-page-order", 0);
ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
info->nr_ring_pages = 1 << ring_page_order;
+ err = negotiate_mq(info);
+ if (err)
+ goto destroy_blkring;
+
for (i = 0; i < info->nr_rings; i++) {
struct blkfront_ring_info *rinfo = &info->rinfo[i];
}
info->xbdev = dev;
- err = negotiate_mq(info);
- if (err) {
- kfree(info);
- return err;
- }
mutex_init(&info->mutex);
info->vdevice = vdevice;
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
- err = negotiate_mq(info);
- if (err)
- return err;
-
err = talk_to_blkback(dev, info);
if (!err)
blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
+ depends on HAS_IOMEM
select TIMER_OF
select MFD_SYSCON
help
void __iomem *dma_base;
void __iomem *glob_base;
struct clk *clk;
+ struct clk *reg_clk;
struct tasklet_struct irq_tasklet;
struct list_head free_sw_desc;
struct dma_device dmadev;
if (ret)
return ret;
+ xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+ if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+ if (!IS_ERR(xor_dev->reg_clk)) {
+ ret = clk_prepare_enable(xor_dev->reg_clk);
+ if (ret)
+ return ret;
+ } else {
+ return PTR_ERR(xor_dev->reg_clk);
+ }
+ }
+
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto disable_reg_clk;
+ }
if (!IS_ERR(xor_dev->clk)) {
ret = clk_prepare_enable(xor_dev->clk);
if (ret)
- return ret;
+ goto disable_reg_clk;
}
ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
free_msi_irqs:
platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
- if (!IS_ERR(xor_dev->clk))
- clk_disable_unprepare(xor_dev->clk);
+ clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+ clk_disable_unprepare(xor_dev->reg_clk);
return ret;
}
rcar_dmac_chan_configure_desc(chan, desc);
- max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+ max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
/*
* Allocate and fill the transfer chunk descriptors. We own the only
* GNU General Public License for more details.
*/
-#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
struct platform_device *pdev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- struct clk *clk;
unsigned int irq_parent;
+ atomic_t wakeup_path;
bool has_both_edge_trigger;
- bool needs_clk;
};
#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
}
}
- if (!p->clk)
- return 0;
-
if (on)
- clk_enable(p->clk);
+ atomic_inc(&p->wakeup_path);
else
- clk_disable(p->clk);
+ atomic_dec(&p->wakeup_path);
return 0;
}
struct gpio_rcar_info {
bool has_both_edge_trigger;
- bool needs_clk;
};
static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
.has_both_edge_trigger = false,
- .needs_clk = false,
};
static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
.has_both_edge_trigger = true,
- .needs_clk = true,
};
static const struct of_device_id gpio_rcar_of_table[] = {
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
p->has_both_edge_trigger = info->has_both_edge_trigger;
- p->needs_clk = info->needs_clk;
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(&p->pdev->dev,
platform_set_drvdata(pdev, p);
- p->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(p->clk)) {
- if (p->needs_clk) {
- dev_err(dev, "unable to get clock\n");
- ret = PTR_ERR(p->clk);
- goto err0;
- }
- p->clk = NULL;
- }
-
pm_runtime_enable(dev);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
return 0;
}
+static int __maybe_unused gpio_rcar_suspend(struct device *dev)
+{
+ struct gpio_rcar_priv *p = dev_get_drvdata(dev);
+
+ if (atomic_read(&p->wakeup_path))
+ device_set_wakeup_path(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
+
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
.remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
+ .pm = &gpio_rcar_pm_ops,
.of_match_table = of_match_ptr(gpio_rcar_of_table),
}
};
size_t size;
u32 retry = 3;
+ if (amdgpu_acpi_pcie_notify_device_ready(adev))
+ return -EINVAL;
+
/* Get the device handle */
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
result = 0;
if (*pos < 12) {
- early[0] = amdgpu_ring_get_rptr(ring);
+ early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
cancel_delayed_work_sync(&adev->uvd.idle_work);
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- break;
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- if (i == AMDGPU_MAX_UVD_HANDLES)
- return 0;
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_work(&adev->hotplug_work);
- DRM_INFO("IH: HPD%d\n", hpd + 1);
+ DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
return 0;
case CHIP_KAVERI:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 4;
- if ((adev->pdev->device == 0x1304) ||
- (adev->pdev->device == 0x1305) ||
- (adev->pdev->device == 0x130C) ||
- (adev->pdev->device == 0x130F) ||
- (adev->pdev->device == 0x1310) ||
- (adev->pdev->device == 0x1311) ||
- (adev->pdev->device == 0x131C)) {
- adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1309) ||
- (adev->pdev->device == 0x130A) ||
- (adev->pdev->device == 0x130D) ||
- (adev->pdev->device == 0x1313) ||
- (adev->pdev->device == 0x131D)) {
- adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1306) ||
- (adev->pdev->device == 0x1307) ||
- (adev->pdev->device == 0x130B) ||
- (adev->pdev->device == 0x130E) ||
- (adev->pdev->device == 0x1315) ||
- (adev->pdev->device == 0x131B)) {
- adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
- } else {
- adev->gfx.config.max_cu_per_sh = 3;
- adev->gfx.config.max_backends_per_se = 1;
- }
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
+#include "amd_pcie.h"
#include "amdgpu_powerplay.h"
#include "sid.h"
#include "si_ih.h"
{
struct pci_dev *root = adev->pdev->bus->self;
int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
+ u32 speed_cntl, current_data_rate;
+ int i;
u16 tmp16;
if (pci_is_root_bus(adev->pdev->bus))
if (adev->flags & AMD_IS_APU)
return;
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
+ } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
if (!gpu_pos)
return;
- if (mask & DRM_PCIE_SPEED_80) {
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
u16 bridge_cfg2, gpu_cfg2;
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= 3;
- else if (mask & DRM_PCIE_SPEED_50)
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= 2;
else
tmp16 |= 1;
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
+#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
}
}
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
-{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
-
static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
table->ACPIState.levels[0].vddc.index,
&table->ACPIState.levels[0].std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
+ table->ACPIState.levels[0].gen2PCIE =
+ (u8)amdgpu_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ AMDGPU_PCIE_GEN1);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = r600_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
+ pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
int ret;
- u32 mask;
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (ret)
- si_pi->sys_pcie_mask = 0;
- else
- si_pi->sys_pcie_mask = mask;
+ si_pi->sys_pcie_mask =
+ (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+ CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
amdgpu_dm_update_connector_after_detect(aconnector);
dst.width = stream->timing.h_addressable;
dst.height = stream->timing.v_addressable;
- rmx_type = dm_state->scaling;
- if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
- if (src.width * dst.height <
- src.height * dst.width) {
- /* height needs less upscaling/more downscaling */
- dst.width = src.width *
- dst.height / src.height;
- } else {
- /* width needs less upscaling/more downscaling */
- dst.height = src.height *
- dst.width / src.width;
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
}
- } else if (rmx_type == RMX_CENTER) {
- dst = src;
- }
- dst.x = (stream->timing.h_addressable - dst.width) / 2;
- dst.y = (stream->timing.v_addressable - dst.height) / 2;
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
- if (dm_state->underscan_enable) {
- dst.x += dm_state->underscan_hborder / 2;
- dst.y += dm_state->underscan_vborder / 2;
- dst.width -= dm_state->underscan_hborder;
- dst.height -= dm_state->underscan_vborder;
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
}
stream->src = src;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
- goto drm_connector_null;
- }
-
- if (dm_state == NULL) {
- DRM_ERROR("dm_state is NULL!\n");
- goto dm_state_null;
+ return stream;
}
drm_connector = &aconnector->base;
*/
if (aconnector->mst_port) {
dm_dp_mst_dc_sink_create(drm_connector);
- goto mst_dc_sink_create_done;
+ return stream;
}
if (create_fake_sink(aconnector))
- goto stream_create_fail;
+ return stream;
}
stream = dc_create_stream_for_sink(aconnector->dc_sink);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
- goto stream_create_fail;
+ return stream;
}
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
- dm_state->scaling != RMX_OFF);
+ dm_state ? (dm_state->scaling != RMX_OFF) : false);
}
+ if (!dm_state)
+ drm_mode_set_crtcinfo(&mode, 0);
+
fill_stream_properties_from_drm_display_mode(stream,
&mode, &aconnector->base);
update_stream_scaling_settings(&mode, dm_state, stream);
drm_connector,
aconnector->dc_sink);
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+ update_stream_signal(stream);
+
return stream;
}
return &state->base;
}
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+ return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+ dm_set_vblank(crtc, false);
+}
+
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
+ .enable_vblank = dm_enable_vblank,
+ .disable_vblank = dm_disable_vblank,
};
static enum drm_connector_status
goto fail;
}
- stream = dc_create_stream_for_sink(dc_sink);
+ stream = create_stream_for_sink(aconnector, mode, NULL);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
goto fail;
if (!dm_plane_state->dc_state)
return 0;
+ if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+ return -EINVAL;
+
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;
bool pflip_needed = !state->allow_modeset;
int ret = 0;
- if (pflip_needed)
- return ret;
/* Add new planes */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
/* Remove any changed/removed planes */
if (!enable) {
+ if (pflip_needed)
+ continue;
if (!old_plane_crtc)
continue;
*lock_and_validation_needed = true;
} else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
continue;
if (!dm_new_crtc_state->stream)
continue;
+ if (pflip_needed)
+ continue;
WARN_ON(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = dc_create_plane_state(dc);
-
- DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
- plane->base.id, new_plane_crtc->base.id);
-
- if (!dm_new_plane_state->dc_state) {
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state) {
ret = -EINVAL;
return ret;
}
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
ret = fill_plane_attributes(
new_plane_crtc->dev->dev_private,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
new_plane_state,
new_crtc_state);
- if (ret)
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
return ret;
+ }
-
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
if (!dc_add_plane_to_context(
dc,
dm_new_crtc_state->stream,
- dm_new_plane_state->dc_state,
+ dc_new_plane_state,
dm_state->context)) {
+ dc_plane_state_release(dc_new_plane_state);
ret = -EINVAL;
return ret;
}
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
/* Tell DC to do a full surface update every time there
* is a plane change. Inefficient, but works for now.
*/
return ret;
}
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_crtc_state *crtc_state;
+
+ WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(plane_state))
+ return -EDEADLK;
+
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+ if (crtc->primary == plane && crtc_state->active) {
+ if (!plane_state->fb)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_atomic_check_plane_state_fb(state, crtc);
+ if (ret)
+ goto fail;
+
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed)
continue;
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
+
+ adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ /*
+ * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+ */
+ if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+ return;
+
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
if (!edid) {
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
if (dc == NULL)
- return;
+ return false;
- dal_irq_service_set(dc->res_pool->irqs, src, enable);
+ return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
link->link_enc,
pipe_ctx->clock_source->id,
display_color_depth,
- pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
- pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+ pipe_ctx->stream->signal,
stream->phy_pix_clk);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
return true;
}
-/* Maximum TMDS single link pixel clock 165MHz */
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
-
static void update_stream_engine_usage(
struct resource_context *res_ctx,
const struct resource_pool *pool,
/*******************************************************************************
* Private functions
******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
{
struct dc_sink *dc_sink = stream->sink;
stream->signal = dc_sink->sink_signal;
if (dc_is_dvi_signal(stream->signal)) {
- if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
- stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ if (stream->ctx->dc->caps.dual_link_dvi &&
+ stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+ stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
else
stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
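Worked example of the new signal selection: a 2560x1600@60 DVI mode needs roughly a 268 MHz pixel clock, well above TMDS_MAX_PIXEL_CLOCK (165 MHz in kHz units). On an ASIC that sets caps.dual_link_dvi, and with a sink not forced to single link, the stream is promoted to SIGNAL_TYPE_DVI_DUAL_LINK; on anything else it stays SIGNAL_TYPE_DVI_SINGLE_LINK. Previously the promotion happened on any ASIC once the clock exceeded the 297 MHz UPMOST threshold.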
core_dc = stream->ctx->dc;
res_ctx = &core_dc->current_state->res_ctx;
+ stream->cursor_attributes = *attributes;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
continue;
- if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
- pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
- pipe_ctx->plane_res.ipp, attributes);
-
- if (pipe_ctx->plane_res.hubp != NULL &&
- pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.hubp, attributes);
-
- if (pipe_ctx->plane_res.mi != NULL &&
- pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.mi, attributes);
-
-
- if (pipe_ctx->plane_res.xfm != NULL &&
- pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.xfm, attributes);
-
- if (pipe_ctx->plane_res.dpp != NULL &&
- pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
- pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.dpp, attributes->color_format);
+ core_dc->hwss.set_cursor_attribute(pipe_ctx);
}
-
- stream->cursor_attributes = *attributes;
-
return true;
}
core_dc = stream->ctx->dc;
res_ctx = &core_dc->current_state->res_ctx;
+ stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
- struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dc_cursor_position pos_cpy = *position;
- struct dc_cursor_mi_param param = {
- .pixel_clk_khz = stream->timing.pix_clk_khz,
- .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
- .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
- .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
- .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
- };
if (pipe_ctx->stream != stream ||
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_res.ipp)
continue;
- if (pipe_ctx->plane_state->address.type
- == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- pos_cpy.enable = false;
-
- if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
- pos_cpy.enable = false;
-
-
- if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
- ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
-
- if (mi != NULL && mi->funcs->set_cursor_position != NULL)
- mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
-
- if (!hubp)
- continue;
-
- if (hubp->funcs->set_cursor_position != NULL)
- hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
-
- if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
- dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
-
+ core_dc->hwss.set_cursor_position(pipe_ctx);
}
- stream->cursor_position = *position;
-
return true;
}
bool dcc_const_color;
bool dynamic_audio;
bool is_apu;
+ bool dual_link_dvi;
};
struct dc_dcc_surface_param {
struct dc *dc,
uint32_t src_id,
uint32_t ext_id);
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
*/
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+void update_stream_signal(struct dc_stream_state *stream);
+
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
SR(D2VGA_CONTROL), \
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
+ SR(VGA_TEST_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST()
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
uint32_t D4VGA_CONTROL;
+ uint32_t VGA_TEST_CONTROL;
/* MMHUB registers. read only. temporary hack */
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
type DENTIST_DPPCLK_WDIVIDER; \
- type DENTIST_DISPCLK_WDIVIDER;
+ type DENTIST_DISPCLK_WDIVIDER; \
+ type VGA_TEST_ENABLE; \
+ type VGA_TEST_RENDER_START; \
+ type D1VGA_MODE_ENABLE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
-/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
-#define TMDS_MIN_PIXEL_CLOCK 25000
-/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
-#define TMDS_MAX_PIXEL_CLOCK 165000
-/* For current ASICs pixel clock - 600MHz */
-#define MAX_ENCODER_CLOCK 600000
-
enum {
DP_MST_UPDATE_MAX_RETRY = 50
};
{
struct bp_encoder_cap_info bp_cap_info = {0};
const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
enc110->base.funcs = &dce110_lnk_enc_funcs;
enc110->base.ctx = init_data->ctx;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
+ /* default to one to mirror Windows behavior */
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+ enc110->base.id, &bp_cap_info);
+
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
- enc110->base.ctx->dc_bios, enc110->base.id,
- &bp_cap_info)) {
+ if (BP_RESULT_OK == result) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ } else {
+ dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
+ "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
}
}
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock)
{
struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
cntl.pll_id = clock_source;
- if (hdmi) {
- cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
- cntl.lanes_number = 4;
- } else if (dual_link) {
- cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ cntl.signal = signal;
+ if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
cntl.lanes_number = 8;
- } else {
- cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else
cntl.lanes_number = 4;
- }
+
cntl.hpd_sel = enc110->base.hpd_source;
cntl.pixel_clock = pixel_clock;
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock);
/* enables DP PHY output */
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct dc_link *link = pipe_ctx->stream->sink->link;
- /* 1. update AVI info frame (HDMI, DP)
- * we always need to update info frame
- */
+
uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- /* TODOFPGA may change to hwss.update_info_frame */
+ /* For MST, multiple streams can share a single link.
+ * Connect the DIG back end to the front end in enable_stream and
+ * disconnect them in disable_stream.
+ * This keeps the separation between stream and link logically clean. */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+ /* update AVI info frame (HDMI, DP) */
+ /* TODO: FPGA may change to hwss.update_info_frame */
dce110_update_info_frame(pipe_ctx);
+
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
}
- /* For MST, there are multiply stream go to only one link.
- * connect DIG back_end to front_end while enable_stream and
- * disconnect them during disable_stream
- * BY this, it is logic clean to separate stream and link */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+
+
}
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+ uint32_t i;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &context->res_ctx;
+
ASSERT(dc->fbc_compressor);
if (context->stream_count != 1)
return false;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ *pipe_idx = i;
+ break;
+ }
+ }
+
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
return false;
static void enable_fbc(struct dc *dc,
struct dc_state *context)
{
- if (should_enable_fbc(dc, context)) {
+ uint32_t pipe_idx = 0;
+
+ if (should_enable_fbc(dc, context, &pipe_idx)) {
/* Program GRPH COMPRESSED ADDRESS and PITCH */
struct compr_addr_and_pitch_params params = {0, 0, 0};
struct compressor *compr = dc->fbc_compressor;
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
params.source_view_width = pipe_ctx->stream->timing.h_addressable;
params.source_view_height = pipe_ctx->stream->timing.v_addressable;
}
}
+void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+ if (ipp->funcs->ipp_cursor_set_position)
+ ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, ¶m);
+ if (mi->funcs->set_cursor_position)
+ mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m);
+}
+
+void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+ pipe_ctx->plane_res.ipp, attributes);
+
+ if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.mi, attributes);
+
+ if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.xfm, attributes);
+}
+
static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
static void optimize_shared_resources(struct dc *dc) {}
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .set_cursor_position = dce110_set_cursor_position,
+ .set_cursor_attribute = dce110_set_cursor_attribute
};
void dce110_hw_sequencer_construct(struct dc *dc)
return result;
}
+enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+ struct dc_caps *caps)
+{
+ if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
+ ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
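In numbers: the new dce110_validate_plane() allows at most a 2x downscale per axis. A 3840-wide source into a 1920-wide destination passes (1920 * 2 = 3840, which is not less than 3840), while a 4096-wide source into the same 1920-wide destination fails with DC_FAIL_SURFACE_VALIDATE (3840 < 4096).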
static bool dce110_validate_surface_sets(
struct dc_state *context)
{
plane->src_rect.height > 1080))
return false;
+ /* We don't support underlay-only configurations yet,
+ * so block the use case where an NV12 plane would be
+ * the top layer.
+ */
+ if (j == 0)
+ return false;
+
/* irrespective of plane format,
* stream should be RGB encoded
*/
.link_enc_create = dce110_link_encoder_create,
.validate_guaranteed = dce110_validate_guaranteed,
.validate_bandwidth = dce110_validate_bandwidth,
+ .validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
.validate_global = dce110_validate_global
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
+
/*************************************************
* Create resources *
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
+
dc->debug = debug_defaults;
/*************************************************
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
+ dc->caps.dual_link_dvi = true;
/*************************************************
* Create resources *
static void disable_vga(
struct dce_hwseq *hws)
{
+ unsigned int in_vga_mode = 0;
+
+ REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
+
+ if (in_vga_mode == 0)
+ return;
+
REG_WRITE(D1VGA_CONTROL, 0);
- REG_WRITE(D2VGA_CONTROL, 0);
- REG_WRITE(D3VGA_CONTROL, 0);
- REG_WRITE(D4VGA_CONTROL, 0);
+
+ /* HW Engineer's Notes:
+ * During the switch from VGA to extended mode, setting VGA_TEST_ENABLE and
+ * then hitting VGA_TEST_RENDER_START gets the DCHUBP timing updated correctly.
+ *
+ * The VBIOS then polls for VGA_TEST_RENDER_DONE and clears
+ * VGA_TEST_ENABLE, leaving everything in the same state as before.
+ */
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+ REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
static void dpp_pg_control(
&pipe_ctx->plane_res.scl_data.viewport_c);
}
+ if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+ }
+
if (plane_state->update_flags.bits.full_update) {
/*gamut remap*/
program_gamut_remap(pipe_ctx);
return true;
}
-void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
}
}
-void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
if (hws->ctx->dc->res_pool->hubbub != NULL)
hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
}
+static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+}
+
+static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes->color_format);
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_csc_matrix = program_csc_matrix,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute
};
void (*enable_tmds_output)(struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock);
void (*enable_dp_output)(struct link_encoder *enc,
const struct dc_link_settings *link_settings,
bool enable);
void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
+ void (*set_cursor_position)(struct pipe_ctx *pipe);
+ void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
};
void color_space_to_black_color(
core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
if (enable) {
- if (!tg->funcs->arm_vert_intr(tg, 2)) {
+ if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
DC_ERROR("Failed to get VBLANK!\n");
return false;
}
struct link_encoder *enc,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
- bool hdmi,
- bool dual_link,
+ enum signal_type signal,
uint32_t pixel_clock) {}
static void virtual_link_encoder_enable_dp_output(
bool backlight_changed;
};
-enum {
- HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
- TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
-};
-
/*
* DFS-bypass flag
*/
#ifndef __DC_SIGNAL_TYPES_H__
#define __DC_SIGNAL_TYPES_H__
+/* Minimum pixel clock, in kHz. For a TMDS signal this is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in kHz. For a TMDS signal this is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+
enum signal_type {
SIGNAL_TYPE_NONE = 0L, /* no signal */
SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ i915_gem_reset_prepare_engine(engine);
engine->submit_request = nop_submit_request;
+ }
/*
* Make sure no one is running the old callback before we proceed with
intel_engine_init_global_seqno(engine,
intel_engine_last_submit(engine));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ i915_gem_reset_finish_engine(engine);
}
set_bit(I915_WEDGED, &i915->gpu_error.flags);
*/
mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.exclusive_stream = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
free_oa_buffer(dev_priv);
* Note: it's only the RCS/Render context that has any OA state.
*/
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config,
- bool interruptible)
+ const struct i915_oa_config *oa_config)
{
struct i915_gem_context *ctx;
int ret;
unsigned int wait_flags = I915_WAIT_LOCKED;
- if (interruptible) {
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
- wait_flags |= I915_WAIT_INTERRUPTIBLE;
- } else {
- mutex_lock(&dev_priv->drm.struct_mutex);
- }
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Switch away from any user context. */
ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
}
out:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return ret;
}
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
+ ret = gen8_configure_all_contexts(dev_priv, oa_config);
if (ret)
return ret;
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL, false);
+ gen8_configure_all_contexts(dev_priv, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL, false);
+ gen8_configure_all_contexts(dev_priv, NULL);
/* Make sure we disable noa to save power. */
I915_WRITE(RPM_CONFIG1,
if (ret)
goto err_oa_buf_alloc;
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ goto err_lock;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
stream->oa_config);
if (ret)
stream->ops = &i915_oa_stream_ops;
- /* Lock device for exclusive_stream access late because
- * enable_metric_set() might lock as well on gen8+.
- */
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
dev_priv->perf.oa.exclusive_stream = stream;
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
-err_lock:
+err_enable:
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
-err_enable:
+err_lock:
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
struct rb_node *rb;
unsigned long flags;
+ GEM_TRACE("%s\n", engine->name);
+
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
*/
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* Mark all CS interrupts as complete */
+ execlists->active = 0;
+
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
case CHIP_KAVERI:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 4;
- if ((rdev->pdev->device == 0x1304) ||
- (rdev->pdev->device == 0x1305) ||
- (rdev->pdev->device == 0x130C) ||
- (rdev->pdev->device == 0x130F) ||
- (rdev->pdev->device == 0x1310) ||
- (rdev->pdev->device == 0x1311) ||
- (rdev->pdev->device == 0x131C)) {
- rdev->config.cik.max_cu_per_sh = 8;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1309) ||
- (rdev->pdev->device == 0x130A) ||
- (rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313) ||
- (rdev->pdev->device == 0x131D)) {
- rdev->config.cik.max_cu_per_sh = 6;
- rdev->config.cik.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x1306) ||
- (rdev->pdev->device == 0x1307) ||
- (rdev->pdev->device == 0x130B) ||
- (rdev->pdev->device == 0x130E) ||
- (rdev->pdev->device == 0x1315) ||
- (rdev->pdev->device == 0x1318) ||
- (rdev->pdev->device == 0x131B)) {
- rdev->config.cik.max_cu_per_sh = 4;
- rdev->config.cik.max_backends_per_se = 1;
- } else {
- rdev->config.cik.max_cu_per_sh = 3;
- rdev->config.cik.max_backends_per_se = 1;
- }
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
rdev->config.cik.max_sh_per_se = 1;
rdev->config.cik.max_texture_channel_caches = 4;
rdev->config.cik.max_gprs = 256;
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
+ drm_crtc_vblank_off(crtc);
+
sun4i_tcon_set_status(scrtc->tcon, encoder, false);
if (crtc->state->event && !crtc->state->active) {
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+ drm_crtc_vblank_on(crtc);
}
static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u32 val = degrees / 120;
+
+ val <<= 28;
regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
GENMASK(29, 28),
- degrees / 120);
+ val);
return 0;
}
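Worked example: requesting a phase of 240 degrees yields val = 240 / 120 = 2, and 2 << 28 = 0x20000000, which lands inside GENMASK(29, 28) = 0x30000000. The old code passed the unshifted value 2 straight to regmap_update_bits(), where masking with GENMASK(29, 28) collapsed it to 0, so every phase request silently programmed phase 0.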
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ tcon->dclk_min_div = 6;
+ tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
if (rounded_rate < rate)
return MODE_CLOCK_LOW;
return;
}
- if (enabled)
+ if (enabled) {
clk_prepare_enable(clk);
- else
+ } else {
+ clk_rate_exclusive_put(clk);
clk_disable_unprepare(clk);
+ }
}
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
return ret;
}
- /*
- * This can only be made optional since we've had DT nodes
- * without the LVDS reset properties.
- *
- * If the property is missing, just disable LVDS, and print a
- * warning.
- */
- tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
- if (IS_ERR(tcon->lvds_rst)) {
- dev_err(dev, "Couldn't get our reset line\n");
- return PTR_ERR(tcon->lvds_rst);
- } else if (tcon->lvds_rst) {
- has_lvds_rst = true;
- reset_control_reset(tcon->lvds_rst);
- } else {
- has_lvds_rst = false;
- }
+ if (tcon->quirks->supports_lvds) {
+ /*
+ * This can only be made optional since we've had DT
+ * nodes without the LVDS reset properties.
+ *
+ * If the property is missing, just disable LVDS, and
+ * print a warning.
+ */
+ tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+ if (IS_ERR(tcon->lvds_rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon->lvds_rst);
+ } else if (tcon->lvds_rst) {
+ has_lvds_rst = true;
+ reset_control_reset(tcon->lvds_rst);
+ } else {
+ has_lvds_rst = false;
+ }
- /*
- * This can only be made optional since we've had DT nodes
- * without the LVDS reset properties.
- *
- * If the property is missing, just disable LVDS, and print a
- * warning.
- */
- if (tcon->quirks->has_lvds_alt) {
- tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
- if (IS_ERR(tcon->lvds_pll)) {
- if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
- has_lvds_alt = false;
+ /*
+ * This can only be made optional since we've had DT
+ * nodes without the LVDS PLL clock property.
+ *
+ * If the property is missing, just disable LVDS, and
+ * print a warning.
+ */
+ if (tcon->quirks->has_lvds_alt) {
+ tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+ if (IS_ERR(tcon->lvds_pll)) {
+ if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+ has_lvds_alt = false;
+ } else {
+ dev_err(dev, "Couldn't get the LVDS PLL\n");
+ return PTR_ERR(tcon->lvds_pll);
+ }
} else {
- dev_err(dev, "Couldn't get the LVDS PLL\n");
- return PTR_ERR(tcon->lvds_pll);
+ has_lvds_alt = true;
}
- } else {
- has_lvds_alt = true;
}
- }
- if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
- dev_warn(dev,
- "Missing LVDS properties, Please upgrade your DT\n");
- dev_warn(dev, "LVDS output disabled\n");
- can_lvds = false;
+ if (!has_lvds_rst ||
+ (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+ dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+ dev_warn(dev, "LVDS output disabled\n");
+ can_lvds = false;
+ } else {
+ can_lvds = true;
+ }
} else {
- can_lvds = true;
+ can_lvds = false;
}
ret = sun4i_tcon_init_clocks(dev, tcon);
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
- /* nothing is supported */
+ .supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
bool has_channel_1; /* a33 does not have channel 1 */
bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
bool needs_de_be_mux; /* sun6i needs mux to select backend */
+ bool supports_lvds; /* Does the TCON support an LVDS output? */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
dst_release(dst);
}
- if (ndev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip(dst_in, addr);
- /*
- * Put the loopback device and get the translated
- * device instead.
- */
+ if (ndev) {
+ if (ndev->flags & IFF_LOOPBACK)
+ ret = rdma_translate_ip(dst_in, addr);
+ else
+ addr->bound_dev_if = ndev->ifindex;
dev_put(ndev);
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- addr->bound_dev_if = ndev->ifindex;
}
- dev_put(ndev);
return ret;
}
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
+#define IB_POLL_BATCH_DIRECT 8
/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ 256
#define IB_POLL_FLAGS \
(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+ int batch)
{
int i, n, completed = 0;
- struct ib_wc *wcs = poll_wc ? : cq->wc;
/*
* budget might be (-1) if the caller does not
* want to bound this call, thus we need unsigned
* minimum here.
*/
- while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
- budget - completed), wcs)) > 0) {
+ while ((n = ib_poll_cq(cq, min_t(u32, batch,
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
struct ib_wc *wc = &wcs[i];
completed += n;
- if (n != IB_POLL_BATCH ||
- (budget != -1 && completed >= budget))
+ if (n != batch || (budget != -1 && completed >= budget))
break;
}
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- struct ib_wc wcs[IB_POLL_BATCH];
+ struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
- return __ib_process_cq(cq, budget, wcs);
+ return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
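The direct path now polls through a smaller on-stack array (IB_POLL_BATCH_DIRECT = 8 WCs instead of 16) to bound stack usage, while the IRQ and workqueue paths keep batching through the preallocated cq->wc. A minimal caller sketch, assuming a CQ created for direct polling (drain_cq_direct is an illustrative name, not part of the patch):

        #include <rdma/ib_verbs.h>

        static int drain_cq_direct(struct ib_cq *cq)
        {
                /* With budget -1 the helper keeps calling ib_poll_cq()
                 * until a batch comes back short, so one call drains the
                 * queue; each poll still fetches at most 8 WCs. */
                return ib_process_cq_direct(cq, -1);
        }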
struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
int completed;
- completed = __ib_process_cq(cq, budget, NULL);
+ completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
if (completed < budget) {
irq_poll_complete(&cq->iop);
if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
struct ib_cq *cq = container_of(work, struct ib_cq, work);
int completed;
- completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+ IB_POLL_BATCH);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
queue_work(ib_comp_wq, &cq->work);
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
pr_warn("Couldn't query the device attributes\n");
- goto cache_cleanup;
+ goto cg_cleanup;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
pr_warn("Couldn't register device %s with driver model\n",
device->name);
- goto cache_cleanup;
+ goto cg_cleanup;
}
device->reg_state = IB_DEV_REGISTERED;
mutex_unlock(&device_mutex);
return 0;
+cg_cleanup:
+ ib_device_unregister_rdmacg(device);
cache_cleanup:
ib_cache_cleanup_one(device);
ib_cache_release_one(device);
resolved_dev = dev_get_by_index(dev_addr.net,
dev_addr.bound_dev_if);
- if (resolved_dev->flags & IFF_LOOPBACK) {
- dev_put(resolved_dev);
- resolved_dev = idev;
- dev_hold(resolved_dev);
+ if (!resolved_dev) {
+ dev_put(idev);
+ return -ENODEV;
}
ndev = ib_get_ndev_from_path(rec);
rcu_read_lock();
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (cmd.qp_state > IB_QPS_ERR)
+ return -EINVAL;
+
ctx = ucma_get_ctx(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+ return -EINVAL;
+
optval = memdup_user((void __user *) (unsigned long) cmd.optval,
cmd.optlen);
if (IS_ERR(optval)) {
return 0;
}
-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
unsigned long flags;
return flags;
}
-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
- unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ unsigned long flags)
__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
if (qp->rcq != qp->scq)
int status;
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
+ unsigned int flags;
u8 nw_type;
qp->qplib_qp.modify_flags = 0;
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p to flush list\n",
qp);
+ flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
}
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p out of flush list\n",
qp);
+ flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
}
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
+ /* Need unconditional fence for local invalidate
+ * opcode to work as expected.
+ */
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
- if (wr->send_flags & IB_SEND_FENCE)
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
wqe->frmr.levels = qplib_frpl->hwq.level + 1;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- if (wr->wr.send_flags & IB_SEND_FENCE)
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ /* Need unconditional fence for reg_mr
+ * opcode to function as expected.
+ */
+
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
#endif /* __BNXT_RE_IB_VERBS_H__ */
struct bnxt_re_qp *qp)
{
struct ib_event event;
+ unsigned int flags;
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
memset(&event, 0, sizeof(event));
if (qp->qplib_qp.srq) {
switch (re_work->event) {
case NETDEV_REGISTER:
rc = bnxt_re_ib_reg(rdev);
- if (rc)
+ if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to register with IB: %#x", rc);
+ bnxt_re_remove_one(rdev);
+ bnxt_re_dev_unreg(rdev);
+ }
break;
case NETDEV_UP:
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
}
}
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
- spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+ spin_lock_irqsave(&qp->scq->flush_lock, *flags);
if (qp->scq == qp->rcq)
- __acquire(&qp->rcq->hwq.lock);
+ __acquire(&qp->rcq->flush_lock);
else
- spin_lock(&qp->rcq->hwq.lock);
+ spin_lock(&qp->rcq->flush_lock);
}
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
if (qp->scq == qp->rcq)
- __release(&qp->rcq->hwq.lock);
+ __release(&qp->rcq->flush_lock);
else
- spin_unlock(&qp->rcq->hwq.lock);
- spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
-}
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- if (qp->scq == qp->rcq)
- buddy_cq = NULL;
- else if (qp->scq == cq)
- buddy_cq = qp->rcq;
- else
- buddy_cq = qp->scq;
- return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __acquires(&buddy_cq->hwq.lock)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __acquire(&cq->hwq.lock);
- else
- spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __releases(&buddy_cq->hwq.lock)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __release(&cq->hwq.lock);
- else
- spin_unlock(&buddy_cq->hwq.lock);
+ spin_unlock(&qp->rcq->flush_lock);
+ spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
__bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
__clean_cq(qp->scq, (u64)(unsigned long)qp);
qp->sq.hwq.prod = 0;
qp->sq.hwq.cons = 0;
qp->rq.hwq.cons = 0;
__bnxt_qplib_del_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
bnxt_qplib_cancel_phantom_processing(qp);
-
- /* Add qp to flush list of the CQ */
- __bnxt_qplib_add_flush_qp(qp);
}
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
- bnxt_qplib_lock_buddy_cq(qp, cq);
bnxt_qplib_mark_qp_error(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
} else {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
done:
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
u32 sw_cons, raw_cons;
bool rc = true;
- spin_lock_irqsave(&cq->hwq.lock, flags);
raw_cons = cq->hwq.cons;
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
/* Check for Valid bit. If the CQE is valid, return false */
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
return rc;
}
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
*/
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
done:
return rc;
}
u32 budget = num_cqes;
unsigned long flags;
- spin_lock_irqsave(&cq->hwq.lock, flags);
+ spin_lock_irqsave(&cq->flush_lock, flags);
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
dev_dbg(&cq->hwq.pdev->dev,
"QPLIB: FP: Flushing SQ QP= %p",
qp);
__flush_rq(&qp->rq, qp, &cqe, &budget);
}
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
+ spin_unlock_irqrestore(&cq->flush_lock, flags);
return num_cqes - budget;
}
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
u32 sw_cons, raw_cons;
int budget, rc = 0;
- spin_lock_irqsave(&cq->hwq.lock, flags);
raw_cons = cq->hwq.cons;
budget = num_cqes;
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
}
exit:
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
return num_cqes - budget;
}
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->hwq.lock, flags);
if (arm_type)
bnxt_qplib_arm_cq(cq, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set(&cq->arm_state, 1);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
struct list_head sqf_head, rqf_head;
atomic_t arm_state;
spinlock_t compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+ spinlock_t flush_lock; /* QP flush management */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
err_event->res_err_state_reason);
if (!qp)
break;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
bnxt_qplib_mark_qp_error(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ rcfw->aeq_handler(rcfw, qp_event, qp);
break;
default:
/* Command Response */
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+ /* Supply (log-base-2-of-host-page-size - base-page-shift)
+ * to bono to adjust the doorbell page sizes.
+ */
+ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+ RCFW_DBR_BASE_PAGE_SHIFT);
/*
* VFs need not setup the HW context area, PF
* shall setup this area for VF. Skipping the
#define RCFW_COMM_SIZE 0x104
#define RCFW_DBR_PCI_BAR_REGION 2
+#define RCFW_DBR_BASE_PAGE_SHIFT 12
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
do { \
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
- attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = le32_to_cpu(sb->max_gid);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
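Concretely: the firmware appears to report l2_db_space_size in units of the 4 KB DBR base page (hence RCFW_DBR_BASE_PAGE_SHIFT), so a reported value of 1 now yields (1 + 1) * 4096 = 8192 bytes regardless of the host PAGE_SIZE; the old formula multiplied by PAGE_SIZE and would have computed 128 KB on a 64 KB-page kernel.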
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- __le16 reserved16;
+ /* This value is (log-base-2-of-DBR-page-size - 12).
+ * 0 for 4KB. HW supported values are enumerated below.
+ */
+ __le16 log2_dbr_pg_size;
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
+ CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
__le64 qpc_page_dir;
__le64 mrw_page_dir;
__le64 srq_page_dir;
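A quick sanity check of the new log2_dbr_pg_size field, as a standalone sketch (the 64 KB page size is just an assumed example, as on many ppc64 kernels):

        #include <stdio.h>

        int main(void)
        {
                unsigned int page_shift = 16;           /* assumed 64 KB host pages */
                unsigned int dbr_base_page_shift = 12;  /* RCFW_DBR_BASE_PAGE_SHIFT */

                /* 16 - 12 = 4, i.e. CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K */
                printf("log2_dbr_pg_size = %u\n", page_shift - dbr_base_page_shift);
                return 0;
        }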
wc->dlid_path_bits = 0;
if (is_eth) {
+ wc->slid = 0;
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
}
}
- wc->slid = be16_to_cpu(cqe->rlid);
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
if (is_eth) {
+ wc->slid = 0;
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_CVLAN_PRESENT_MASK) {
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
} else {
+ wc->slid = be16_to_cpu(cqe->rlid);
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->vlan_id = 0xffff;
}
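Background for the slid changes above: an Ethernet (RoCE) link has no LID space at all, so the rlid field of the CQE carries no meaningful source LID; reporting 0 keeps userspace from treating stale hardware data as a valid LID, while the InfiniBand branches still return the byte-swapped CQE field as before.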
gid_tbl[i].version = 2;
if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
gid_tbl[i].type = 1;
- else
- memset(&gid_tbl[i].gid, 0, 12);
}
}
if (!gids) {
ret = -ENOMEM;
} else {
- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
- memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+ memcpy(&gids[i].gid,
+ &port_gid_table->gids[i].gid,
+ sizeof(union ib_gid));
+ gids[i].gid_type =
+ port_gid_table->gids[i].gid_type;
+ }
}
}
spin_unlock_bh(&iboe->lock);
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
break;
}
- wc->slid = be16_to_cpu(cqe->slid);
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
}
if (ll != IB_LINK_LAYER_ETHERNET) {
+ wc->slid = be16_to_cpu(cqe->slid);
wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
return;
}
+ wc->slid = 0;
vlan_present = cqe->l4_l3_hdr_type & 0x1;
roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
if (vlan_present) {
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ /* check for multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
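The guard works because, for entries >= 1, cqe_size * entries exceeds SIZE_MAX exactly when SIZE_MAX / cqe_size <= entries - 1. A minimal standalone sketch of the same pattern (mul_overflows is illustrative, not a kernel helper):

        #include <stdint.h>
        #include <stddef.h>

        /* Nonzero if a * b would overflow size_t. b is assumed >= 1,
         * mirroring "entries" above (b == 0 would wrap b - 1). */
        static int mul_overflows(size_t a, size_t b)
        {
                return a != 0 && SIZE_MAX / a <= b - 1;
        }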
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_port *port;
+ if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+ ll != IB_LINK_LAYER_ETHERNET) {
+ if (native_port_num)
+ *native_port_num = ib_port_num;
+ return ibdev->mdev;
+ }
+
if (native_port_num)
*native_port_num = 1;
- if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
- return ibdev->mdev;
-
port = &ibdev->port[ib_port_num - 1];
if (!port)
return NULL;
struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
- u8 port = 0;
+ u8 port = (u8)work->param;
if (mlx5_core_is_mp_slave(work->dev)) {
ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)work->param;
-
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)work->param;
-
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = port;
- if (port < 1 || port > ibdev->num_ports) {
+ if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
goto out;
}
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
- mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
if (unlikely(i >= mr->max_descs))
sg_offset = 0;
}
+ mr->ndescs = i;
if (sg_offset_p)
*sg_offset_p = sg_offset;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
+ int mlx5_st;
void *qpc;
u32 *in;
int err;
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ mlx5_st = to_mlx5_st(init_attr->qp_type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
if (init_attr->rwq_ind_tbl) {
if (!udata)
return -ENOSYS;
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+ MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
goto out;
if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
- !optab[mlx5_cur][mlx5_new])
+ !optab[mlx5_cur][mlx5_new]) {
+ err = -EINVAL;
goto out;
+ }
op = optab[mlx5_cur][mlx5_new];
optpar = ib_mask_to_mlx5_opt(attr_mask);
}
return -EINVAL;
}
- neigh = dst_neigh_lookup(dst, &dst_in);
-
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
if (neigh) {
rcu_read_lock();
if (neigh->nud_state & NUD_VALID) {
qp = idr_find(&dev->qpidr, conn_param->qpn);
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+ ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+ ntohs(raddr->sin_port));
DP_DEBUG(dev, QEDR_MSG_IWARP,
"Connect source address: %pISpc, remote address: %pISpc\n",
int rc;
int i;
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
DP_DEBUG(dev, QEDR_MSG_IWARP,
"Create Listener address: %pISpc\n", &cm_id->local_addr);
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
swqe = (struct rdma_sq_send_wqe_1st *)wqe;
swqe->wqe_size = 2;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qedr_cq *cq = get_qedr_cq(ibcq);
- union rdma_cqe *cqe = cq->latest_cqe;
+ union rdma_cqe *cqe;
u32 old_cons, new_cons;
unsigned long flags;
int update = 0;
return qedr_gsi_poll_cq(ibcq, num_entries, wc);
spin_lock_irqsave(&cq->cq_lock, flags);
+ cqe = cq->latest_cqe;
old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
while (num_entries && is_valid_cqe(cq, cqe)) {
struct qedr_qp *qp;
uint32_t rtime = cpu_to_le32(get_seconds());
struct uuid_entry *u;
char buf[BDEVNAME_SIZE];
+ struct cached_dev *exist_dc, *t;
bdevname(dc->bdev, buf);
return -EINVAL;
}
+ /* Check whether already attached */
+ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+ pr_err("Tried to attach %s but duplicate UUID already attached",
+ buf);
+
+ return -EINVAL;
+ }
+ }
+
u = uuid_find(c, dc->sb.uuid);
if (u &&
return;
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", bdevname(bdev, name), err);
bcache_device_stop(&dc->disk);
}
const char *err = NULL; /* must be set for any error case */
int ret = 0;
+ bdevname(bdev, name);
+
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
- if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ if (blk_queue_discard(bdev_get_queue(bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
if (ret != 0) {
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else
goto out;
}
- pr_info("registered cache device %s", bdevname(bdev, name));
+ pr_info("registered cache device %s", name);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+ pr_notice("error %s: %s", name, err);
return ret;
}
if (err)
goto err_close;
+ err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
goto err_close;
if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err_close;
+ goto err;
}
out:
if (sb_page)
err_close:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
- pr_info("error opening %s: %s", path, err);
+ pr_info("error %s: %s", path, err);
ret = -EINVAL;
goto out;
}
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
enum data_mode *data_mode)
{
- unsigned noio_flag;
- void *ptr;
-
if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
*data_mode = DATA_MODE_SLAB;
return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
* all allocations done by this process (including pagetables) are done
* as if GFP_NOIO was specified.
*/
+ if (gfp_mask & __GFP_NORETRY) {
+ unsigned noio_flag = memalloc_noio_save();
+ void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
- if (gfp_mask & __GFP_NORETRY)
- noio_flag = memalloc_noio_save();
-
- ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
- if (gfp_mask & __GFP_NORETRY)
memalloc_noio_restore(noio_flag);
+ return ptr;
+ }
- return ptr;
+ return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}
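The restructured helper above scopes the GFP_NOIO conversion to the __GFP_NORETRY case only, instead of straddling the save/restore around every allocation. A minimal sketch of the general save/restore pattern (vmalloc_noio is a hypothetical name, not a dm-bufio function):

        #include <linux/sched/mm.h>
        #include <linux/vmalloc.h>

        static void *vmalloc_noio(unsigned long size)
        {
                unsigned int noio_flag = memalloc_noio_save();
                /* Every allocation in this scope, including page tables,
                 * behaves as if GFP_NOIO had been specified. */
                void *p = __vmalloc(size, GFP_NOIO | __GFP_NORETRY, PAGE_KERNEL);

                memalloc_noio_restore(noio_flag);
                return p;
        }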
/*
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
- m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
- if (m->queue_mode == DM_TYPE_BIO_BASED) {
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
- }
- }
-
- if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
- set_bit(MPATHF_QUEUE_IO, &m->flags);
- atomic_set(&m->pg_init_in_progress, 0);
- atomic_set(&m->pg_init_count, 0);
- m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
- init_waitqueue_head(&m->pg_init_wait);
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
}
dm_table_set_type(ti->table, m->queue_mode);
{
m->current_pg = pg;
- if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
- return;
-
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
set_bit(MPATHF_QUEUE_IO, &m->flags);
} else {
+ /* FIXME: not needed if no scsi_dh is attached */
clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
clear_bit(MPATHF_QUEUE_IO, &m->flags);
}
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
- if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
- clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
return pgpath;
}
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
{
struct pgpath *pgpath;
unsigned long flags;
{
struct pgpath *pgpath;
- if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
- pgpath = __map_bio_nvme(m, bio);
+ if (!m->hw_handler_name)
+ pgpath = __map_bio_fast(m, bio);
else
pgpath = __map_bio(m, bio);
{
if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
- else if (m->queue_mode == DM_TYPE_BIO_BASED ||
- m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
*/
kfree(m->hw_handler_name);
m->hw_handler_name = attached_handler_name;
+
+ /*
+ * Init fields that are only used when a scsi_dh is attached
+ */
+ if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+ init_waitqueue_head(&m->pg_init_wait);
+ }
}
}
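
The test_and_set_bit() guard above doubles as a one-shot initializer: the call atomically returns the bit's previous value, so only the caller that flips MPATHF_QUEUE_IO from clear to set performs the pg_init field setup. A sketch of the idiom, using a hypothetical flag word and bit:

	#include <linux/bitops.h>

	#define MY_INIT_DONE	0	/* hypothetical flag bit */

	static unsigned long my_flags;

	static void init_state_once(void)
	{
		/* test_and_set_bit() returns the old bit value atomically,
		 * so exactly one caller sees 0 and runs the setup. */
		if (!test_and_set_bit(MY_INIT_DONE, &my_flags)) {
			/* one-time setup guarded by the bit goes here */
		}
	}
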
int r;
struct pgpath *p;
struct multipath *m = ti->private;
+ struct scsi_device *sdev;
/* we need at least a path arg */
if (as->argc < 1) {
goto bad;
}
- if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
+ if (sdev) {
+ put_device(&sdev->sdev_gendev);
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
if (r) {
if (!hw_argc)
return 0;
- if (m->queue_mode == DM_TYPE_BIO_BASED ||
- m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
return 0;
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
- else if (!strcasecmp(queue_mode_name, "nvme"))
- m->queue_mode = DM_TYPE_NVME_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
else if (!strcasecmp(queue_mode_name, "mq"))
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
- if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
- case DM_TYPE_NVME_BIO_BASED:
- DMEMIT("queue_mode nvme ");
- break;
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
- test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
- test_bit(MD_RECOVERY_RUNNING, &recovery))
+ if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+ (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ test_bit(MD_RECOVERY_RUNNING, &recovery)))
r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
if (t->type != DM_TYPE_NONE) {
/* target already set the table's type */
- if (t->type == DM_TYPE_BIO_BASED)
- return 0;
- else if (t->type == DM_TYPE_NVME_BIO_BASED) {
- if (!dm_table_does_not_support_partial_completion(t)) {
- DMERR("nvme bio-based is only possible with devices"
- " that don't support partial completion");
- return -EINVAL;
- }
- /* Fallthru, also verify all devices are blk-mq */
+ if (t->type == DM_TYPE_BIO_BASED) {
+ /* possibly upgrade to a variant of bio-based */
+ goto verify_bio_based;
}
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+ BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based;
}
}
if (bio_based) {
+verify_bio_based:
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
if (dm_table_supports_dax(t) ||
char b[BDEVNAME_SIZE];
/* For now, NVMe devices are the only devices of this class */
- return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
}
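
The length change above matters because strncmp() compares at most n characters: with n = 3 only the "nvm" prefix was checked, so any hypothetical name starting with "nvm" would have been classified as NVMe; n = 4 requires the full "nvme" prefix the comment intends. A small userspace illustration with made-up device names:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* n = 3 checks only "nvm", so both names match */
		printf("%d\n", strncmp("nvme0n1", "nvme", 3) == 0); /* 1 */
		printf("%d\n", strncmp("nvm-dev", "nvme", 3) == 0); /* 1: false hit */
		/* n = 4 requires the full "nvme" prefix */
		printf("%d\n", strncmp("nvm-dev", "nvme", 4) == 0); /* 0: rejected */
		return 0;
	}
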
static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
return dm_get_geometry(md, geo);
}
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
- struct block_device **bdev,
- fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+ struct block_device **bdev,
+ fmode_t *mode)
{
struct dm_target *tgt;
struct dm_table *map;
goto out;
bdgrab(*bdev);
+ r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+ if (r < 0)
+ goto out;
+
dm_put_live_table(md, srcu_idx);
return r;
struct mapped_device *md = bdev->bd_disk->private_data;
int r;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+ r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
- bdput(bdev);
+ blkdev_put(bdev, mode);
return r;
}
static int open_table_device(struct table_device *td, dev_t dev,
struct mapped_device *md)
{
- static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
BUG_ON(td->dm_dev.bdev);
- bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+ bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
fmode_t mode;
int r;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+ r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
else
r = -EOPNOTSUPP;
- bdput(bdev);
+ blkdev_put(bdev, mode);
return r;
}
fmode_t mode;
int r;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+ r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
else
r = -EOPNOTSUPP;
- bdput(bdev);
+ blkdev_put(bdev, mode);
return r;
}
fmode_t mode;
int r;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+ r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
else
r = -EOPNOTSUPP;
- bdput(bdev);
+ blkdev_put(bdev, mode);
return r;
}
fmode_t mode;
int r;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+ r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
else
r = -EOPNOTSUPP;
- bdput(bdev);
+ blkdev_put(bdev, mode);
return r;
}
return rc;
}
+static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
+ struct ocxl_ioctl_metadata __user *uarg)
+{
+ struct ocxl_ioctl_metadata arg;
+
+ memset(&arg, 0, sizeof(arg));
+
+ arg.version = 0;
+
+ arg.afu_version_major = ctx->afu->config.version_major;
+ arg.afu_version_minor = ctx->afu->config.version_minor;
+ arg.pasid = ctx->pasid;
+ arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
+ arg.global_mmio_size = ctx->afu->config.global_mmio_size;
+
+ if (copy_to_user(uarg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ return 0;
+}
+
#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \
x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \
x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \
x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \
+ x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
"UNKNOWN")
static long afu_ioctl(struct file *file, unsigned int cmd,
irq_fd.eventfd);
break;
+ case OCXL_IOCTL_GET_METADATA:
+ rc = afu_ioctl_get_metadata(ctx,
+ (struct ocxl_ioctl_metadata __user *) args);
+ break;
+
default:
rc = -EINVAL;
}
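
From userspace, the new ioctl is a plain read of the metadata struct whose fields the handler above fills in. A hedged sketch, assuming the uapi header misc/ocxl.h that defines struct ocxl_ioctl_metadata and OCXL_IOCTL_GET_METADATA; the device node path is hypothetical:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <misc/ocxl.h>

	int main(void)
	{
		struct ocxl_ioctl_metadata md;
		/* hypothetical AFU device node */
		int fd = open("/dev/ocxl/my-afu.0", O_RDWR);

		if (fd < 0 || ioctl(fd, OCXL_IOCTL_GET_METADATA, &md))
			return 1;
		printf("AFU %u.%u pasid %u pp_mmio %llu global_mmio %llu\n",
		       md.afu_version_major, md.afu_version_minor, md.pasid,
		       (unsigned long long)md.pp_mmio_size,
		       (unsigned long long)md.global_mmio_size);
		close(fd);
		return 0;
	}
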
trigger_cmd_completions(dev);
}
- mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+ mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
mlx5_core_err(dev, "end\n");
unlock:
ns->disk->disk_name);
nvme_mpath_add_disk(ns->head);
- nvme_mpath_add_disk_links(ns);
return;
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
return;
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- nvme_mpath_remove_disk_links(ns);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_id_attr_group);
if (ns->ndev)
ret = -EINVAL;
goto out;
}
+ if (opts->discovery_nqn) {
+ pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+ break;
+ }
+
opts->nr_io_queues = min_t(unsigned int,
num_online_cpus(), token);
break;
sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
- assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+ assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
- conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+ conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
lsop->queue = queue;
lsreq->rqstaddr = conn_rqst;
goto out_free_tag_set;
}
- ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_cleanup_blk_queue;
- ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_delete_hw_queues;
if (ret)
goto out_free_io_queues;
- ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
- ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_delete_hw_queues;
nvme_fc_init_queue(ctrl, 0);
ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
- NVME_AQ_BLK_MQ_DEPTH);
+ NVME_AQ_DEPTH);
if (ret)
goto out_free_queue;
ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
- NVME_AQ_BLK_MQ_DEPTH,
- (NVME_AQ_BLK_MQ_DEPTH / 4));
+ NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
if (ret)
goto out_delete_hw_queue;
}
ctrl->ctrl.sqsize =
- min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+ min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (ret)
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ /* warn if sqsize is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.sqsize + 1);
+ opts->queue_size = ctrl->ctrl.sqsize + 1;
+ }
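
The cluster of "- 1"/"+ 1" changes in the hunks above follows from NVMe's 0-based sqsize convention: the field carries the queue depth minus one, both in CAP.MQES and in the Fabrics connect payload, so element counts need "sqsize + 1" and wire values need "depth - 1". A worked check of the arithmetic, as plain userspace C with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int queue_size = 128;		/* 1-based: usable slots  */
		unsigned int sqsize = queue_size - 1;	/* 0-based: value on wire */

		printf("wire sqsize %u, usable slots %u\n", sqsize, sqsize + 1);
		return 0;
	}
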