author | 2018-03-18 18:15:16 -0400
---|---
committer | 2018-11-14 09:36:57 -0500
commit | b7965a2ac11bf6476d10269ba85bf8a90955e8bd (patch)
tree | b6f60f6ac9fd484eaa090970faf6bd212447bd02 /1087_linux-4.9.88.patch
parent | Linux patch 4.9.87 (diff)
Linux patch 4.9.88
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1087_linux-4.9.88.patch')
-rw-r--r-- | 1087_linux-4.9.88.patch | 3807
1 file changed, 3807 insertions, 0 deletions
diff --git a/1087_linux-4.9.88.patch b/1087_linux-4.9.88.patch new file mode 100644 index 00000000..d50329d0 --- /dev/null +++ b/1087_linux-4.9.88.patch @@ -0,0 +1,3807 @@ +diff --git a/Makefile b/Makefile +index 3043937a65d1..1512ebceffda 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 87 ++SUBLEVEL = 88 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c +index 9ff92050053c..fa7f308c9027 100644 +--- a/arch/arm/mach-omap2/omap-secure.c ++++ b/arch/arm/mach-omap2/omap-secure.c +@@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void) + return omap_secure_memblock_base; + } + ++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) + u32 omap3_save_secure_ram(void __iomem *addr, int size) + { + u32 ret; +@@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size) + + return ret; + } ++#endif + + /** + * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls +diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c +index 9ab48ff80c1c..6d11ae581ea7 100644 +--- a/arch/mips/ath25/board.c ++++ b/arch/mips/ath25/board.c +@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size) + } + + board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); ++ if (!board_data) ++ goto error; + ath25_board.config = (struct ath25_boarddata *)board_data; + memcpy_fromio(board_data, bcfg, 0x100); + if (broken_boarddata) { +diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c +index c1eb1ff7c800..6ed1ded87b8f 100644 +--- a/arch/mips/cavium-octeon/octeon-irq.c ++++ b/arch/mips/cavium-octeon/octeon-irq.c +@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, + } + + host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); ++ if (!host_data) ++ return -ENOMEM; + raw_spin_lock_init(&host_data->lock); + + addr = of_get_address(ciu_node, 0, NULL, NULL); +diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c +index 47c9646f93b3..d4a293b68249 100644 +--- a/arch/mips/kernel/smp-bmips.c ++++ b/arch/mips/kernel/smp-bmips.c +@@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus) + return; + } + +- if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, +- "smp_ipi0", NULL)) ++ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, ++ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) + panic("Can't request IPI0 interrupt"); +- if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, +- "smp_ipi1", NULL)) ++ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, ++ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) + panic("Can't request IPI1 interrupt"); + } + +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c +index 5ba494ed18c1..a70ff09b4982 100644 +--- a/arch/s390/kvm/kvm-s390.c ++++ b/arch/s390/kvm/kvm-s390.c +@@ -1601,6 +1601,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu) + /* we still need the basic sca for the ipte control */ + vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); + vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; ++ return; + } + read_lock(&vcpu->kvm->arch.sca_lock); + if (vcpu->kvm->arch.use_esca) { +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index cd22cb8ebd42..b60996184fa4 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -184,7 +184,10 @@ KBUILD_AFLAGS += $(mflags-y) + + # Avoid indirect branches in kernel to deal with Spectre + ifdef CONFIG_RETPOLINE +- RETPOLINE_CFLAGS +=
$(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) ++ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register ++ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk ++ ++ RETPOLINE_CFLAGS += $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) + ifneq ($(RETPOLINE_CFLAGS),) + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE + endif +diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S +index f5434b4670c1..a76dc738ec61 100644 +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -237,8 +237,7 @@ ENTRY(__switch_to_asm) + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ +- /* Clobbers %ebx */ +- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW ++ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + #endif + + /* restore callee-saved registers */ +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index 8d7e4d48db0d..58610fe93f5d 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -331,8 +331,7 @@ ENTRY(__switch_to_asm) + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ +- /* Clobbers %rbx */ +- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW ++ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + #endif + + /* restore callee-saved registers */ +diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h +index 93eebc636c76..46e40aeae446 100644 +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -6,6 +6,8 @@ + #ifndef _ASM_X86_MACH_DEFAULT_APM_H + #define _ASM_X86_MACH_DEFAULT_APM_H + ++#include <asm/nospec-branch.h> ++ + #ifdef APM_ZERO_SEGS + # define APM_DO_ZERO_SEGS \ + "pushl %%ds\n\t" \ +@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. + */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, + "=S" (*esi) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + } + + static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, +@@ -55,6 +59,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. 
+ */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -67,6 +72,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, + "=S" (si) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + return error; + } + +diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h +index 166654218329..5a25ada75aeb 100644 +--- a/arch/x86/include/asm/asm-prototypes.h ++++ b/arch/x86/include/asm/asm-prototypes.h +@@ -37,7 +37,4 @@ INDIRECT_THUNK(dx) + INDIRECT_THUNK(si) + INDIRECT_THUNK(di) + INDIRECT_THUNK(bp) +-asmlinkage void __fill_rsb(void); +-asmlinkage void __clear_rsb(void); +- + #endif /* CONFIG_RETPOLINE */ +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 8eb23f5cf7f4..ed7a1d2c4235 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -203,6 +203,7 @@ + #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ + + #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ ++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ + + /* Virtualization flags: Linux defined, word 8 */ + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h +index 389d700b961e..9df22bb07f7f 100644 +--- a/arch/x86/include/asm/efi.h ++++ b/arch/x86/include/asm/efi.h +@@ -5,6 +5,7 @@ + #include <asm/pgtable.h> + #include <asm/processor-flags.h> + #include <asm/tlb.h> ++#include <asm/nospec-branch.h> + + /* + * We map the EFI regions needed for runtime services non-contiguously, +@@ -35,8 +36,18 @@ + + extern unsigned long asmlinkage efi_call_phys(void *, ...); + +-#define arch_efi_call_virt_setup() kernel_fpu_begin() +-#define arch_efi_call_virt_teardown() kernel_fpu_end() ++#define arch_efi_call_virt_setup() \ ++({ \ ++ kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ ++}) ++ ++#define arch_efi_call_virt_teardown() \ ++({ \ ++ firmware_restrict_branch_speculation_end(); \ ++ kernel_fpu_end(); \ ++}) ++ + + /* + * Wrap all the virtual calls in a way that forces the parameters on the stack. +@@ -72,6 +83,7 @@ struct efi_scratch { + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ + \ + if (efi_scratch.use_pgd) { \ + efi_scratch.prev_cr3 = read_cr3(); \ +@@ -90,6 +102,7 @@ struct efi_scratch { + __flush_tlb_all(); \ + } \ + \ ++ firmware_restrict_branch_speculation_end(); \ + __kernel_fpu_end(); \ + preempt_enable(); \ + }) +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 81a1be326571..d0dabeae0505 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -8,6 +8,50 @@ + #include <asm/cpufeatures.h> + #include <asm/msr-index.h> + ++/* ++ * Fill the CPU return stack buffer. ++ * ++ * Each entry in the RSB, if used for a speculative 'ret', contains an ++ * infinite 'pause; lfence; jmp' loop to capture speculative execution. ++ * ++ * This is required in various cases for retpoline and IBRS-based ++ * mitigations for the Spectre variant 2 vulnerability.
Sometimes to ++ * eliminate potentially bogus entries from the RSB, and sometimes ++ * purely to ensure that it doesn't get empty, which on some CPUs would ++ * allow predictions from other (unwanted!) sources to be used. ++ * ++ * We define a CPP macro such that it can be used from both .S files and ++ * inline assembly. It's possible to do a .macro and then include that ++ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there. ++ */ ++ ++#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ ++#define RSB_FILL_LOOPS 16 /* To avoid underflow */ ++ ++/* ++ * Google experimented with loop-unrolling and this turned out to be ++ * the optimal version — two calls, each with their own speculation ++ * trap should their return address end up getting used, in a loop. ++ */ ++#define __FILL_RETURN_BUFFER(reg, nr, sp) \ ++ mov $(nr/2), reg; \ ++771: \ ++ call 772f; \ ++773: /* speculation trap */ \ ++ pause; \ ++ lfence; \ ++ jmp 773b; \ ++772: \ ++ call 774f; \ ++775: /* speculation trap */ \ ++ pause; \ ++ lfence; \ ++ jmp 775b; \ ++774: \ ++ dec reg; \ ++ jnz 771b; \ ++ add $(BITS_PER_LONG/8) * nr, sp; ++ + #ifdef __ASSEMBLY__ + + /* +@@ -23,6 +67,18 @@ + .popsection + .endm + ++/* ++ * This should be used immediately before an indirect jump/call. It tells ++ * objtool the subsequent indirect jump/call is vouched safe for retpoline ++ * builds. ++ */ ++.macro ANNOTATE_RETPOLINE_SAFE ++ .Lannotate_\@: ++ .pushsection .discard.retpoline_safe ++ _ASM_PTR .Lannotate_\@ ++ .popsection ++.endm ++ + /* + * These are the bare retpoline primitives for indirect jmp and call. + * Do not use these directly; they only exist to make the ALTERNATIVE +@@ -59,9 +115,9 @@ + .macro JMP_NOSPEC reg:req + #ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE +- ALTERNATIVE_2 __stringify(jmp *\reg), \ ++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ +- __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD + #else + jmp *\reg + #endif +@@ -70,18 +126,25 @@ + .macro CALL_NOSPEC reg:req + #ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE +- ALTERNATIVE_2 __stringify(call *\reg), \ ++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ + __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ +- __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD + #else + call *\reg + #endif + .endm + +-/* This clobbers the BX register */ +-.macro FILL_RETURN_BUFFER nr:req ftr:req ++ /* ++ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP ++ * monstrosity above, manually. 
++ */ ++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req + #ifdef CONFIG_RETPOLINE +- ALTERNATIVE "", "call __clear_rsb", \ftr ++ ANNOTATE_NOSPEC_ALTERNATIVE ++ ALTERNATIVE "jmp .Lskip_rsb_\@", \ ++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ ++ \ftr ++.Lskip_rsb_\@: + #endif + .endm + +@@ -93,6 +156,12 @@ + ".long 999b - .\n\t" \ + ".popsection\n\t" + ++#define ANNOTATE_RETPOLINE_SAFE \ ++ "999:\n\t" \ ++ ".pushsection .discard.retpoline_safe\n\t" \ ++ _ASM_PTR " 999b\n\t" \ ++ ".popsection\n\t" ++ + #if defined(CONFIG_X86_64) && defined(RETPOLINE) + + /* +@@ -102,6 +171,7 @@ + # define CALL_NOSPEC \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE( \ ++ ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ + X86_FEATURE_RETPOLINE) +@@ -156,26 +226,54 @@ extern char __indirect_thunk_end[]; + static inline void vmexit_fill_RSB(void) + { + #ifdef CONFIG_RETPOLINE +- alternative_input("", +- "call __fill_rsb", +- X86_FEATURE_RETPOLINE, +- ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory")); ++ unsigned long loops; ++ ++ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE ++ ALTERNATIVE("jmp 910f", ++ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), ++ X86_FEATURE_RETPOLINE) ++ "910:" ++ : "=r" (loops), ASM_CALL_CONSTRAINT ++ : : "memory" ); + #endif + } + ++#define alternative_msr_write(_msr, _val, _feature) \ ++ asm volatile(ALTERNATIVE("", \ ++ "movl %[msr], %%ecx\n\t" \ ++ "movl %[val], %%eax\n\t" \ ++ "movl $0, %%edx\n\t" \ ++ "wrmsr", \ ++ _feature) \ ++ : : [msr] "i" (_msr), [val] "i" (_val) \ ++ : "eax", "ecx", "edx", "memory") ++ + static inline void indirect_branch_prediction_barrier(void) + { +- asm volatile(ALTERNATIVE("", +- "movl %[msr], %%ecx\n\t" +- "movl %[val], %%eax\n\t" +- "movl $0, %%edx\n\t" +- "wrmsr", +- X86_FEATURE_USE_IBPB) +- : : [msr] "i" (MSR_IA32_PRED_CMD), +- [val] "i" (PRED_CMD_IBPB) +- : "eax", "ecx", "edx", "memory"); ++ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, ++ X86_FEATURE_USE_IBPB); + } + ++/* ++ * With retpoline, we must use IBRS to restrict branch prediction ++ * before calling into firmware. ++ * ++ * (Implemented as CPP macros due to header hell.) 
++ */ ++#define firmware_restrict_branch_speculation_start() \ ++do { \ ++ preempt_disable(); \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ ++ X86_FEATURE_USE_IBRS_FW); \ ++} while (0) ++ ++#define firmware_restrict_branch_speculation_end() \ ++do { \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ ++ X86_FEATURE_USE_IBRS_FW); \ ++ preempt_enable(); \ ++} while (0) ++ + #endif /* __ASSEMBLY__ */ + + /* +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h +index ce932812f142..24af8b1de438 100644 +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -6,6 +6,7 @@ + #ifdef CONFIG_PARAVIRT + #include <asm/pgtable_types.h> + #include <asm/asm.h> ++#include <asm/nospec-branch.h> + + #include <asm/paravirt_types.h> + +@@ -869,23 +870,27 @@ extern void default_banner(void); + + #define INTERRUPT_RETURN \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ +- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) ++ ANNOTATE_RETPOLINE_SAFE; \ ++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) + + #define DISABLE_INTERRUPTS(clobbers) \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ ++ ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) + + #define ENABLE_INTERRUPTS(clobbers) \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ ++ ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) + + #ifdef CONFIG_X86_32 + #define GET_CR0_INTO_EAX \ + push %ecx; push %edx; \ ++ ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ + pop %edx; pop %ecx + #else /* !CONFIG_X86_32 */ +@@ -907,11 +912,13 @@ extern void default_banner(void); + */ + #define SWAPGS \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ +- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ ++ ANNOTATE_RETPOLINE_SAFE; \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ + ) + + #define GET_CR2_INTO_RAX \ +- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) ++ ANNOTATE_RETPOLINE_SAFE; \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); + + #define PARAVIRT_ADJUST_EXCEPTION_FRAME \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \ +@@ -921,7 +928,8 @@ extern void default_banner(void); + #define USERGS_SYSRET64 \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ + CLBR_NONE, \ +- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) ++ ANNOTATE_RETPOLINE_SAFE; \ ++ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) + #endif /* CONFIG_X86_32 */ + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h +index 0f400c0e4979..04b79712b09c 100644 +--- a/arch/x86/include/asm/paravirt_types.h ++++ b/arch/x86/include/asm/paravirt_types.h +@@ -42,6 +42,7 @@ + #include <asm/desc_defs.h> + #include <asm/kmap_types.h> + #include <asm/pgtable_types.h> ++#include <asm/nospec-branch.h> + + struct page; + struct thread_struct; +@@ -391,7 +392,9 @@ int paravirt_disable_iospace(void); + * offset into the paravirt_patch_template structure, and can therefore be + * freely converted back into a structure offset. 
+ */ +-#define PARAVIRT_CALL "call *%c[paravirt_opptr];" ++#define PARAVIRT_CALL \ ++ ANNOTATE_RETPOLINE_SAFE \ ++ "call *%c[paravirt_opptr];" + + /* + * These macros are intended to wrap calls through one of the paravirt +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index baddc9ed3454..b8b0b6e78371 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -299,6 +299,15 @@ static void __init spectre_v2_select_mitigation(void) + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); + } ++ ++ /* ++ * Retpoline means the kernel is safe because it has no indirect ++ * branches. But firmware isn't, so use IBRS to protect that. ++ */ ++ if (boot_cpu_has(X86_FEATURE_IBRS)) { ++ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); ++ pr_info("Enabling Restricted Speculation for firmware calls\n"); ++ } + } + + #undef pr_fmt +@@ -325,8 +334,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); + +- return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", ++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + spectre_v2_module_string()); + } + #endif +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 6ed206bd9071..768042530af2 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -103,6 +103,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) + { + int i; + ++ /* ++ * We know that the hypervisor lie to us on the microcode version so ++ * we may as well hope that it is running the correct version. 
++ */ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return false; ++ + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c +index fe5cd6ea1f0e..684d9fd191e0 100644 +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c +@@ -61,6 +61,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); + smp_load_acquire(&(p)); \ + }) + ++/* sysfs synchronization */ ++static DEFINE_MUTEX(mce_sysfs_mutex); ++ + #define CREATE_TRACE_POINTS + #include <trace/events/mce.h> + +@@ -2308,6 +2311,7 @@ static ssize_t set_ignore_ce(struct device *s, + if (kstrtou64(buf, 0, &new) < 0) + return -EINVAL; + ++ mutex_lock(&mce_sysfs_mutex); + if (mca_cfg.ignore_ce ^ !!new) { + if (new) { + /* disable ce features */ +@@ -2320,6 +2324,8 @@ static ssize_t set_ignore_ce(struct device *s, + on_each_cpu(mce_enable_ce, (void *)1, 1); + } + } ++ mutex_unlock(&mce_sysfs_mutex); ++ + return size; + } + +@@ -2332,6 +2338,7 @@ static ssize_t set_cmci_disabled(struct device *s, + if (kstrtou64(buf, 0, &new) < 0) + return -EINVAL; + ++ mutex_lock(&mce_sysfs_mutex); + if (mca_cfg.cmci_disabled ^ !!new) { + if (new) { + /* disable cmci */ +@@ -2343,6 +2350,8 @@ static ssize_t set_cmci_disabled(struct device *s, + on_each_cpu(mce_enable_ce, NULL, 1); + } + } ++ mutex_unlock(&mce_sysfs_mutex); ++ + return size; + } + +@@ -2350,8 +2359,19 @@ static ssize_t store_int_with_restart(struct device *s, + struct device_attribute *attr, + const char *buf, size_t size) + { +- ssize_t ret = device_store_int(s, attr, buf, size); ++ unsigned long old_check_interval = check_interval; ++ ssize_t ret = device_store_ulong(s, attr, buf, size); ++ ++ if (check_interval == old_check_interval) ++ return ret; ++ ++ if (check_interval < 1) ++ check_interval = 1; ++ ++ mutex_lock(&mce_sysfs_mutex); + mce_restart(); ++ mutex_unlock(&mce_sysfs_mutex); ++ + return ret; + } + +diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S +index 67cd7c1b99da..9d72cf547c88 100644 +--- a/arch/x86/kernel/head_64.S ++++ b/arch/x86/kernel/head_64.S +@@ -22,6 +22,7 @@ + #include <asm/nops.h> + #include "../entry/calling.h" + #include <asm/export.h> ++#include <asm/nospec-branch.h> + + #ifdef CONFIG_PARAVIRT + #include <asm/asm-offsets.h> +@@ -200,6 +201,7 @@ ENTRY(secondary_startup_64) + + /* Ensure I am executing from virtual addresses */ + movq $1f, %rax ++ ANNOTATE_RETPOLINE_SAFE + jmp *%rax + 1: + +diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c +index 8c1f218926d7..a5784a14f8d1 100644 +--- a/arch/x86/kernel/machine_kexec_64.c ++++ b/arch/x86/kernel/machine_kexec_64.c +@@ -524,6 +524,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, + goto overflow; + break; + case R_X86_64_PC32: ++ case R_X86_64_PLT32: + value -= (u64)address; + *(u32 *)location = value; + break; +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c +index 477ae806c2fa..19977d2f97fb 100644 +--- a/arch/x86/kernel/module.c ++++ b/arch/x86/kernel/module.c +@@ -171,19 +171,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + case R_X86_64_NONE: + break; + case R_X86_64_64: ++ if (*(u64 *)loc != 0) ++ goto invalid_relocation; + *(u64 *)loc = val; + break; + case R_X86_64_32: ++ if (*(u32 *)loc != 0) ++ goto invalid_relocation; + *(u32 *)loc = val; + if (val != *(u32 *)loc) + goto overflow; + break; + case R_X86_64_32S: 
++ if (*(s32 *)loc != 0) ++ goto invalid_relocation; + *(s32 *)loc = val; + if ((s64)val != *(s32 *)loc) + goto overflow; + break; + case R_X86_64_PC32: ++ case R_X86_64_PLT32: ++ if (*(u32 *)loc != 0) ++ goto invalid_relocation; + val -= (u64)loc; + *(u32 *)loc = val; + #if 0 +@@ -199,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + } + return 0; + ++invalid_relocation: ++ pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n", ++ (int)ELF64_R_TYPE(rel[i].r_info), loc, val); ++ return -ENOEXEC; ++ + overflow: + pr_err("overflow in relocation type %d val %Lx\n", + (int)ELF64_R_TYPE(rel[i].r_info), val); +diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile +index 4ad7c4dd311c..6bf1898ddf49 100644 +--- a/arch/x86/lib/Makefile ++++ b/arch/x86/lib/Makefile +@@ -26,7 +26,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o + lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o + lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o + lib-$(CONFIG_RETPOLINE) += retpoline.o +-OBJECT_FILES_NON_STANDARD_retpoline.o :=y + + obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o + +diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S +index 480edc3a5e03..c909961e678a 100644 +--- a/arch/x86/lib/retpoline.S ++++ b/arch/x86/lib/retpoline.S +@@ -7,7 +7,6 @@ + #include <asm/alternative-asm.h> + #include <asm/export.h> + #include <asm/nospec-branch.h> +-#include <asm/bitsperlong.h> + + .macro THUNK reg + .section .text.__x86.indirect_thunk +@@ -47,58 +46,3 @@ GENERATE_THUNK(r13) + GENERATE_THUNK(r14) + GENERATE_THUNK(r15) + #endif +- +-/* +- * Fill the CPU return stack buffer. +- * +- * Each entry in the RSB, if used for a speculative 'ret', contains an +- * infinite 'pause; lfence; jmp' loop to capture speculative execution. +- * +- * This is required in various cases for retpoline and IBRS-based +- * mitigations for the Spectre variant 2 vulnerability. Sometimes to +- * eliminate potentially bogus entries from the RSB, and sometimes +- * purely to ensure that it doesn't get empty, which on some CPUs would +- * allow predictions from other (unwanted!) sources to be used. +- * +- * Google experimented with loop-unrolling and this turned out to be +- * the optimal version - two calls, each with their own speculation +- * trap should their return address end up getting used, in a loop. +- */ +-.macro STUFF_RSB nr:req sp:req +- mov $(\nr / 2), %_ASM_BX +- .align 16 +-771: +- call 772f +-773: /* speculation trap */ +- pause +- lfence +- jmp 773b +- .align 16 +-772: +- call 774f +-775: /* speculation trap */ +- pause +- lfence +- jmp 775b +- .align 16 +-774: +- dec %_ASM_BX +- jnz 771b +- add $((BITS_PER_LONG/8) * \nr), \sp +-.endm +- +-#define RSB_FILL_LOOPS 16 /* To avoid underflow */ +- +-ENTRY(__fill_rsb) +- STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP +- ret +-END(__fill_rsb) +-EXPORT_SYMBOL_GPL(__fill_rsb) +- +-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ +- +-ENTRY(__clear_rsb) +- STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP +- ret +-END(__clear_rsb) +-EXPORT_SYMBOL_GPL(__clear_rsb) +diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c +index 73eb7fd4aec4..5b6c8486a0be 100644 +--- a/arch/x86/tools/relocs.c ++++ b/arch/x86/tools/relocs.c +@@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, + break; + + case R_X86_64_PC32: ++ case R_X86_64_PLT32: + /* + * PC relative relocations don't need to be adjusted unless + * referencing a percpu symbol. 
++ * ++ * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. + */ + if (is_percpu_sym(sym, symname)) + add_reloc(&relocs32neg, offset); +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index 402254d26247..68bfcef24701 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) + struct iov_iter i; + ssize_t bw; + +- iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); ++ iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); + + file_start_write(file); + bw = vfs_iter_write(file, &i, ppos); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +index 5796539a0bcb..648ecf69bad5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, + size_t size; + u32 retry = 3; + ++ if (amdgpu_acpi_pcie_notify_device_ready(adev)) ++ return -EINVAL; ++ + /* Get the device handle */ + handle = ACPI_HANDLE(&adev->pdev->dev); + if (!handle) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index 086aa5c9c634..c82b04b24bf9 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -739,9 +739,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + int r; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + if (encoder) { + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); +@@ -760,8 +762,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) + /* check acpi lid status ??? 
*/ + + amdgpu_connector_update_scratch_regs(connector, ret); +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } ++ + return ret; + } + +@@ -871,9 +877,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + int r; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + encoder = amdgpu_connector_best_single_encoder(connector); + if (!encoder) +@@ -927,8 +935,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) + amdgpu_connector_update_scratch_regs(connector, ret); + + out: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +@@ -991,9 +1001,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + bool dret = false, broken_edid = false; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { + ret = connector->status; +@@ -1118,8 +1130,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) + amdgpu_connector_update_scratch_regs(connector, ret); + + exit: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +@@ -1362,9 +1376,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) + struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); + int r; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { + ret = connector->status; +@@ -1432,8 +1448,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) + + amdgpu_connector_update_scratch_regs(connector, ret); + out: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +index e3281cacc586..5caf517eec9f 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +@@ -273,12 +273,15 @@ int amdgpu_uvd_suspend(struct
amdgpu_device *adev) + if (adev->uvd.vcpu_bo == NULL) + return 0; + +- for (i = 0; i < adev->uvd.max_handles; ++i) +- if (atomic_read(&adev->uvd.handles[i])) +- break; ++ /* only valid for physical mode */ ++ if (adev->asic_type < CHIP_POLARIS10) { ++ for (i = 0; i < adev->uvd.max_handles; ++i) ++ if (atomic_read(&adev->uvd.handles[i])) ++ break; + +- if (i == AMDGPU_MAX_UVD_HANDLES) +- return 0; ++ if (i == adev->uvd.max_handles) ++ return 0; ++ } + + cancel_delayed_work_sync(&adev->uvd.idle_work); + +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +index 71116da9e782..e040a896179c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +@@ -4475,34 +4475,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) + case CHIP_KAVERI: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; +- if ((adev->pdev->device == 0x1304) || +- (adev->pdev->device == 0x1305) || +- (adev->pdev->device == 0x130C) || +- (adev->pdev->device == 0x130F) || +- (adev->pdev->device == 0x1310) || +- (adev->pdev->device == 0x1311) || +- (adev->pdev->device == 0x131C)) { +- adev->gfx.config.max_cu_per_sh = 8; +- adev->gfx.config.max_backends_per_se = 2; +- } else if ((adev->pdev->device == 0x1309) || +- (adev->pdev->device == 0x130A) || +- (adev->pdev->device == 0x130D) || +- (adev->pdev->device == 0x1313) || +- (adev->pdev->device == 0x131D)) { +- adev->gfx.config.max_cu_per_sh = 6; +- adev->gfx.config.max_backends_per_se = 2; +- } else if ((adev->pdev->device == 0x1306) || +- (adev->pdev->device == 0x1307) || +- (adev->pdev->device == 0x130B) || +- (adev->pdev->device == 0x130E) || +- (adev->pdev->device == 0x1315) || +- (adev->pdev->device == 0x131B)) { +- adev->gfx.config.max_cu_per_sh = 4; +- adev->gfx.config.max_backends_per_se = 1; +- } else { +- adev->gfx.config.max_cu_per_sh = 3; +- adev->gfx.config.max_backends_per_se = 1; +- } ++ adev->gfx.config.max_cu_per_sh = 8; ++ adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; +diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c +index 276474d13763..d7822bef1986 100644 +--- a/drivers/gpu/drm/drm_probe_helper.c ++++ b/drivers/gpu/drm/drm_probe_helper.c +@@ -460,6 +460,26 @@ static void output_poll_execute(struct work_struct *work) + schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); + } + ++/** ++ * drm_kms_helper_is_poll_worker - is %current task an output poll worker? ++ * ++ * Determine if %current task is an output poll worker. This can be used ++ * to select distinct code paths for output polling versus other contexts. ++ * ++ * One use case is to avoid a deadlock between the output poll worker and ++ * the autosuspend worker wherein the latter waits for polling to finish ++ * upon calling drm_kms_helper_poll_disable(), while the former waits for ++ * runtime suspend to finish upon calling pm_runtime_get_sync() in a ++ * connector ->detect hook. 
++ */ ++bool drm_kms_helper_is_poll_worker(void) ++{ ++ struct work_struct *work = current_work(); ++ ++ return work && work->func == output_poll_execute; ++} ++EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); ++ + /** + * drm_kms_helper_poll_disable - disable output polling + * @dev: drm_device +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c +index 7513e7678263..bae62cf934cf 100644 +--- a/drivers/gpu/drm/i915/i915_drv.c ++++ b/drivers/gpu/drm/i915/i915_drv.c +@@ -1703,6 +1703,8 @@ static int i915_drm_resume_early(struct drm_device *dev) + if (IS_BROXTON(dev_priv) || + !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) + intel_power_domains_init_hw(dev_priv, true); ++ else ++ intel_display_set_init_power(dev_priv, true); + + enable_rpm_wakeref_asserts(dev_priv); + +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c +index 13c306173f27..0c935dede9f4 100644 +--- a/drivers/gpu/drm/i915/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/intel_hdmi.c +@@ -1452,12 +1452,20 @@ intel_hdmi_set_edid(struct drm_connector *connector) + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct edid *edid; + bool connected = false; ++ struct i2c_adapter *i2c; + + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + +- edid = drm_get_edid(connector, +- intel_gmbus_get_adapter(dev_priv, +- intel_hdmi->ddc_bus)); ++ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); ++ ++ edid = drm_get_edid(connector, i2c); ++ ++ if (!edid && !intel_gmbus_is_forced_bit(i2c)) { ++ DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); ++ intel_gmbus_force_bit(i2c, true); ++ edid = drm_get_edid(connector, i2c); ++ intel_gmbus_force_bit(i2c, false); ++ } + + intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); + +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index c1084088f9e4..56c288f78d8a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -271,9 +271,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) + nv_connector->edid = NULL; + } + +- ret = pm_runtime_get_sync(connector->dev->dev); +- if (ret < 0 && ret != -EACCES) +- return conn_status; ++ /* Outputs are only polled while runtime active, so acquiring a ++ * runtime PM ref here is unnecessary (and would deadlock upon ++ * runtime suspend because it waits for polling to finish). 
++ */ ++ if (!drm_kms_helper_is_poll_worker()) { ++ ret = pm_runtime_get_sync(connector->dev->dev); ++ if (ret < 0 && ret != -EACCES) ++ return conn_status; ++ } + + nv_encoder = nouveau_connector_ddc_detect(connector); + if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { +@@ -348,8 +354,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) + + out: + +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return conn_status; + } +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index edee6a5f4da9..b99f3e59011c 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3244,35 +3244,8 @@ static void cik_gpu_init(struct radeon_device *rdev) + case CHIP_KAVERI: + rdev->config.cik.max_shader_engines = 1; + rdev->config.cik.max_tile_pipes = 4; +- if ((rdev->pdev->device == 0x1304) || +- (rdev->pdev->device == 0x1305) || +- (rdev->pdev->device == 0x130C) || +- (rdev->pdev->device == 0x130F) || +- (rdev->pdev->device == 0x1310) || +- (rdev->pdev->device == 0x1311) || +- (rdev->pdev->device == 0x131C)) { +- rdev->config.cik.max_cu_per_sh = 8; +- rdev->config.cik.max_backends_per_se = 2; +- } else if ((rdev->pdev->device == 0x1309) || +- (rdev->pdev->device == 0x130A) || +- (rdev->pdev->device == 0x130D) || +- (rdev->pdev->device == 0x1313) || +- (rdev->pdev->device == 0x131D)) { +- rdev->config.cik.max_cu_per_sh = 6; +- rdev->config.cik.max_backends_per_se = 2; +- } else if ((rdev->pdev->device == 0x1306) || +- (rdev->pdev->device == 0x1307) || +- (rdev->pdev->device == 0x130B) || +- (rdev->pdev->device == 0x130E) || +- (rdev->pdev->device == 0x1315) || +- (rdev->pdev->device == 0x1318) || +- (rdev->pdev->device == 0x131B)) { +- rdev->config.cik.max_cu_per_sh = 4; +- rdev->config.cik.max_backends_per_se = 1; +- } else { +- rdev->config.cik.max_cu_per_sh = 3; +- rdev->config.cik.max_backends_per_se = 1; +- } ++ rdev->config.cik.max_cu_per_sh = 8; ++ rdev->config.cik.max_backends_per_se = 2; + rdev->config.cik.max_sh_per_se = 1; + rdev->config.cik.max_texture_channel_caches = 4; + rdev->config.cik.max_gprs = 256; +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 27affbde058c..af0d7fd5706b 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -897,9 +897,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + int r; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + if (encoder) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); +@@ -922,8 +924,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) + /* check acpi lid status ??? 
*/ + + radeon_connector_update_scratch_regs(connector, ret); +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } ++ + return ret; + } + +@@ -1037,9 +1043,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + int r; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + encoder = radeon_best_single_encoder(connector); + if (!encoder) +@@ -1106,8 +1114,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) + radeon_connector_update_scratch_regs(connector, ret); + + out: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +@@ -1171,9 +1181,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) + if (!radeon_connector->dac_load_detect) + return ret; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + encoder = radeon_best_single_encoder(connector); + if (!encoder) +@@ -1185,8 +1197,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) + if (ret == connector_status_connected) + ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); + radeon_connector_update_scratch_regs(connector, ret); +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } ++ + return ret; + } + +@@ -1249,9 +1265,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) + enum drm_connector_status ret = connector_status_disconnected; + bool dret = false, broken_edid = false; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++ return connector_status_disconnected; ++ } + + if (radeon_connector->detected_hpd_without_ddc) { + force = true; +@@ -1434,8 +1452,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) + } + + exit: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +@@ -1686,9 +1706,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) + if (radeon_dig_connector->is_mst) + return connector_status_disconnected; + +- r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) +- return connector_status_disconnected; ++ if (!drm_kms_helper_is_poll_worker()) { ++ r = pm_runtime_get_sync(connector->dev->dev); ++ if (r < 0) ++
return connector_status_disconnected; ++ } + + if (!force && radeon_check_hpd_status_unchanged(connector)) { + ret = connector->status; +@@ -1775,8 +1797,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) + } + + out: +- pm_runtime_mark_last_busy(connector->dev->dev); +- pm_runtime_put_autosuspend(connector->dev->dev); ++ if (!drm_kms_helper_is_poll_worker()) { ++ pm_runtime_mark_last_busy(connector->dev->dev); ++ pm_runtime_put_autosuspend(connector->dev->dev); ++ } + + return ret; + } +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index 9520154f1d7c..6840d3c5cd64 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -1139,6 +1139,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + ++ if (cmd.qp_state > IB_QPS_ERR) ++ return -EINVAL; ++ + ctx = ucma_get_ctx(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); +@@ -1275,6 +1278,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + ++ if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) ++ return -EINVAL; ++ + optval = memdup_user((void __user *) (unsigned long) cmd.optval, + cmd.optlen); + if (IS_ERR(optval)) { +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c +index fcd04b881ec1..9cdcff77b9a8 100644 +--- a/drivers/infiniband/hw/mlx5/cq.c ++++ b/drivers/infiniband/hw/mlx5/cq.c +@@ -1117,7 +1117,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + if (ucmd.reserved0 || ucmd.reserved1) + return -EINVAL; + +- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, ++ /* check multiplication overflow */ ++ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) ++ return -EINVAL; ++ ++ umem = ib_umem_get(context, ucmd.buf_addr, ++ (size_t)ucmd.cqe_size * entries, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index c7d5786d366b..7e466f031e30 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -1821,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, + + mr->ibmr.iova = sg_dma_address(sg) + sg_offset; + mr->ibmr.length = 0; +- mr->ndescs = sg_nents; + + for_each_sg(sgl, sg, sg_nents, i) { + if (unlikely(i >= mr->max_descs)) +@@ -1833,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, + + sg_offset = 0; + } ++ mr->ndescs = i; + + if (sg_offset_p) + *sg_offset_p = sg_offset; +diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c +index 7f12b6579f82..795fa353de7c 100644 +--- a/drivers/input/keyboard/matrix_keypad.c ++++ b/drivers/input/keyboard/matrix_keypad.c +@@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev) + { + struct matrix_keypad *keypad = input_get_drvdata(dev); + ++ spin_lock_irq(&keypad->lock); + keypad->stopped = true; +- mb(); ++ spin_unlock_irq(&keypad->lock); ++ + flush_work(&keypad->work.work); + /* + * matrix_keypad_scan() will leave IRQs enabled; +diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c +index 3048ef3e3e16..a5e8998047fe 100644 +--- a/drivers/input/keyboard/tca8418_keypad.c ++++ b/drivers/input/keyboard/tca8418_keypad.c +@@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data) + input_event(input, EV_MSC, MSC_SCAN, code); + 
input_report_key(input, keymap[code], state); + +- /* Read for next loop */ +- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg); + } while (1); + + input_sync(input); +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index 28ce342348a9..c61341c84d2d 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -937,6 +937,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) + uint32_t rtime = cpu_to_le32(get_seconds()); + struct uuid_entry *u; + char buf[BDEVNAME_SIZE]; ++ struct cached_dev *exist_dc, *t; + + bdevname(dc->bdev, buf); + +@@ -960,6 +961,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) + return -EINVAL; + } + ++ /* Check whether already attached */ ++ list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { ++ if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { ++ pr_err("Tried to attach %s but duplicate UUID already attached", ++ buf); ++ ++ return -EINVAL; ++ } ++ } ++ + u = uuid_find(c, dc->sb.uuid); + + if (u && +@@ -1182,7 +1193,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, + + return; + err: +- pr_notice("error opening %s: %s", bdevname(bdev, name), err); ++ pr_notice("error %s: %s", bdevname(bdev, name), err); + bcache_device_stop(&dc->disk); + } + +@@ -1853,6 +1864,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, + const char *err = NULL; /* must be set for any error case */ + int ret = 0; + ++ bdevname(bdev, name); ++ + memcpy(&ca->sb, sb, sizeof(struct cache_sb)); + ca->bdev = bdev; + ca->bdev->bd_holder = ca; +@@ -1863,11 +1876,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, + ca->sb_bio.bi_io_vec[0].bv_page = sb_page; + get_page(sb_page); + +- if (blk_queue_discard(bdev_get_queue(ca->bdev))) ++ if (blk_queue_discard(bdev_get_queue(bdev))) + ca->discard = CACHE_DISCARD(&ca->sb); + + ret = cache_alloc(ca); + if (ret != 0) { ++ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + if (ret == -ENOMEM) + err = "cache_alloc(): -ENOMEM"; + else +@@ -1890,14 +1904,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, + goto out; + } + +- pr_info("registered cache device %s", bdevname(bdev, name)); ++ pr_info("registered cache device %s", name); + + out: + kobject_put(&ca->kobj); + + err: + if (err) +- pr_notice("error opening %s: %s", bdevname(bdev, name), err); ++ pr_notice("error %s: %s", name, err); + + return ret; + } +@@ -1986,6 +2000,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, + if (err) + goto err_close; + ++ err = "failed to register device"; + if (SB_IS_BDEV(sb)) { + struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) +@@ -2000,7 +2015,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, + goto err_close; + + if (register_cache(sb, sb_page, bdev, ca) != 0) +- goto err_close; ++ goto err; + } + out: + if (sb_page) +@@ -2013,7 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, + err_close: + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + err: +- pr_info("error opening %s: %s", path, err); ++ pr_info("error %s: %s", path, err); + ret = -EINVAL; + goto out; + } +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c +index 26d999c812c9..0f572bff64f5 100644 +--- a/drivers/media/i2c/tc358743.c ++++ b/drivers/media/i2c/tc358743.c +@@ -222,7 +222,7 @@ static void
i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, + u8 mask, u8 val) + { +- i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); ++ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); + } + + static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) +diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c +index 7ac78c13dd1c..1bcb25bc35ef 100644 +--- a/drivers/mtd/ubi/vmt.c ++++ b/drivers/mtd/ubi/vmt.c +@@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) + vol->last_eb_bytes = vol->usable_leb_size; + } + ++ /* Make volume "available" before it becomes accessible via sysfs */ ++ spin_lock(&ubi->volumes_lock); ++ ubi->volumes[vol_id] = vol; ++ ubi->vol_count += 1; ++ spin_unlock(&ubi->volumes_lock); ++ + /* Register character device for the volume */ + cdev_init(&vol->cdev, &ubi_vol_cdev_operations); + vol->cdev.owner = THIS_MODULE; +@@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) + if (err) + goto out_sysfs; + +- spin_lock(&ubi->volumes_lock); +- ubi->volumes[vol_id] = vol; +- ubi->vol_count += 1; +- spin_unlock(&ubi->volumes_lock); +- + ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); + self_check_volumes(ubi); + return err; +@@ -328,6 +329,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) + out_cdev: + cdev_del(&vol->cdev); + out_mapping: ++ spin_lock(&ubi->volumes_lock); ++ ubi->volumes[vol_id] = NULL; ++ ubi->vol_count -= 1; ++ spin_unlock(&ubi->volumes_lock); + if (do_free) + ubi_eba_destroy_table(eba_tbl); + out_acc: +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c +index af8f6e92e885..b3a87159e457 100644 +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -861,7 +861,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) + /* setup bus numbers */ + val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS); + val &= 0xff000000; +- val |= 0x00010100; ++ val |= 0x00ff0100; + dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val); + + /* setup command register */ +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index 8f12f6baa6b8..4441a559f139 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -369,6 +369,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) + srb_t *sp = (srb_t *)ptr; + struct srb_iocb *abt = &sp->u.iocb_cmd; + ++ del_timer(&sp->u.iocb_cmd.timer); + complete(&abt->u.abt.comp); + } + +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c +index 59059ffbb98c..11f45cb99892 100644 +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -5789,7 +5789,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, + fc_port_t *fcport; + int rc; + +- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); ++ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, + "qla_target(%d): Allocation of tmp FC port failed", +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index d08324998933..db100154f85c 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -343,24 +343,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { +- ret = -EINVAL; +- goto out; ++ mutex_unlock(&ashmem_mutex); ++ return -EINVAL; + } + + if (!asma->file) { +- ret = -EBADF; +- goto out; ++ mutex_unlock(&ashmem_mutex); 
++ return -EBADF; + } + ++ mutex_unlock(&ashmem_mutex); ++ + ret = vfs_llseek(asma->file, offset, origin); + if (ret < 0) +- goto out; ++ return ret; + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; +- +-out: +- mutex_unlock(&ashmem_mutex); + return ret; + } + +diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c +index a5bf2cc165c0..1736248bc5b8 100644 +--- a/drivers/staging/comedi/drivers.c ++++ b/drivers/staging/comedi/drivers.c +@@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, + struct comedi_cmd *cmd = &async->cmd; + + if (cmd->stop_src == TRIG_COUNT) { +- unsigned int nscans = nsamples / cmd->scan_end_arg; +- unsigned int scans_left = __comedi_nscans_left(s, nscans); ++ unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); + unsigned int scan_pos = + comedi_bytes_to_samples(s, async->scan_progress); + unsigned long long samples_left = 0; +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index b80ea872b039..e82b3473b6b8 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -5099,6 +5099,17 @@ static struct pci_device_id serial_pci_tbl[] = { + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ + pbn_b2_4_115200 }, ++ /* ++ * BrainBoxes UC-260 ++ */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x0D21, ++ PCI_ANY_ID, PCI_ANY_ID, ++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, ++ pbn_b2_4_115200 }, ++ { PCI_VENDOR_ID_INTASHIELD, 0x0E34, ++ PCI_ANY_ID, PCI_ANY_ID, ++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, ++ pbn_b2_4_115200 }, + /* + * Perle PCI-RAS cards + */ +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 4d079cdaa7a3..addb287cacea 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -1780,6 +1780,7 @@ static void atmel_get_ip_name(struct uart_port *port) + switch (version) { + case 0x302: + case 0x10213: ++ case 0x10302: + dev_dbg(port->dev, "This version is usart\n"); + atmel_port->has_frac_baudrate = true; + atmel_port->has_hw_timer = true; +diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c +index c3651540e1ba..3b31fd8863eb 100644 +--- a/drivers/tty/serial/earlycon.c ++++ b/drivers/tty/serial/earlycon.c +@@ -253,11 +253,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match, + } + port->mapbase = addr; + port->uartclk = BASE_BAUD * 16; +- port->membase = earlycon_map(port->mapbase, SZ_4K); + + val = of_get_flat_dt_prop(node, "reg-offset", NULL); + if (val) + port->mapbase += be32_to_cpu(*val); ++ port->membase = earlycon_map(port->mapbase, SZ_4K); ++ + val = of_get_flat_dt_prop(node, "reg-shift", NULL); + if (val) + port->regshift = be32_to_cpu(*val); +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 23973a8124fc..6cbd46c565f8 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -1135,6 +1135,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) + uport->ops->config_port(uport, flags); + + ret = uart_startup(tty, state, 1); ++ if (ret == 0) ++ tty_port_set_initialized(port, true); + if (ret > 0) + ret = 0; + } +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index 15eaea53b3df..a55c94dfefd2 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@
-935,6 +935,8 @@ static void sci_receive_chars(struct uart_port *port) + /* Tell the rest of the system the news. New characters! */ + tty_flip_buffer_push(tport); + } else { ++ /* TTY buffers full; read from RX reg to prevent lockup */ ++ serial_port_in(port, SCxRDR); + serial_port_in(port, SCxSR); /* dummy read */ + sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); + } +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index 4c388451f31f..9cb66475c211 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -148,6 +148,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, + + ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); + ++ /* Linger a bit, prior to the next control message. */ ++ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) ++ msleep(200); ++ + kfree(dr); + + return ret; +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 774c97bb1c08..4f1c6f8d4352 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -229,7 +229,8 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Corsair Strafe RGB */ +- { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, ++ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | ++ USB_QUIRK_DELAY_CTRL_MSG }, + + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 48f52138bb1a..071346973dd6 100644 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1522,7 +1522,6 @@ ffs_fs_kill_sb(struct super_block *sb) + if (sb->s_fs_info) { + ffs_release_dev(sb->s_fs_info); + ffs_data_closed(sb->s_fs_info); +- ffs_data_put(sb->s_fs_info); + } + } + +diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c +index e59334b09c41..f8ac59e0158d 100644 +--- a/drivers/usb/mon/mon_text.c ++++ b/drivers/usb/mon/mon_text.c +@@ -83,6 +83,8 @@ struct mon_reader_text { + + wait_queue_head_t wait; + int printf_size; ++ size_t printf_offset; ++ size_t printf_togo; + char *printf_buf; + struct mutex printf_lock; + +@@ -374,75 +376,103 @@ static int mon_text_open(struct inode *inode, struct file *file) + return rc; + } + +-/* +- * For simplicity, we read one record in one system call and throw out +- * what does not fit. This means that the following does not work: +- * dd if=/dbg/usbmon/0t bs=10 +- * Also, we do not allow seeks and do not bother advancing the offset. +- */ ++static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, ++ char __user * const buf, const size_t nbytes) ++{ ++ const size_t togo = min(nbytes, rp->printf_togo); ++ ++ if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) ++ return -EFAULT; ++ rp->printf_togo -= togo; ++ rp->printf_offset += togo; ++ return togo; ++} ++ ++/* ppos is not advanced since the llseek operation is not permitted. 
*/ + static ssize_t mon_text_read_t(struct file *file, char __user *buf, +- size_t nbytes, loff_t *ppos) ++ size_t nbytes, loff_t *ppos) + { + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; ++ ssize_t ret; + +- ep = mon_text_read_wait(rp, file); +- if (IS_ERR(ep)) +- return PTR_ERR(ep); + mutex_lock(&rp->printf_lock); +- ptr.cnt = 0; +- ptr.pbuf = rp->printf_buf; +- ptr.limit = rp->printf_size; +- +- mon_text_read_head_t(rp, &ptr, ep); +- mon_text_read_statset(rp, &ptr, ep); +- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, +- " %d", ep->length); +- mon_text_read_data(rp, &ptr, ep); +- +- if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) +- ptr.cnt = -EFAULT; ++ ++ if (rp->printf_togo == 0) { ++ ++ ep = mon_text_read_wait(rp, file); ++ if (IS_ERR(ep)) { ++ mutex_unlock(&rp->printf_lock); ++ return PTR_ERR(ep); ++ } ++ ptr.cnt = 0; ++ ptr.pbuf = rp->printf_buf; ++ ptr.limit = rp->printf_size; ++ ++ mon_text_read_head_t(rp, &ptr, ep); ++ mon_text_read_statset(rp, &ptr, ep); ++ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, ++ " %d", ep->length); ++ mon_text_read_data(rp, &ptr, ep); ++ ++ rp->printf_togo = ptr.cnt; ++ rp->printf_offset = 0; ++ ++ kmem_cache_free(rp->e_slab, ep); ++ } ++ ++ ret = mon_text_copy_to_user(rp, buf, nbytes); + mutex_unlock(&rp->printf_lock); +- kmem_cache_free(rp->e_slab, ep); +- return ptr.cnt; ++ return ret; + } + ++/* ppos is not advanced since the llseek operation is not permitted. */ + static ssize_t mon_text_read_u(struct file *file, char __user *buf, +- size_t nbytes, loff_t *ppos) ++ size_t nbytes, loff_t *ppos) + { + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; ++ ssize_t ret; + +- ep = mon_text_read_wait(rp, file); +- if (IS_ERR(ep)) +- return PTR_ERR(ep); + mutex_lock(&rp->printf_lock); +- ptr.cnt = 0; +- ptr.pbuf = rp->printf_buf; +- ptr.limit = rp->printf_size; + +- mon_text_read_head_u(rp, &ptr, ep); +- if (ep->type == 'E') { +- mon_text_read_statset(rp, &ptr, ep); +- } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { +- mon_text_read_isostat(rp, &ptr, ep); +- mon_text_read_isodesc(rp, &ptr, ep); +- } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { +- mon_text_read_intstat(rp, &ptr, ep); +- } else { +- mon_text_read_statset(rp, &ptr, ep); ++ if (rp->printf_togo == 0) { ++ ++ ep = mon_text_read_wait(rp, file); ++ if (IS_ERR(ep)) { ++ mutex_unlock(&rp->printf_lock); ++ return PTR_ERR(ep); ++ } ++ ptr.cnt = 0; ++ ptr.pbuf = rp->printf_buf; ++ ptr.limit = rp->printf_size; ++ ++ mon_text_read_head_u(rp, &ptr, ep); ++ if (ep->type == 'E') { ++ mon_text_read_statset(rp, &ptr, ep); ++ } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { ++ mon_text_read_isostat(rp, &ptr, ep); ++ mon_text_read_isodesc(rp, &ptr, ep); ++ } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { ++ mon_text_read_intstat(rp, &ptr, ep); ++ } else { ++ mon_text_read_statset(rp, &ptr, ep); ++ } ++ ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, ++ " %d", ep->length); ++ mon_text_read_data(rp, &ptr, ep); ++ ++ rp->printf_togo = ptr.cnt; ++ rp->printf_offset = 0; ++ ++ kmem_cache_free(rp->e_slab, ep); + } +- ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, +- " %d", ep->length); +- mon_text_read_data(rp, &ptr, ep); + +- if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) +- ptr.cnt = -EFAULT; ++ ret = mon_text_copy_to_user(rp, buf, nbytes); + mutex_unlock(&rp->printf_lock); +- kmem_cache_free(rp->e_slab, ep); +- return 
ptr.cnt; ++ return ret; + } + + static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c +index 6891e9092775..a96dcc660d0f 100644 +--- a/drivers/usb/storage/uas.c ++++ b/drivers/usb/storage/uas.c +@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf) + return 0; + + err = uas_configure_endpoints(devinfo); +- if (err && err != ENODEV) ++ if (err && err != -ENODEV) + shost_printk(KERN_ERR, shost, + "%s: alloc streams error %d after reset", + __func__, err); +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index b605115eb47a..ca3a5d430ae1 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -2137,6 +2137,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + ++/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ ++UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, ++ "JMicron", ++ "USB to ATA/ATAPI Bridge", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_BROKEN_FUA ), ++ + /* Reported-by George Cherian <george.cherian@cavium.com> */ + UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, + "JMicron", +diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c +index 0f98f2c7475f..7efa374a4970 100644 +--- a/drivers/usb/usbip/vudc_sysfs.c ++++ b/drivers/usb/usbip/vudc_sysfs.c +@@ -117,10 +117,14 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + if (rv != 0) + return -EINVAL; + ++ if (!udc) { ++ dev_err(dev, "no device"); ++ return -ENODEV; ++ } + spin_lock_irqsave(&udc->lock, flags); + /* Don't export what we don't have */ +- if (!udc || !udc->driver || !udc->pullup) { +- dev_err(dev, "no device or gadget not bound"); ++ if (!udc->driver || !udc->pullup) { ++ dev_err(dev, "gadget not bound"); + ret = -ENODEV; + goto unlock; + } +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index 489bfc61cf30..8977f40ea441 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -423,8 +423,6 @@ static inline int virtqueue_add(struct virtqueue *_vq, + i = vq->vring.desc[i].next; + } + +- vq->vq.num_free += total_sg; +- + if (indirect) + kfree(desc); + +diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c +index 70c7194e2810..b0a158073abd 100644 +--- a/drivers/watchdog/hpwdt.c ++++ b/drivers/watchdog/hpwdt.c +@@ -28,16 +28,7 @@ + #include <linux/types.h> + #include <linux/uaccess.h> + #include <linux/watchdog.h> +-#ifdef CONFIG_HPWDT_NMI_DECODING +-#include <linux/dmi.h> +-#include <linux/spinlock.h> +-#include <linux/nmi.h> +-#include <linux/kdebug.h> +-#include <linux/notifier.h> +-#include <asm/cacheflush.h> +-#endif /* CONFIG_HPWDT_NMI_DECODING */ + #include <asm/nmi.h> +-#include <asm/frame.h> + + #define HPWDT_VERSION "1.4.0" + #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) +@@ -48,10 +39,14 @@ + static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ + static unsigned int reload; /* the computed soft_margin */ + static bool nowayout = WATCHDOG_NOWAYOUT; ++#ifdef CONFIG_HPWDT_NMI_DECODING ++static unsigned int allow_kdump = 1; ++#endif + static char expect_release; + static unsigned long hpwdt_is_open; + + static void __iomem *pci_mem_addr; /* the PCI-memory address */ ++static unsigned long __iomem *hpwdt_nmistat; + static unsigned long __iomem *hpwdt_timer_reg; + static unsigned long __iomem *hpwdt_timer_con; + +@@ -62,373 +57,6 @@ static 
const struct pci_device_id hpwdt_devices[] = { + }; + MODULE_DEVICE_TABLE(pci, hpwdt_devices); + +-#ifdef CONFIG_HPWDT_NMI_DECODING +-#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ +-#define CRU_BIOS_SIGNATURE_VALUE 0x55524324 +-#define PCI_BIOS32_PARAGRAPH_LEN 16 +-#define PCI_ROM_BASE1 0x000F0000 +-#define ROM_SIZE 0x10000 +- +-struct bios32_service_dir { +- u32 signature; +- u32 entry_point; +- u8 revision; +- u8 length; +- u8 checksum; +- u8 reserved[5]; +-}; +- +-/* type 212 */ +-struct smbios_cru64_info { +- u8 type; +- u8 byte_length; +- u16 handle; +- u32 signature; +- u64 physical_address; +- u32 double_length; +- u32 double_offset; +-}; +-#define SMBIOS_CRU64_INFORMATION 212 +- +-/* type 219 */ +-struct smbios_proliant_info { +- u8 type; +- u8 byte_length; +- u16 handle; +- u32 power_features; +- u32 omega_features; +- u32 reserved; +- u32 misc_features; +-}; +-#define SMBIOS_ICRU_INFORMATION 219 +- +- +-struct cmn_registers { +- union { +- struct { +- u8 ral; +- u8 rah; +- u16 rea2; +- }; +- u32 reax; +- } u1; +- union { +- struct { +- u8 rbl; +- u8 rbh; +- u8 reb2l; +- u8 reb2h; +- }; +- u32 rebx; +- } u2; +- union { +- struct { +- u8 rcl; +- u8 rch; +- u16 rec2; +- }; +- u32 recx; +- } u3; +- union { +- struct { +- u8 rdl; +- u8 rdh; +- u16 red2; +- }; +- u32 redx; +- } u4; +- +- u32 resi; +- u32 redi; +- u16 rds; +- u16 res; +- u32 reflags; +-} __attribute__((packed)); +- +-static unsigned int hpwdt_nmi_decoding; +-static unsigned int allow_kdump = 1; +-static unsigned int is_icru; +-static unsigned int is_uefi; +-static DEFINE_SPINLOCK(rom_lock); +-static void *cru_rom_addr; +-static struct cmn_registers cmn_regs; +- +-extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs, +- unsigned long *pRomEntry); +- +-#ifdef CONFIG_X86_32 +-/* --32 Bit Bios------------------------------------------------------------ */ +- +-#define HPWDT_ARCH 32 +- +-asm(".text \n\t" +- ".align 4 \n\t" +- ".globl asminline_call \n" +- "asminline_call: \n\t" +- "pushl %ebp \n\t" +- "movl %esp, %ebp \n\t" +- "pusha \n\t" +- "pushf \n\t" +- "push %es \n\t" +- "push %ds \n\t" +- "pop %es \n\t" +- "movl 8(%ebp),%eax \n\t" +- "movl 4(%eax),%ebx \n\t" +- "movl 8(%eax),%ecx \n\t" +- "movl 12(%eax),%edx \n\t" +- "movl 16(%eax),%esi \n\t" +- "movl 20(%eax),%edi \n\t" +- "movl (%eax),%eax \n\t" +- "push %cs \n\t" +- "call *12(%ebp) \n\t" +- "pushf \n\t" +- "pushl %eax \n\t" +- "movl 8(%ebp),%eax \n\t" +- "movl %ebx,4(%eax) \n\t" +- "movl %ecx,8(%eax) \n\t" +- "movl %edx,12(%eax) \n\t" +- "movl %esi,16(%eax) \n\t" +- "movl %edi,20(%eax) \n\t" +- "movw %ds,24(%eax) \n\t" +- "movw %es,26(%eax) \n\t" +- "popl %ebx \n\t" +- "movl %ebx,(%eax) \n\t" +- "popl %ebx \n\t" +- "movl %ebx,28(%eax) \n\t" +- "pop %es \n\t" +- "popf \n\t" +- "popa \n\t" +- "leave \n\t" +- "ret \n\t" +- ".previous"); +- +- +-/* +- * cru_detect +- * +- * Routine Description: +- * This function uses the 32-bit BIOS Service Directory record to +- * search for a $CRU record. 
+- * +- * Return Value: +- * 0 : SUCCESS +- * <0 : FAILURE +- */ +-static int cru_detect(unsigned long map_entry, +- unsigned long map_offset) +-{ +- void *bios32_map; +- unsigned long *bios32_entrypoint; +- unsigned long cru_physical_address; +- unsigned long cru_length; +- unsigned long physical_bios_base = 0; +- unsigned long physical_bios_offset = 0; +- int retval = -ENODEV; +- +- bios32_map = ioremap(map_entry, (2 * PAGE_SIZE)); +- +- if (bios32_map == NULL) +- return -ENODEV; +- +- bios32_entrypoint = bios32_map + map_offset; +- +- cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; +- +- set_memory_x((unsigned long)bios32_map, 2); +- asminline_call(&cmn_regs, bios32_entrypoint); +- +- if (cmn_regs.u1.ral != 0) { +- pr_warn("Call succeeded but with an error: 0x%x\n", +- cmn_regs.u1.ral); +- } else { +- physical_bios_base = cmn_regs.u2.rebx; +- physical_bios_offset = cmn_regs.u4.redx; +- cru_length = cmn_regs.u3.recx; +- cru_physical_address = +- physical_bios_base + physical_bios_offset; +- +- /* If the values look OK, then map it in. */ +- if ((physical_bios_base + physical_bios_offset)) { +- cru_rom_addr = +- ioremap(cru_physical_address, cru_length); +- if (cru_rom_addr) { +- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, +- (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT); +- retval = 0; +- } +- } +- +- pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base); +- pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset); +- pr_debug("CRU Length: 0x%lx\n", cru_length); +- pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr); +- } +- iounmap(bios32_map); +- return retval; +-} +- +-/* +- * bios_checksum +- */ +-static int bios_checksum(const char __iomem *ptr, int len) +-{ +- char sum = 0; +- int i; +- +- /* +- * calculate checksum of size bytes. This should add up +- * to zero if we have a valid header. +- */ +- for (i = 0; i < len; i++) +- sum += ptr[i]; +- +- return ((sum == 0) && (len > 0)); +-} +- +-/* +- * bios32_present +- * +- * Routine Description: +- * This function finds the 32-bit BIOS Service Directory +- * +- * Return Value: +- * 0 : SUCCESS +- * <0 : FAILURE +- */ +-static int bios32_present(const char __iomem *p) +-{ +- struct bios32_service_dir *bios_32_ptr; +- int length; +- unsigned long map_entry, map_offset; +- +- bios_32_ptr = (struct bios32_service_dir *) p; +- +- /* +- * Search for signature by checking equal to the swizzled value +- * instead of calling another routine to perform a strcmp. +- */ +- if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) { +- length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN; +- if (bios_checksum(p, length)) { +- /* +- * According to the spec, we're looking for the +- * first 4KB-aligned address below the entrypoint +- * listed in the header. The Service Directory code +- * is guaranteed to occupy no more than 2 4KB pages. +- */ +- map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1); +- map_offset = bios_32_ptr->entry_point - map_entry; +- +- return cru_detect(map_entry, map_offset); +- } +- } +- return -ENODEV; +-} +- +-static int detect_cru_service(void) +-{ +- char __iomem *p, *q; +- int rc = -1; +- +- /* +- * Search from 0x0f0000 through 0x0fffff, inclusive. 
+- */ +- p = ioremap(PCI_ROM_BASE1, ROM_SIZE); +- if (p == NULL) +- return -ENOMEM; +- +- for (q = p; q < p + ROM_SIZE; q += 16) { +- rc = bios32_present(q); +- if (!rc) +- break; +- } +- iounmap(p); +- return rc; +-} +-/* ------------------------------------------------------------------------- */ +-#endif /* CONFIG_X86_32 */ +-#ifdef CONFIG_X86_64 +-/* --64 Bit Bios------------------------------------------------------------ */ +- +-#define HPWDT_ARCH 64 +- +-asm(".text \n\t" +- ".align 4 \n\t" +- ".globl asminline_call \n\t" +- ".type asminline_call, @function \n\t" +- "asminline_call: \n\t" +- FRAME_BEGIN +- "pushq %rax \n\t" +- "pushq %rbx \n\t" +- "pushq %rdx \n\t" +- "pushq %r12 \n\t" +- "pushq %r9 \n\t" +- "movq %rsi, %r12 \n\t" +- "movq %rdi, %r9 \n\t" +- "movl 4(%r9),%ebx \n\t" +- "movl 8(%r9),%ecx \n\t" +- "movl 12(%r9),%edx \n\t" +- "movl 16(%r9),%esi \n\t" +- "movl 20(%r9),%edi \n\t" +- "movl (%r9),%eax \n\t" +- "call *%r12 \n\t" +- "pushfq \n\t" +- "popq %r12 \n\t" +- "movl %eax, (%r9) \n\t" +- "movl %ebx, 4(%r9) \n\t" +- "movl %ecx, 8(%r9) \n\t" +- "movl %edx, 12(%r9) \n\t" +- "movl %esi, 16(%r9) \n\t" +- "movl %edi, 20(%r9) \n\t" +- "movq %r12, %rax \n\t" +- "movl %eax, 28(%r9) \n\t" +- "popq %r9 \n\t" +- "popq %r12 \n\t" +- "popq %rdx \n\t" +- "popq %rbx \n\t" +- "popq %rax \n\t" +- FRAME_END +- "ret \n\t" +- ".previous"); +- +-/* +- * dmi_find_cru +- * +- * Routine Description: +- * This function checks whether or not a SMBIOS/DMI record is +- * the 64bit CRU info or not +- */ +-static void dmi_find_cru(const struct dmi_header *dm, void *dummy) +-{ +- struct smbios_cru64_info *smbios_cru64_ptr; +- unsigned long cru_physical_address; +- +- if (dm->type == SMBIOS_CRU64_INFORMATION) { +- smbios_cru64_ptr = (struct smbios_cru64_info *) dm; +- if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { +- cru_physical_address = +- smbios_cru64_ptr->physical_address + +- smbios_cru64_ptr->double_offset; +- cru_rom_addr = ioremap(cru_physical_address, +- smbios_cru64_ptr->double_length); +- set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, +- smbios_cru64_ptr->double_length >> PAGE_SHIFT); +- } +- } +-} +- +-static int detect_cru_service(void) +-{ +- cru_rom_addr = NULL; +- +- dmi_walk(dmi_find_cru, NULL); +- +- /* if cru_rom_addr has been set then we found a CRU service */ +- return ((cru_rom_addr != NULL) ? 0 : -ENODEV); +-} +-/* ------------------------------------------------------------------------- */ +-#endif /* CONFIG_X86_64 */ +-#endif /* CONFIG_HPWDT_NMI_DECODING */ + + /* + * Watchdog operations +@@ -475,32 +103,22 @@ static int hpwdt_time_left(void) + } + + #ifdef CONFIG_HPWDT_NMI_DECODING ++static int hpwdt_my_nmi(void) ++{ ++ return ioread8(hpwdt_nmistat) & 0x6; ++} ++ + /* + * NMI Handler + */ + static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) + { +- unsigned long rom_pl; +- static int die_nmi_called; +- +- if (!hpwdt_nmi_decoding) ++ if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) + return NMI_DONE; + +- spin_lock_irqsave(&rom_lock, rom_pl); +- if (!die_nmi_called && !is_icru && !is_uefi) +- asminline_call(&cmn_regs, cru_rom_addr); +- die_nmi_called = 1; +- spin_unlock_irqrestore(&rom_lock, rom_pl); +- + if (allow_kdump) + hpwdt_stop(); + +- if (!is_icru && !is_uefi) { +- if (cmn_regs.u1.ral == 0) { +- nmi_panic(regs, "An NMI occurred, but unable to determine source.\n"); +- return NMI_HANDLED; +- } +- } + nmi_panic(regs, "An NMI occurred. 
Depending on your system the reason " + "for the NMI is logged in any one of the following " + "resources:\n" +@@ -666,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = { + * Init & Exit + */ + +-#ifdef CONFIG_HPWDT_NMI_DECODING +-#ifdef CONFIG_X86_LOCAL_APIC +-static void hpwdt_check_nmi_decoding(struct pci_dev *dev) +-{ +- /* +- * If nmi_watchdog is turned off then we can turn on +- * our nmi decoding capability. +- */ +- hpwdt_nmi_decoding = 1; +-} +-#else +-static void hpwdt_check_nmi_decoding(struct pci_dev *dev) +-{ +- dev_warn(&dev->dev, "NMI decoding is disabled. " +- "Your kernel does not support a NMI Watchdog.\n"); +-} +-#endif /* CONFIG_X86_LOCAL_APIC */ +- +-/* +- * dmi_find_icru +- * +- * Routine Description: +- * This function checks whether or not we are on an iCRU-based server. +- * This check is independent of architecture and needs to be made for +- * any ProLiant system. +- */ +-static void dmi_find_icru(const struct dmi_header *dm, void *dummy) +-{ +- struct smbios_proliant_info *smbios_proliant_ptr; +- +- if (dm->type == SMBIOS_ICRU_INFORMATION) { +- smbios_proliant_ptr = (struct smbios_proliant_info *) dm; +- if (smbios_proliant_ptr->misc_features & 0x01) +- is_icru = 1; +- if (smbios_proliant_ptr->misc_features & 0x408) +- is_uefi = 1; +- } +-} + + static int hpwdt_init_nmi_decoding(struct pci_dev *dev) + { ++#ifdef CONFIG_HPWDT_NMI_DECODING + int retval; +- +- /* +- * On typical CRU-based systems we need to map that service in +- * the BIOS. For 32 bit Operating Systems we need to go through +- * the 32 Bit BIOS Service Directory. For 64 bit Operating +- * Systems we get that service through SMBIOS. +- * +- * On systems that support the new iCRU service all we need to +- * do is call dmi_walk to get the supported flag value and skip +- * the old cru detect code. +- */ +- dmi_walk(dmi_find_icru, NULL); +- if (!is_icru && !is_uefi) { +- +- /* +- * We need to map the ROM to get the CRU service. +- * For 32 bit Operating Systems we need to go through the 32 Bit +- * BIOS Service Directory +- * For 64 bit Operating Systems we get that service through SMBIOS. +- */ +- retval = detect_cru_service(); +- if (retval < 0) { +- dev_warn(&dev->dev, +- "Unable to detect the %d Bit CRU Service.\n", +- HPWDT_ARCH); +- return retval; +- } +- +- /* +- * We know this is the only CRU call we need to make so lets keep as +- * few instructions as possible once the NMI comes in. 
+- */ +- cmn_regs.u1.rah = 0x0D; +- cmn_regs.u1.ral = 0x02; +- } +- + /* + * Only one function can register for NMI_UNKNOWN + */ +@@ -771,44 +316,25 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev) + dev_warn(&dev->dev, + "Unable to register a die notifier (err=%d).\n", + retval); +- if (cru_rom_addr) +- iounmap(cru_rom_addr); + return retval; ++#endif /* CONFIG_HPWDT_NMI_DECODING */ ++ return 0; + } + + static void hpwdt_exit_nmi_decoding(void) + { ++#ifdef CONFIG_HPWDT_NMI_DECODING + unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); + unregister_nmi_handler(NMI_SERR, "hpwdt"); + unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); +- if (cru_rom_addr) +- iounmap(cru_rom_addr); +-} +-#else /* !CONFIG_HPWDT_NMI_DECODING */ +-static void hpwdt_check_nmi_decoding(struct pci_dev *dev) +-{ +-} +- +-static int hpwdt_init_nmi_decoding(struct pci_dev *dev) +-{ +- return 0; ++#endif + } + +-static void hpwdt_exit_nmi_decoding(void) +-{ +-} +-#endif /* CONFIG_HPWDT_NMI_DECODING */ +- + static int hpwdt_init_one(struct pci_dev *dev, + const struct pci_device_id *ent) + { + int retval; + +- /* +- * Check if we can do NMI decoding or not +- */ +- hpwdt_check_nmi_decoding(dev); +- + /* + * First let's find out if we are on an iLO2+ server. We will + * not run on a legacy ASM box. +@@ -842,6 +368,7 @@ static int hpwdt_init_one(struct pci_dev *dev, + retval = -ENOMEM; + goto error_pci_iomap; + } ++ hpwdt_nmistat = pci_mem_addr + 0x6e; + hpwdt_timer_reg = pci_mem_addr + 0x70; + hpwdt_timer_con = pci_mem_addr + 0x72; + +@@ -912,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + #ifdef CONFIG_HPWDT_NMI_DECODING + module_param(allow_kdump, int, 0); + MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); +-#endif /* !CONFIG_HPWDT_NMI_DECODING */ ++#endif /* CONFIG_HPWDT_NMI_DECODING */ + + module_pci_driver(hpwdt_driver); +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 3eeed8f0aa06..3fadfabcac39 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -837,8 +837,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + if (!IS_LAST_ENTRY(s->first)) + ext4_xattr_rehash(header(s->base), + s->here); +- ext4_xattr_cache_insert(ext4_mb_cache, +- bs->bh); + } + ext4_xattr_block_csum_set(inode, bs->bh); + unlock_buffer(bs->bh); +@@ -959,6 +957,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, + } else if (bs->bh && s->base == bs->bh->b_data) { + /* We were modifying this block in-place. 
*/ + ea_bdebug(bs->bh, "keeping this block"); ++ ext4_xattr_cache_insert(ext4_mb_cache, bs->bh); + new_bh = bs->bh; + get_bh(new_bh); + } else { +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index 1ac1593aded3..1ab91124a93e 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -86,10 +86,10 @@ struct nfs_direct_req { + struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; + int mirror_count; + ++ loff_t io_start; /* Start offset for I/O */ + ssize_t count, /* bytes actually processed */ + max_count, /* max expected count */ + bytes_left, /* bytes left to be sent */ +- io_start, /* start of IO */ + error; /* any reported error */ + struct completion completion; /* wait for i/o completion */ + +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 9a3b3820306d..a8b786a648cd 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1847,40 +1847,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head, + return status; + } + +-int nfs_commit_inode(struct inode *inode, int how) ++static int __nfs_commit_inode(struct inode *inode, int how, ++ struct writeback_control *wbc) + { + LIST_HEAD(head); + struct nfs_commit_info cinfo; + int may_wait = how & FLUSH_SYNC; +- int error = 0; +- int res; ++ int ret, nscan; + + nfs_init_cinfo_from_inode(&cinfo, inode); + nfs_commit_begin(cinfo.mds); +- res = nfs_scan_commit(inode, &head, &cinfo); +- if (res) +- error = nfs_generic_commit_list(inode, &head, how, &cinfo); ++ for (;;) { ++ ret = nscan = nfs_scan_commit(inode, &head, &cinfo); ++ if (ret <= 0) ++ break; ++ ret = nfs_generic_commit_list(inode, &head, how, &cinfo); ++ if (ret < 0) ++ break; ++ ret = 0; ++ if (wbc && wbc->sync_mode == WB_SYNC_NONE) { ++ if (nscan < wbc->nr_to_write) ++ wbc->nr_to_write -= nscan; ++ else ++ wbc->nr_to_write = 0; ++ } ++ if (nscan < INT_MAX) ++ break; ++ cond_resched(); ++ } + nfs_commit_end(cinfo.mds); +- if (res == 0) +- return res; +- if (error < 0) +- goto out_error; +- if (!may_wait) +- goto out_mark_dirty; +- error = wait_on_commit(cinfo.mds); +- if (error < 0) +- return error; +- return res; +-out_error: +- res = error; +- /* Note: If we exit without ensuring that the commit is complete, +- * we must mark the inode as dirty. Otherwise, future calls to +- * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure +- * that the data is on the disk. +- */ +-out_mark_dirty: +- __mark_inode_dirty(inode, I_DIRTY_DATASYNC); +- return res; ++ if (ret || !may_wait) ++ return ret; ++ return wait_on_commit(cinfo.mds); ++} ++ ++int nfs_commit_inode(struct inode *inode, int how) ++{ ++ return __nfs_commit_inode(inode, how, NULL); + } + EXPORT_SYMBOL_GPL(nfs_commit_inode); + +@@ -1890,11 +1893,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) + int flags = FLUSH_SYNC; + int ret = 0; + +- /* no commits means nothing needs to be done */ +- if (!nfsi->commit_info.ncommit) +- return ret; +- + if (wbc->sync_mode == WB_SYNC_NONE) { ++ /* no commits means nothing needs to be done */ ++ if (!nfsi->commit_info.ncommit) ++ goto check_requests_outstanding; ++ + /* Don't commit yet if this is a non-blocking flush and there + * are a lot of outstanding writes for this mapping. 
+ */ +@@ -1905,16 +1908,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) + flags = 0; + } + +- ret = nfs_commit_inode(inode, flags); +- if (ret >= 0) { +- if (wbc->sync_mode == WB_SYNC_NONE) { +- if (ret < wbc->nr_to_write) +- wbc->nr_to_write -= ret; +- else +- wbc->nr_to_write = 0; +- } +- return 0; +- } ++ ret = __nfs_commit_inode(inode, flags, wbc); ++ if (!ret) { ++ if (flags & FLUSH_SYNC) ++ return 0; ++ } else if (nfsi->commit_info.ncommit) ++ goto out_mark_dirty; ++ ++check_requests_outstanding: ++ if (!atomic_read(&nfsi->commit_info.rpcs_out)) ++ return ret; + out_mark_dirty: + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; +diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h +index 982c299e435a..642d0597bb73 100644 +--- a/include/drm/drm_crtc_helper.h ++++ b/include/drm/drm_crtc_helper.h +@@ -74,5 +74,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); + extern void drm_kms_helper_poll_disable(struct drm_device *dev); + extern void drm_kms_helper_poll_enable(struct drm_device *dev); + extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); ++extern bool drm_kms_helper_is_poll_worker(void); + + #endif +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h +index de179993e039..01225b0059b1 100644 +--- a/include/linux/compiler-clang.h ++++ b/include/linux/compiler-clang.h +@@ -15,3 +15,8 @@ + * with any version that can compile the kernel + */ + #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) ++ ++/* Clang doesn't have a way to turn it off per-function, yet. */ ++#ifdef __noretpoline ++#undef __noretpoline ++#endif +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index eb0ed31193a3..a1b1de17455c 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -88,6 +88,10 @@ + #define __weak __attribute__((weak)) + #define __alias(symbol) __attribute__((alias(#symbol))) + ++#ifdef RETPOLINE ++#define __noretpoline __attribute__((indirect_branch("keep"))) ++#endif ++ + /* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without +diff --git a/include/linux/init.h b/include/linux/init.h +index 8e346d1bd837..683508f6bb4e 100644 +--- a/include/linux/init.h ++++ b/include/linux/init.h +@@ -5,10 +5,10 @@ + #include <linux/types.h> + + /* Built-in __init functions needn't be compiled with retpoline */ +-#if defined(RETPOLINE) && !defined(MODULE) +-#define __noretpoline __attribute__((indirect_branch("keep"))) ++#if defined(__noretpoline) && !defined(MODULE) ++#define __noinitretpoline __noretpoline + #else +-#define __noretpoline ++#define __noinitretpoline + #endif + + /* These macros are used to mark some functions or +@@ -46,7 +46,7 @@ + + /* These are for everybody (although not all archs will actually + discard it in modules) */ +-#define __init __section(.init.text) __cold notrace __latent_entropy __noretpoline ++#define __init __section(.init.text) __cold notrace __latent_entropy __noinitretpoline + #define __initdata __section(.init.data) + #define __initconst __section(.init.rodata) + #define __exitdata __section(.exit.data) +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h +index 2ad1a2b289b5..9bfeb88fb940 100644 +--- a/include/linux/netfilter/x_tables.h ++++ b/include/linux/netfilter/x_tables.h +@@ -375,38 +375,14 @@ static inline unsigned long ifname_compare_aligned(const 
char *_a, + return ret; + } + ++struct xt_percpu_counter_alloc_state { ++ unsigned int off; ++ const char __percpu *mem; ++}; + +-/* On SMP, ip(6)t_entry->counters.pcnt holds address of the +- * real (percpu) counter. On !SMP, its just the packet count, +- * so nothing needs to be done there. +- * +- * xt_percpu_counter_alloc returns the address of the percpu +- * counter, or 0 on !SMP. We force an alignment of 16 bytes +- * so that bytes/packets share a common cache line. +- * +- * Hence caller must use IS_ERR_VALUE to check for error, this +- * allows us to return 0 for single core systems without forcing +- * callers to deal with SMP vs. NONSMP issues. +- */ +-static inline unsigned long xt_percpu_counter_alloc(void) +-{ +- if (nr_cpu_ids > 1) { +- void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), +- sizeof(struct xt_counters)); +- +- if (res == NULL) +- return -ENOMEM; +- +- return (__force unsigned long) res; +- } +- +- return 0; +-} +-static inline void xt_percpu_counter_free(u64 pcnt) +-{ +- if (nr_cpu_ids > 1) +- free_percpu((void __percpu *) (unsigned long) pcnt); +-} ++bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, ++ struct xt_counters *counter); ++void xt_percpu_counter_free(struct xt_counters *cnt); + + static inline struct xt_counters * + xt_get_this_cpu_counter(struct xt_counters *cnt) +diff --git a/include/linux/nospec.h b/include/linux/nospec.h +index 132e3f5a2e0d..e791ebc65c9c 100644 +--- a/include/linux/nospec.h ++++ b/include/linux/nospec.h +@@ -5,6 +5,7 @@ + + #ifndef _LINUX_NOSPEC_H + #define _LINUX_NOSPEC_H ++#include <asm/barrier.h> + + /** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise +@@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, + } + #endif + +-/* +- * Warn developers about inappropriate array_index_nospec() usage. +- * +- * Even if the CPU speculates past the WARN_ONCE branch, the +- * sign bit of @index is taken into account when generating the +- * mask. +- * +- * This warning is compiled out when the compiler can infer that +- * @index and @size are less than LONG_MAX. +- */ +-#define array_index_mask_nospec_check(index, size) \ +-({ \ +- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ +- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ +- _mask = 0; \ +- else \ +- _mask = array_index_mask_nospec(index, size); \ +- _mask; \ +-}) +- + /* + * array_index_nospec - sanitize an array index after a bounds check + * +@@ -67,7 +48,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, + ({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ +- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ ++ unsigned long _mask = array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index de2a722fe3cf..ea4f81c2a6d5 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -56,4 +56,7 @@ + */ + #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) + ++/* Device needs a pause after every control message. 
*/ ++#define USB_QUIRK_DELAY_CTRL_MSG BIT(13) ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h +index 1061add575d2..1def337b16d4 100644 +--- a/include/linux/workqueue.h ++++ b/include/linux/workqueue.h +@@ -453,6 +453,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork); + + extern void workqueue_set_max_active(struct workqueue_struct *wq, + int max_active); ++extern struct work_struct *current_work(void); + extern bool current_is_workqueue_rescuer(void); + extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); + extern unsigned int work_busy(struct work_struct *work); +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index ebfea5f94b66..664aebc50fe3 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -4129,6 +4129,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) + } + EXPORT_SYMBOL_GPL(workqueue_set_max_active); + ++/** ++ * current_work - retrieve %current task's work struct ++ * ++ * Determine if %current task is a workqueue worker and what it's working on. ++ * Useful to find out the context that the %current task is running in. ++ * ++ * Return: work struct if %current task is a workqueue worker, %NULL otherwise. ++ */ ++struct work_struct *current_work(void) ++{ ++ struct worker *worker = current_wq_worker(); ++ ++ return worker ? worker->current_work : NULL; ++} ++EXPORT_SYMBOL(current_work); ++ + /** + * current_is_workqueue_rescuer - is %current workqueue rescuer? + * +diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c +index 9024283d2bca..9637a681bdda 100644 +--- a/net/bridge/netfilter/ebt_among.c ++++ b/net/bridge/netfilter/ebt_among.c +@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) + return true; + } + ++static bool poolsize_invalid(const struct ebt_mac_wormhash *w) ++{ ++ return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); ++} ++ + static int ebt_among_mt_check(const struct xt_mtchk_param *par) + { + const struct ebt_among_info *info = par->matchinfo; + const struct ebt_entry_match *em = + container_of(par->matchinfo, const struct ebt_entry_match, data); +- int expected_length = sizeof(struct ebt_among_info); ++ unsigned int expected_length = sizeof(struct ebt_among_info); + const struct ebt_mac_wormhash *wh_dst, *wh_src; + int err; + ++ if (expected_length > em->match_size) ++ return -EINVAL; ++ + wh_dst = ebt_among_wh_dst(info); +- wh_src = ebt_among_wh_src(info); ++ if (poolsize_invalid(wh_dst)) ++ return -EINVAL; ++ + expected_length += ebt_mac_wormhash_size(wh_dst); ++ if (expected_length > em->match_size) ++ return -EINVAL; ++ ++ wh_src = ebt_among_wh_src(info); ++ if (poolsize_invalid(wh_src)) ++ return -EINVAL; ++ + expected_length += ebt_mac_wormhash_size(wh_src); + + if (em->match_size != EBT_ALIGN(expected_length)) { +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index f5c11bbe27db..5a89a4ac86ef 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -2031,7 +2031,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, + if (match_kern) + match_kern->match_size = ret; + +- WARN_ON(type == EBT_COMPAT_TARGET && size_left); ++ if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) ++ return -EINVAL; ++ + match32 = (struct compat_ebt_entry_mwt *) buf; + } + +@@ -2087,6 +2089,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, + * 
+ * offsets are relative to beginning of struct ebt_entry (i.e., 0). + */ ++ for (i = 0; i < 4 ; ++i) { ++ if (offsets[i] >= *total) ++ return -EINVAL; ++ if (i == 0) ++ continue; ++ if (offsets[i-1] > offsets[i]) ++ return -EINVAL; ++ } ++ + for (i = 0, j = 1 ; j < 4 ; j++, i++) { + struct compat_ebt_entry_mwt *match32; + unsigned int size; +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index 697538464e6e..d35815e5967b 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -261,6 +261,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, + } + if (table_base + v + != arpt_next_entry(e)) { ++ if (unlikely(stackidx >= private->stacksize)) { ++ verdict = NF_DROP; ++ break; ++ } + jumpstack[stackidx++] = e; + } + +@@ -415,17 +419,15 @@ static inline int check_target(struct arpt_entry *e, const char *name) + } + + static inline int +-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) ++find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, ++ struct xt_percpu_counter_alloc_state *alloc_state) + { + struct xt_entry_target *t; + struct xt_target *target; +- unsigned long pcnt; + int ret; + +- pcnt = xt_percpu_counter_alloc(); +- if (IS_ERR_VALUE(pcnt)) ++ if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) + return -ENOMEM; +- e->counters.pcnt = pcnt; + + t = arpt_get_target(e); + target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, +@@ -443,7 +445,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) + err: + module_put(t->u.kernel.target->me); + out: +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + + return ret; + } +@@ -523,7 +525,7 @@ static inline void cleanup_entry(struct arpt_entry *e) + if (par.target->destroy != NULL) + par.target->destroy(&par); + module_put(par.target->me); +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + } + + /* Checks and translates the user-supplied table segment (held in +@@ -532,6 +534,7 @@ static inline void cleanup_entry(struct arpt_entry *e) + static int translate_table(struct xt_table_info *newinfo, void *entry0, + const struct arpt_replace *repl) + { ++ struct xt_percpu_counter_alloc_state alloc_state = { 0 }; + struct arpt_entry *iter; + unsigned int *offsets; + unsigned int i; +@@ -594,7 +597,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, + /* Finally, each sanity check must pass */ + i = 0; + xt_entry_foreach(iter, entry0, newinfo->size) { +- ret = find_check_entry(iter, repl->name, repl->size); ++ ret = find_check_entry(iter, repl->name, repl->size, ++ &alloc_state); + if (ret != 0) + break; + ++i; +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 7c00ce90adb8..e78f6521823f 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -345,8 +345,13 @@ ipt_do_table(struct sk_buff *skb, + continue; + } + if (table_base + v != ipt_next_entry(e) && +- !(e->ip.flags & IPT_F_GOTO)) ++ !(e->ip.flags & IPT_F_GOTO)) { ++ if (unlikely(stackidx >= private->stacksize)) { ++ verdict = NF_DROP; ++ break; ++ } + jumpstack[stackidx++] = e; ++ } + + e = get_entry(table_base, v); + continue; +@@ -535,7 +540,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name) + + static int + find_check_entry(struct ipt_entry *e, struct net *net, const char *name, +- unsigned int size) ++ unsigned int size, ++ struct 
xt_percpu_counter_alloc_state *alloc_state) + { + struct xt_entry_target *t; + struct xt_target *target; +@@ -543,12 +549,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, + unsigned int j; + struct xt_mtchk_param mtpar; + struct xt_entry_match *ematch; +- unsigned long pcnt; + +- pcnt = xt_percpu_counter_alloc(); +- if (IS_ERR_VALUE(pcnt)) ++ if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) + return -ENOMEM; +- e->counters.pcnt = pcnt; + + j = 0; + mtpar.net = net; +@@ -586,7 +589,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, + cleanup_match(ematch, net); + } + +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + + return ret; + } +@@ -674,7 +677,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net) + if (par.target->destroy != NULL) + par.target->destroy(&par); + module_put(par.target->me); +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + } + + /* Checks and translates the user-supplied table segment (held in +@@ -683,6 +686,7 @@ static int + translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, + const struct ipt_replace *repl) + { ++ struct xt_percpu_counter_alloc_state alloc_state = { 0 }; + struct ipt_entry *iter; + unsigned int *offsets; + unsigned int i; +@@ -742,7 +746,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, + /* Finally, each sanity check must pass */ + i = 0; + xt_entry_foreach(iter, entry0, newinfo->size) { +- ret = find_check_entry(iter, net, repl->name, repl->size); ++ ret = find_check_entry(iter, net, repl->name, repl->size, ++ &alloc_state); + if (ret != 0) + break; + ++i; +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 55aacea24396..e26becc9a43d 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -376,6 +376,10 @@ ip6t_do_table(struct sk_buff *skb, + } + if (table_base + v != ip6t_next_entry(e) && + !(e->ipv6.flags & IP6T_F_GOTO)) { ++ if (unlikely(stackidx >= private->stacksize)) { ++ verdict = NF_DROP; ++ break; ++ } + jumpstack[stackidx++] = e; + } + +@@ -566,7 +570,8 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name) + + static int + find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, +- unsigned int size) ++ unsigned int size, ++ struct xt_percpu_counter_alloc_state *alloc_state) + { + struct xt_entry_target *t; + struct xt_target *target; +@@ -574,12 +579,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, + unsigned int j; + struct xt_mtchk_param mtpar; + struct xt_entry_match *ematch; +- unsigned long pcnt; + +- pcnt = xt_percpu_counter_alloc(); +- if (IS_ERR_VALUE(pcnt)) ++ if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) + return -ENOMEM; +- e->counters.pcnt = pcnt; + + j = 0; + mtpar.net = net; +@@ -616,7 +618,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, + cleanup_match(ematch, net); + } + +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + + return ret; + } +@@ -703,8 +705,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net) + if (par.target->destroy != NULL) + par.target->destroy(&par); + module_put(par.target->me); +- +- xt_percpu_counter_free(e->counters.pcnt); ++ xt_percpu_counter_free(&e->counters); + } + + /* Checks and translates the user-supplied table segment (held in +@@ -713,6 +714,7 @@ static 
int + translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, + const struct ip6t_replace *repl) + { ++ struct xt_percpu_counter_alloc_state alloc_state = { 0 }; + struct ip6t_entry *iter; + unsigned int *offsets; + unsigned int i; +@@ -772,7 +774,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, + /* Finally, each sanity check must pass */ + i = 0; + xt_entry_foreach(iter, entry0, newinfo->size) { +- ret = find_check_entry(iter, net, repl->name, repl->size); ++ ret = find_check_entry(iter, net, repl->name, repl->size, ++ &alloc_state); + if (ret != 0) + break; + ++i; +diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +index e0be97e636a4..322c7657165b 100644 +--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c ++++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, + !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, + target, maniptype)) + return false; ++ ++ /* must reload, offset might have changed */ ++ ipv6h = (void *)skb->data + iphdroff; ++ + manip_addr: + if (maniptype == NF_NAT_MANIP_SRC) + ipv6h->saddr = target->src.u3.in6; +diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c +index fbce552a796e..7d7466dbf663 100644 +--- a/net/netfilter/nf_nat_proto_common.c ++++ b/net/netfilter/nf_nat_proto_common.c +@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, + const struct nf_conn *ct, + u16 *rover) + { +- unsigned int range_size, min, i; ++ unsigned int range_size, min, max, i; + __be16 *portptr; + u_int16_t off; + +@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, + } + } else { + min = ntohs(range->min_proto.all); +- range_size = ntohs(range->max_proto.all) - min + 1; ++ max = ntohs(range->max_proto.all); ++ if (unlikely(max < min)) ++ swap(max, min); ++ range_size = max - min + 1; + } + + if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c +index e47ade305a46..ac26b71d900e 100644 +--- a/net/netfilter/x_tables.c ++++ b/net/netfilter/x_tables.c +@@ -39,6 +39,8 @@ MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); + MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); + ++#define XT_PCPU_BLOCK_SIZE 4096 ++ + struct compat_delta { + unsigned int offset; /* offset in kernel */ + int delta; /* delta in 32bit user land */ +@@ -1619,6 +1621,59 @@ void xt_proto_fini(struct net *net, u_int8_t af) + } + EXPORT_SYMBOL_GPL(xt_proto_fini); + ++/** ++ * xt_percpu_counter_alloc - allocate x_tables rule counter ++ * ++ * @state: pointer to xt_percpu allocation state ++ * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct ++ * ++ * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then ++ * contain the address of the real (percpu) counter. ++ * ++ * Rule evaluation needs to use xt_get_this_cpu_counter() helper ++ * to fetch the real percpu counter. ++ * ++ * To speed up allocation and improve data locality, a 4kb block is ++ * allocated. ++ * ++ * xt_percpu_counter_alloc_state contains the base address of the ++ * allocated page and the current sub-offset. ++ * ++ * returns false on error. 
++ */ ++bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, ++ struct xt_counters *counter) ++{ ++ BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); ++ ++ if (nr_cpu_ids <= 1) ++ return true; ++ ++ if (!state->mem) { ++ state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, ++ XT_PCPU_BLOCK_SIZE); ++ if (!state->mem) ++ return false; ++ } ++ counter->pcnt = (__force unsigned long)(state->mem + state->off); ++ state->off += sizeof(*counter); ++ if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { ++ state->mem = NULL; ++ state->off = 0; ++ } ++ return true; ++} ++EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); ++ ++void xt_percpu_counter_free(struct xt_counters *counters) ++{ ++ unsigned long pcnt = counters->pcnt; ++ ++ if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) ++ free_percpu((void __percpu *)pcnt); ++} ++EXPORT_SYMBOL_GPL(xt_percpu_counter_free); ++ + static int __net_init xt_net_init(struct net *net) + { + int i; +diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c +index daf45da448fa..bb5d6a058fb7 100644 +--- a/net/netfilter/xt_IDLETIMER.c ++++ b/net/netfilter/xt_IDLETIMER.c +@@ -147,11 +147,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) + (unsigned long) info->timer); + info->timer->refcnt = 1; + ++ INIT_WORK(&info->timer->work, idletimer_tg_work); ++ + mod_timer(&info->timer->timer, + msecs_to_jiffies(info->timeout * 1000) + jiffies); + +- INIT_WORK(&info->timer->work, idletimer_tg_work); +- + return 0; + + out_free_attr: +@@ -192,7 +192,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par) + pr_debug("timeout value is zero\n"); + return -EINVAL; + } +- ++ if (info->timeout >= INT_MAX / 1000) { ++ pr_debug("timeout value is too big\n"); ++ return -EINVAL; ++ } + if (info->label[0] == '\0' || + strnlen(info->label, + MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { +diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c +index 3ba31c194cce..0858fe17e14a 100644 +--- a/net/netfilter/xt_LED.c ++++ b/net/netfilter/xt_LED.c +@@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par) + goto exit_alloc; + } + +- /* See if we need to set up a timer */ +- if (ledinfo->delay > 0) +- setup_timer(&ledinternal->timer, led_timeout_callback, +- (unsigned long)ledinternal); ++ /* Since the letinternal timer can be shared between multiple targets, ++ * always set it up, even if the current target does not need it ++ */ ++ setup_timer(&ledinternal->timer, led_timeout_callback, ++ (unsigned long)ledinternal); + + list_add_tail(&ledinternal->list, &xt_led_triggers); + +@@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par) + + list_del(&ledinternal->list); + +- if (ledinfo->delay > 0) +- del_timer_sync(&ledinternal->timer); ++ del_timer_sync(&ledinternal->timer); + + led_trigger_unregister(&ledinternal->netfilter_led_trigger); + +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index 0a07f9014944..ae0f9ab1a70d 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -290,11 +290,11 @@ cmd_dt_S_dtb= \ + echo '\#include <asm-generic/vmlinux.lds.h>'; \ + echo '.section .dtb.init.rodata,"a"'; \ + echo '.balign STRUCT_ALIGNMENT'; \ +- echo '.global __dtb_$(*F)_begin'; \ +- echo '__dtb_$(*F)_begin:'; \ ++ echo '.global __dtb_$(subst -,_,$(*F))_begin'; \ ++ echo '__dtb_$(subst -,_,$(*F))_begin:'; \ + echo '.incbin "$<" '; \ +- echo '__dtb_$(*F)_end:'; \ +- echo '.global __dtb_$(*F)_end'; \ ++ echo '__dtb_$(subst 
++ echo '__dtb_$(subst -,_,$(*F))_end:'; \
++ echo '.global __dtb_$(subst -,_,$(*F))_end'; \
+ echo '.balign STRUCT_ALIGNMENT'; \
+ ) > $@
+
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 0b408617b2c9..799ad3e1d24b 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -906,7 +906,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
+ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ struct snd_seq_event *event,
+ struct file *file, int blocking,
+- int atomic, int hop)
++ int atomic, int hop,
++ struct mutex *mutexp)
+ {
+ struct snd_seq_event_cell *cell;
+ int err;
+@@ -944,7 +945,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ return -ENXIO; /* queue is not allocated */
+
+ /* allocate an event cell */
+- err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
++ err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
++ file, mutexp);
+ if (err < 0)
+ return err;
+
+@@ -1013,12 +1015,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ return -ENXIO;
+
+ /* allocate the pool now if the pool is not allocated yet */
++ mutex_lock(&client->ioctl_mutex);
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+- mutex_lock(&client->ioctl_mutex);
+ err = snd_seq_pool_init(client->pool);
+- mutex_unlock(&client->ioctl_mutex);
+ if (err < 0)
+- return -ENOMEM;
++ goto out;
+ }
+
+ /* only process whole events */
+@@ -1069,7 +1070,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ /* ok, enqueue it */
+ err = snd_seq_client_enqueue_event(client, &event, file,
+ !(file->f_flags & O_NONBLOCK),
+- 0, 0);
++ 0, 0, &client->ioctl_mutex);
+ if (err < 0)
+ break;
+
+@@ -1080,6 +1081,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ written += len;
+ }
+
++ out:
++ mutex_unlock(&client->ioctl_mutex);
+ return written ? written : err;
+ }
+
+@@ -1835,6 +1838,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
+ (! snd_seq_write_pool_allocated(client) ||
+ info->output_pool != client->pool->size)) {
+ if (snd_seq_write_pool_allocated(client)) {
++ /* is the pool in use? */
++ if (atomic_read(&client->pool->counter))
++ return -EBUSY;
+ /* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
+ snd_seq_queue_client_leave_cells(client->number);
+@@ -2259,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ if (! cptr->accept_output)
+ result = -EPERM;
+ else /* send it */
+- result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
++ result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
++ atomic, hop, NULL);
+
+ snd_seq_client_unlock(cptr);
+ return result;
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 3490d21ab9e7..9acbed1ac982 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+ return -EINVAL;
+
+ snd_use_lock_use(&f->use_lock);
+- err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
++ err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
+ if (err < 0) {
+ if ((err == -ENOMEM) || (err == -EAGAIN))
+ atomic_inc(&f->overflow);
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 5847c4475bf3..4c8cbcd89887 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
+ */
+ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ struct snd_seq_event_cell **cellp,
+- int nonblock, struct file *file)
++ int nonblock, struct file *file,
++ struct mutex *mutexp)
+ {
+ struct snd_seq_event_cell *cell;
+ unsigned long flags;
+@@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&pool->output_sleep, &wait);
+ spin_unlock_irq(&pool->lock);
++ if (mutexp)
++ mutex_unlock(mutexp);
+ schedule();
++ if (mutexp)
++ mutex_lock(mutexp);
+ spin_lock_irq(&pool->lock);
+ remove_wait_queue(&pool->output_sleep, &wait);
+ /* interrupted? */
+@@ -288,7 +293,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ */
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ struct snd_seq_event_cell **cellp, int nonblock,
+- struct file *file)
++ struct file *file, struct mutex *mutexp)
+ {
+ int ncells, err;
+ unsigned int extlen;
+@@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ if (ncells >= pool->total_elements)
+ return -ENOMEM;
+
+- err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
++ err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
+ if (err < 0)
+ return err;
+
+@@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ int size = sizeof(struct snd_seq_event);
+ if (len < size)
+ size = len;
+- err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
++ err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
++ mutexp);
+ if (err < 0)
+ goto __error;
+ if (cell->event.data.ext.ptr == NULL)
+diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
+index 32f959c17786..3abe306c394a 100644
+--- a/sound/core/seq/seq_memory.h
++++ b/sound/core/seq/seq_memory.h
+@@ -66,7 +66,8 @@ struct snd_seq_pool {
+ void snd_seq_cell_free(struct snd_seq_event_cell *cell);
+
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+- struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
++ struct snd_seq_event_cell **cellp, int nonblock,
++ struct file *file, struct mutex *mutexp);
+
+ /* return number of unused (free) cells */
+ static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 2c3065c1f3fb..b3851b991120 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -849,6 +849,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
++ SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 974b74e91ef0..cd427ea8861d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4760,6 +4760,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
+ }
+ }
+
++/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
++static void alc295_fixup_disable_dac3(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ hda_nid_t conn[2] = { 0x02, 0x03 };
++ snd_hda_override_conn_list(codec, 0x17, 2, conn);
++ }
++}
++
+ /* Hook to update amp GPIO4 for automute */
+ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+@@ -4909,6 +4919,7 @@ enum {
+ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC295_FIXUP_DISABLE_DAC3,
+ ALC280_FIXUP_HP_HEADSET_MIC,
+ ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
+@@ -5601,6 +5612,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
+ },
++ [ALC295_FIXUP_DISABLE_DAC3] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc295_fixup_disable_dac3,
++ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -5664,6 +5679,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
++ SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5785,9 +5801,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
+index f5d34153e21f..f0c9e2562474 100644
+--- a/sound/soc/codecs/rt5651.c
++++ b/sound/soc/codecs/rt5651.c
+@@ -1736,6 +1736,7 @@ static const struct regmap_config rt5651_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(rt5651_reg),
+ .ranges = rt5651_ranges,
+ .num_ranges = ARRAY_SIZE(rt5651_ranges),
++ .use_single_rw = true,
+ };
+
+ #if defined(CONFIG_OF)
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 1589325855bc..3dba5550a665 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -774,15 +774,26 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
+ static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+ {
++ struct sgtl5000_priv *sgtl = snd_soc_codec_get_drvdata(codec);
++ int ret;
++
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ case SND_SOC_BIAS_STANDBY:
++ regcache_cache_only(sgtl->regmap, false);
++ ret = regcache_sync(sgtl->regmap);
++ if (ret) {
++ regcache_cache_only(sgtl->regmap, true);
++ return ret;
++ }
++
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_REFTOP_POWERUP,
+ SGTL5000_REFTOP_POWERUP);
+ break;
+ case SND_SOC_BIAS_OFF:
++ regcache_cache_only(sgtl->regmap, true);
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_REFTOP_POWERUP, 0);
+ break;
+diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
+index e97d7016d771..ce7e6e5c1cea 100644
+--- a/tools/perf/util/trigger.h
++++ b/tools/perf/util/trigger.h
+@@ -11,7 +11,7 @@
+ * States and transits:
+ *
+ *
+- * OFF--(on)--> READY --(hit)--> HIT
++ * OFF--> ON --> READY --(hit)--> HIT
+ * ^ |
+ * | (ready)
+ * | |
+@@ -26,8 +26,9 @@ struct trigger {
+ volatile enum {
+ TRIGGER_ERROR = -2,
+ TRIGGER_OFF = -1,
+- TRIGGER_READY = 0,
+- TRIGGER_HIT = 1,
++ TRIGGER_ON = 0,
++ TRIGGER_READY = 1,
++ TRIGGER_HIT = 2,
+ } state;
+ const char *name;
+ };
+@@ -49,7 +50,7 @@ static inline bool trigger_is_error(struct trigger *t)
+ static inline void trigger_on(struct trigger *t)
+ {
+ TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
+- t->state = TRIGGER_READY;
++ t->state = TRIGGER_ON;
+ }
+
+ static inline void trigger_ready(struct trigger *t)
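
Editor's note: the sound/core/seq hunks above are the substance of this release. snd_seq_write() now holds client->ioctl_mutex for the whole write, so a writer that goes to sleep in snd_seq_cell_alloc() waiting for a free pool cell would otherwise sleep with the mutex held and lock out the very ioctls (such as the pool resize below it) that could make room. The new mutexp parameter lets the allocator drop the mutex around schedule() and re-take it after wakeup; kernel clients pass NULL because they hold no such mutex. The following is a compilable userspace pthread sketch of the same unlock-around-sleep pattern, not the kernel code itself: cell_alloc, pool_lock and output_sleep only mirror the kernel names, and the condition variable stands in for the scheduler wait queue.

    #include <pthread.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ pool->lock */
    static pthread_cond_t output_sleep = PTHREAD_COND_INITIALIZER; /* ~ pool->output_sleep */
    static int free_cells;

    /* 'outer' is NULL for kernel clients, or the caller's ioctl mutex for
     * the write() path -- mirroring the new 'mutexp' parameter. The caller
     * holds 'outer' (if non-NULL) on entry and still holds it on return. */
    static void cell_alloc(pthread_mutex_t *outer)
    {
    	pthread_mutex_lock(&pool_lock);
    	while (free_cells == 0) {
    		if (outer)
    			pthread_mutex_unlock(outer); /* let ioctls free cells while we sleep */
    		pthread_cond_wait(&output_sleep, &pool_lock);
    		if (outer) {
    			/* re-take in outer -> pool order, then re-check the condition */
    			pthread_mutex_unlock(&pool_lock);
    			pthread_mutex_lock(outer);
    			pthread_mutex_lock(&pool_lock);
    		}
    	}
    	free_cells--;
    	pthread_mutex_unlock(&pool_lock);
    }

    static void cell_free(void)
    {
    	pthread_mutex_lock(&pool_lock);
    	free_cells++;
    	pthread_cond_signal(&output_sleep); /* wake a sleeping writer */
    	pthread_mutex_unlock(&pool_lock);
    }

The ordering detail that keeps this safe is that the outer mutex is always re-acquired before pool_lock, so every path observes the same outer-then-pool lock order.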
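
Related, the snd_seq_ioctl_set_client_pool hunk refuses to resize an output pool whose cells are still checked out, returning -EBUSY rather than freeing memory out from under a concurrent writer. A stand-in sketch of that guard follows; struct pool, pool_resize and the 64-byte cell size are illustrative inventions, and only the atomic counter test mirrors the patch.

    #include <stdatomic.h>
    #include <errno.h>
    #include <stdlib.h>

    struct pool {
    	atomic_int counter; /* cells currently in use (~ pool->counter) */
    	int size;
    	void *cells;
    };

    static int pool_resize(struct pool *p, int new_size)
    {
    	if (p->cells) {                       /* pool already allocated */
    		if (atomic_load(&p->counter)) /* writers still own cells */
    			return -EBUSY;        /* the new check in 4.9.88 */
    		free(p->cells);               /* ~ snd_seq_pool_done() */
    		p->cells = NULL;
    	}
    	p->size = new_size;
    	p->cells = calloc(new_size, 64);      /* ~ snd_seq_pool_init() */
    	return p->cells ? 0 : -ENOMEM;
    }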
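
The sgtl5000 hunk puts the codec's register map into cache-only mode over SND_SOC_BIAS_OFF, when the chip may lose power, and replays the cached values with regcache_sync() on the way back up; regcache_cache_only() and regcache_sync() are the regmap API the driver really calls. As a self-contained illustration of the idea, here is a runnable toy register cache with the same two modes -- a model, not the regmap implementation.

    #include <stdio.h>
    #include <stdbool.h>

    #define NREGS 16

    struct regcache {
    	unsigned int hw[NREGS];    /* stand-in for the real chip */
    	unsigned int cache[NREGS];
    	bool dirty[NREGS];
    	bool cache_only;
    };

    static void reg_write(struct regcache *rc, int reg, unsigned int val)
    {
    	rc->cache[reg] = val;
    	if (rc->cache_only)
    		rc->dirty[reg] = true; /* defer the hardware write until sync */
    	else
    		rc->hw[reg] = val;
    }

    static void cache_sync(struct regcache *rc)
    {
    	for (int i = 0; i < NREGS; i++)
    		if (rc->dirty[i]) {
    			rc->hw[i] = rc->cache[i]; /* replay dirty registers */
    			rc->dirty[i] = false;
    		}
    }

    int main(void)
    {
    	struct regcache rc = { .cache_only = true }; /* BIAS_OFF: supplies cut */
    	reg_write(&rc, 3, 0xAA);                     /* buffered, hw untouched */
    	rc.cache_only = false;                       /* power restored */
    	cache_sync(&rc);                             /* replay to hardware */
    	printf("reg 3 = 0x%X\n", rc.hw[3]);          /* prints 0xAA */
    	return 0;
    }

Running it prints reg 3 = 0xAA: the write issued while "powered off" reaches the hardware only at sync time, which is why the driver syncs before it touches SGTL5000_CHIP_ANA_POWER.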
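
Finally, the trigger.h hunk widens perf's trigger state machine from OFF--(on)-->READY to OFF-->ON-->READY, so a trigger can be enabled at setup time and armed separately in the record loop. Below is a small runnable model of the patched machine; the enum values mirror the header, but the transition guards are simplified (the real code uses TRIGGER_WARN_ONCE on a struct trigger rather than an assert on a global).

    #include <assert.h>
    #include <stdio.h>

    enum trigger_state {
    	TRIGGER_ERROR = -2,
    	TRIGGER_OFF   = -1,
    	TRIGGER_ON    =  0, /* enabled but not yet armed -- the new state */
    	TRIGGER_READY =  1,
    	TRIGGER_HIT   =  2,
    };

    static enum trigger_state state = TRIGGER_OFF;

    static void trigger_on(void)    { assert(state == TRIGGER_OFF); state = TRIGGER_ON; }
    static void trigger_ready(void) { if (state >= TRIGGER_ON) state = TRIGGER_READY; }
    static void trigger_hit(void)   { if (state == TRIGGER_READY) state = TRIGGER_HIT; }

    int main(void)
    {
    	trigger_on();    /* OFF -> ON: enabled at record setup */
    	trigger_ready(); /* ON -> READY: armed before polling */
    	trigger_hit();   /* READY -> HIT: the event fired */
    	printf("final state: %d (HIT)\n", state);
    	return 0;
    }

The extra state lets callers distinguish "enabled" from "armed", which the old two-state encoding could not express.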