author     Mike Pagano <mpagano@gentoo.org>  2019-10-05 07:41:52 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2019-10-05 07:41:52 -0400
commit     78f3e56944444ae953990be586ca8067a7653d5b (patch)
tree       d88a7684bb8e251074a32b830e8c3b7ab5c47c75
parent     Linux patch 4.19.76 (diff)
download   linux-patches-78f3e56944444ae953990be586ca8067a7653d5b.tar.gz
           linux-patches-78f3e56944444ae953990be586ca8067a7653d5b.tar.bz2
           linux-patches-78f3e56944444ae953990be586ca8067a7653d5b.zip
Linux patch 4.19.77
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1076_linux-4.19.77.patch | 7298
2 files changed, 7302 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 871922b7..a687ae28 100644
--- a/0000_README
+++ b/0000_README
@@ -343,6 +343,10 @@ Patch: 1075_linux-4.19.76.patch
From: https://www.kernel.org
Desc: Linux 4.19.76
+Patch: 1076_linux-4.19.77.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.77
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1076_linux-4.19.77.patch b/1076_linux-4.19.77.patch
new file mode 100644
index 00000000..c003ebcd
--- /dev/null
+++ b/1076_linux-4.19.77.patch
@@ -0,0 +1,7298 @@
+diff --git a/Makefile b/Makefile
+index 9cb471a75a1b..aeabc6459acc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 76
++SUBLEVEL = 77
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+index 57c2332bf282..25bdc9d97a4d 100644
+--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
++++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+@@ -437,6 +437,7 @@
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+index d80ab9085da1..7989631b39cc 100644
+--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
++++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+@@ -437,6 +437,7 @@
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index 895fbde4d433..c1ed83131b49 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -323,6 +323,7 @@
+ vmmc-supply = <&reg_module_3v3>;
+ vqmmc-supply = <&reg_DCDC3>;
+ non-removable;
++ sdhci-caps-mask = <0x80000000 0x0>;
+ };
+
+ &iomuxc {
+diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+index 8bf365d28cac..584418f517a8 100644
+--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+@@ -43,7 +43,7 @@
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+@@ -69,7 +69,7 @@
+ <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy1>;
+ fsl,magic-packet;
+ status = "okay";
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index caa6d5fe9078..b296ada97409 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
+ * 0x4: Jump by mov instruction
+ * 0x8: Jumping address
+ */
+- memcpy((__force void *)zero, &zynq_secondary_trampoline,
++ memcpy_toio(zero, &zynq_secondary_trampoline,
+ trampoline_size);
+ writel(address, zero + trampoline_size);
+
+diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
+index ce42cc640a61..71d85ff323f7 100644
+--- a/arch/arm/plat-samsung/watchdog-reset.c
++++ b/arch/arm/plat-samsung/watchdog-reset.c
+@@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
+ #ifdef CONFIG_OF
+ static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt" },
++ { .compatible = "samsung,s3c6410-wdt" },
+ {},
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index e065394360bb..92186edefeb9 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -708,6 +708,7 @@
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+@@ -719,6 +720,7 @@
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+@@ -730,6 +732,7 @@
+ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index b4a48419769f..9b7d5abd04af 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -62,14 +62,6 @@
+ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
+-({ \
+- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
+- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
+- \
+- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
+- })
+-
+ #define ARM_CPU_IMP_ARM 0x41
+ #define ARM_CPU_IMP_APM 0x50
+ #define ARM_CPU_IMP_CAVIUM 0x43
+@@ -153,10 +145,19 @@ struct midr_range {
+
+ #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+
++static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
++ u32 rv_max)
++{
++ u32 _model = midr & MIDR_CPU_MODEL_MASK;
++ u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
++
++ return _model == model && rv >= rv_min && rv <= rv_max;
++}
++
+ static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+ {
+- return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
+- range->rv_min, range->rv_max);
++ return midr_is_cpu_model_range(midr, range->model,
++ range->rv_min, range->rv_max);
+ }
+
+ static inline bool
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 2214a403f39b..212a48826655 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -224,8 +224,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
+ * Only if the new pte is valid and kernel, otherwise TLB maintenance
+ * or update_mmu_cache() have the necessary barriers.
+ */
+- if (pte_valid_not_user(pte))
++ if (pte_valid_not_user(pte)) {
+ dsb(ishst);
++ isb();
++ }
+ }
+
+ extern void __sync_icache_dcache(pte_t pteval);
+@@ -432,6 +434,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
+ WRITE_ONCE(*pmdp, pmd);
+ dsb(ishst);
++ isb();
+ }
+
+ static inline void pmd_clear(pmd_t *pmdp)
+@@ -483,6 +486,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
+ {
+ WRITE_ONCE(*pudp, pud);
+ dsb(ishst);
++ isb();
+ }
+
+ static inline void pud_clear(pud_t *pudp)
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index a4a1901140ee..fc247b96619c 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -224,6 +224,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+
+ __tlbi(vaae1is, addr);
+ dsb(ish);
++ isb();
+ }
+ #endif
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 859d63cc99a3..a897efdb3ddd 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -846,7 +846,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
+ u32 midr = read_cpuid_id();
+
+ /* Cavium ThunderX pass 1.x and 2.x */
+- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
++ return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
+ }
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 8cce091b6c21..ec6aa1863316 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -294,6 +294,15 @@ skip_pgd:
+ msr sctlr_el1, x18
+ isb
+
++ /*
++ * Invalidate the local I-cache so that any instructions fetched
++ * speculatively from the PoC are discarded, since they may have
++ * been dynamically patched at the PoU.
++ */
++ ic iallu
++ dsb nsh
++ isb
++
+ /* Set the flag to zero to indicate that we're all done */
+ str wzr, [flag_ptr]
+ ret
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 326448f9df16..1a42ba885188 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
+ void
+ module_arch_cleanup (struct module *mod)
+ {
+- if (mod->arch.init_unw_table)
++ if (mod->arch.init_unw_table) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+- if (mod->arch.core_unw_table)
++ mod->arch.init_unw_table = NULL;
++ }
++ if (mod->arch.core_unw_table) {
+ unw_remove_unwind_table(mod->arch.core_unw_table);
++ mod->arch.core_unw_table = NULL;
++ }
+ }
+
+ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
+index 9000b249d225..407a617fa3a2 100644
+--- a/arch/m68k/include/asm/atarihw.h
++++ b/arch/m68k/include/asm/atarihw.h
+@@ -22,7 +22,6 @@
+
+ #include <linux/types.h>
+ #include <asm/bootinfo-atari.h>
+-#include <asm/raw_io.h>
+ #include <asm/kmap.h>
+
+ extern u_long atari_mch_cookie;
+@@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present;
+ */
+
+
+-#define atari_readb raw_inb
+-#define atari_writeb raw_outb
+-
+-#define atari_inb_p raw_inb
+-#define atari_outb_p raw_outb
+-
+-
+-
+ #include <linux/mm.h>
+ #include <asm/cacheflush.h>
+
+diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
+index 782b78f8a048..e056feabbaf0 100644
+--- a/arch/m68k/include/asm/io_mm.h
++++ b/arch/m68k/include/asm/io_mm.h
+@@ -29,7 +29,11 @@
+ #include <asm-generic/iomap.h>
+
+ #ifdef CONFIG_ATARI
+-#include <asm/atarihw.h>
++#define atari_readb raw_inb
++#define atari_writeb raw_outb
++
++#define atari_inb_p raw_inb
++#define atari_outb_p raw_outb
+ #endif
+
+
+diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
+index 08cee11180e6..e441517785fd 100644
+--- a/arch/m68k/include/asm/macintosh.h
++++ b/arch/m68k/include/asm/macintosh.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+
+ #include <asm/bootinfo-mac.h>
+
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 828f6656f8f7..649fb268f446 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
+ struct imc_pmu *pmu_ptr)
+ {
+ static u64 loc, *imc_mode_addr, *imc_cmd_addr;
+- int chip = 0, nid;
+ char mode[16], cmd[16];
+ u32 cb_offset;
++ struct imc_mem_info *ptr = pmu_ptr->mem_info;
+
+ imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
+
+@@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
+ if (of_property_read_u32(node, "cb_offset", &cb_offset))
+ cb_offset = IMC_CNTL_BLK_OFFSET;
+
+- for_each_node(nid) {
+- loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
++ while (ptr->vbase != NULL) {
++ loc = (u64)(ptr->vbase) + cb_offset;
+ imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
+- sprintf(mode, "imc_mode_%d", nid);
++ sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
+ if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
+ imc_mode_addr))
+ goto err;
+
+ imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
+- sprintf(cmd, "imc_cmd_%d", nid);
++ sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
+ if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
+ imc_cmd_addr))
+ goto err;
+- chip++;
++ ptr++;
+ }
+ return;
+
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 8ff7cb3da1cb..2bc189187ed4 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
++ if (!nbytes)
++ return -EINVAL;
++
+ if (unlikely(!xts_ctx->fc))
+ return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+@@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
++ if (!nbytes)
++ return -EINVAL;
++
+ if (unlikely(!xts_ctx->fc))
+ return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index aebedbaf5260..5d0b72f28140 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -58,6 +58,9 @@
+ #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
+ #define INTEL_FAM6_ICELAKE_NNPI 0x9D
+
++#define INTEL_FAM6_TIGERLAKE_L 0x8C
++#define INTEL_FAM6_TIGERLAKE 0x8D
++
+ /* "Small Core" Processors (Atom) */
+
+ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index b316bd61a6ac..dfdd1caf0d55 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1450,54 +1450,72 @@ static void lapic_setup_esr(void)
+ oldvalue, value);
+ }
+
+-static void apic_pending_intr_clear(void)
++#define APIC_IR_REGS APIC_ISR_NR
++#define APIC_IR_BITS (APIC_IR_REGS * 32)
++#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
++
++union apic_ir {
++ unsigned long map[APIC_IR_MAPSIZE];
++ u32 regs[APIC_IR_REGS];
++};
++
++static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
+ {
+- long long max_loops = cpu_khz ? cpu_khz : 1000000;
+- unsigned long long tsc = 0, ntsc;
+- unsigned int queued;
+- unsigned long value;
+- int i, j, acked = 0;
++ int i, bit;
++
++ /* Read the IRRs */
++ for (i = 0; i < APIC_IR_REGS; i++)
++ irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
++
++ /* Read the ISRs */
++ for (i = 0; i < APIC_IR_REGS; i++)
++ isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
+
+- if (boot_cpu_has(X86_FEATURE_TSC))
+- tsc = rdtsc();
+ /*
+- * After a crash, we no longer service the interrupts and a pending
+- * interrupt from previous kernel might still have ISR bit set.
+- *
+- * Most probably by now CPU has serviced that pending interrupt and
+- * it might not have done the ack_APIC_irq() because it thought,
+- * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+- * does not clear the ISR bit and cpu thinks it has already serivced
+- * the interrupt. Hence a vector might get locked. It was noticed
+- * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
++ * If the ISR map is not empty. ACK the APIC and run another round
++ * to verify whether a pending IRR has been unblocked and turned
++ * into a ISR.
+ */
+- do {
+- queued = 0;
+- for (i = APIC_ISR_NR - 1; i >= 0; i--)
+- queued |= apic_read(APIC_IRR + i*0x10);
+-
+- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+- value = apic_read(APIC_ISR + i*0x10);
+- for_each_set_bit(j, &value, 32) {
+- ack_APIC_irq();
+- acked++;
+- }
+- }
+- if (acked > 256) {
+- pr_err("LAPIC pending interrupts after %d EOI\n", acked);
+- break;
+- }
+- if (queued) {
+- if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
+- ntsc = rdtsc();
+- max_loops = (long long)cpu_khz << 10;
+- max_loops -= ntsc - tsc;
+- } else {
+- max_loops--;
+- }
+- }
+- } while (queued && max_loops > 0);
+- WARN_ON(max_loops <= 0);
++ if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
++ /*
++ * There can be multiple ISR bits set when a high priority
++ * interrupt preempted a lower priority one. Issue an ACK
++ * per set bit.
++ */
++ for_each_set_bit(bit, isr->map, APIC_IR_BITS)
++ ack_APIC_irq();
++ return true;
++ }
++
++ return !bitmap_empty(irr->map, APIC_IR_BITS);
++}
++
++/*
++ * After a crash, we no longer service the interrupts and a pending
++ * interrupt from previous kernel might still have ISR bit set.
++ *
++ * Most probably by now the CPU has serviced that pending interrupt and it
++ * might not have done the ack_APIC_irq() because it thought, interrupt
++ * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
++ * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
++ * a vector might get locked. It was noticed for timer irq (vector
++ * 0x31). Issue an extra EOI to clear ISR.
++ *
++ * If there are pending IRR bits they turn into ISR bits after a higher
++ * priority ISR bit has been acked.
++ */
++static void apic_pending_intr_clear(void)
++{
++ union apic_ir irr, isr;
++ unsigned int i;
++
++ /* 512 loops are way oversized and give the APIC a chance to obey. */
++ for (i = 0; i < 512; i++) {
++ if (!apic_check_and_ack(&irr, &isr))
++ return;
++ }
++ /* Dump the IRR/ISR content if that failed */
++ pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
+ }
+
+ /**
+@@ -1520,6 +1538,14 @@ static void setup_local_APIC(void)
+ return;
+ }
+
++ /*
++ * If this comes from kexec/kcrash the APIC might be enabled in
++ * SPIV. Soft disable it before doing further initialization.
++ */
++ value = apic_read(APIC_SPIV);
++ value &= ~APIC_SPIV_APIC_ENABLED;
++ apic_write(APIC_SPIV, value);
++
+ #ifdef CONFIG_X86_32
+ /* Pound the ESR really hard over the head with a big hammer - mbligh */
+ if (lapic_is_integrated() && apic->disable_esr) {
+@@ -1565,6 +1591,7 @@ static void setup_local_APIC(void)
+ value &= ~APIC_TPRI_MASK;
+ apic_write(APIC_TASKPRI, value);
+
++ /* Clear eventually stale ISR/IRR bits */
+ apic_pending_intr_clear();
+
+ /*
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 10e1d17aa060..c352ca2e1456 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -400,6 +400,17 @@ static int activate_reserved(struct irq_data *irqd)
+ if (!irqd_can_reserve(irqd))
+ apicd->can_reserve = false;
+ }
++
++ /*
++ * Check to ensure that the effective affinity mask is a subset
++ * the user supplied affinity mask, and warn the user if it is not
++ */
++ if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
++ irq_data_get_affinity_mask(irqd))) {
++ pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
++ irqd->irq);
++ }
++
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 04adc8d60aed..b2b87b91f336 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
+ irq_exit();
+ }
+
++static int register_stop_handler(void)
++{
++ return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
++ NMI_FLAG_FIRST, "smp_stop");
++}
++
+ static void native_stop_other_cpus(int wait)
+ {
+ unsigned long flags;
+@@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
+ apic->send_IPI_allbutself(REBOOT_VECTOR);
+
+ /*
+- * Don't wait longer than a second if the caller
+- * didn't ask us to wait.
++ * Don't wait longer than a second for IPI completion. The
++ * wait request is not checked here because that would
++ * prevent an NMI shutdown attempt in case that not all
++ * CPUs reach shutdown state.
+ */
+ timeout = USEC_PER_SEC;
+- while (num_online_cpus() > 1 && (wait || timeout--))
++ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
+ }
+-
+- /* if the REBOOT_VECTOR didn't work, try with the NMI */
+- if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
+- if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+- NMI_FLAG_FIRST, "smp_stop"))
+- /* Note: we ignore failures here */
+- /* Hope the REBOOT_IRQ is good enough */
+- goto finish;
+-
+- /* sync above data before sending IRQ */
+- wmb();
+
+- pr_emerg("Shutting down cpus with NMI\n");
++ /* if the REBOOT_VECTOR didn't work, try with the NMI */
++ if (num_online_cpus() > 1) {
++ /*
++ * If NMI IPI is enabled, try to register the stop handler
++ * and send the IPI. In any case try to wait for the other
++ * CPUs to stop.
++ */
++ if (!smp_no_nmi_ipi && !register_stop_handler()) {
++ /* Sync above data before sending IRQ */
++ wmb();
+
+- apic->send_IPI_allbutself(NMI_VECTOR);
++ pr_emerg("Shutting down cpus with NMI\n");
+
++ apic->send_IPI_allbutself(NMI_VECTOR);
++ }
+ /*
+- * Don't wait longer than a 10 ms if the caller
+- * didn't ask us to wait.
++ * Don't wait longer than 10 ms if the caller didn't
++ * reqeust it. If wait is true, the machine hangs here if
++ * one or more CPUs do not reach shutdown state.
+ */
+ timeout = USEC_PER_MSEC * 10;
+ while (num_online_cpus() > 1 && (wait || timeout--))
+ udelay(1);
+ }
+
+-finish:
+ local_irq_save(flags);
+ disable_local_APIC();
+ mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 429728b35bca..e699f4d2a450 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5368,6 +5368,8 @@ done_prefixes:
+ ctxt->memopp->addr.mem.ea + ctxt->_eip);
+
+ done:
++ if (rc == X86EMUL_PROPAGATE_FAULT)
++ ctxt->have_exception = true;
+ return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index dbae8415cf4a..05cb5855255e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -581,8 +581,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ data, offset, len, access);
+ }
+
++static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
++{
++ return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
++ rsvd_bits(1, 2);
++}
++
+ /*
+- * Load the pae pdptrs. Return true is they are all valid.
++ * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
+ */
+ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+ {
+@@ -601,8 +607,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+ }
+ for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
+ if ((pdpte[i] & PT_PRESENT_MASK) &&
+- (pdpte[i] &
+- vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
++ (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
+ ret = 0;
+ goto out;
+ }
+@@ -6244,8 +6249,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+ if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+ emulation_type))
+ return EMULATE_DONE;
+- if (ctxt->have_exception && inject_emulated_exception(vcpu))
++ if (ctxt->have_exception) {
++ /*
++ * #UD should result in just EMULATION_FAILED, and trap-like
++ * exception should not be encountered during decode.
++ */
++ WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
++ exception_type(ctxt->exception.vector) == EXCPT_TRAP);
++ inject_emulated_exception(vcpu);
+ return EMULATE_DONE;
++ }
+ if (emulation_type & EMULTYPE_SKIP)
+ return EMULATE_FAIL;
+ return handle_emulation_failure(vcpu, emulation_type);
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 4df3e5c89d57..622d5968c979 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -338,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+- addr += PUD_SIZE;
++ WARN_ON_ONCE(addr & ~PUD_MASK);
++ addr = round_up(addr + 1, PUD_SIZE);
+ continue;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+- addr += PMD_SIZE;
++ WARN_ON_ONCE(addr & ~PMD_MASK);
++ addr = round_up(addr + 1, PMD_SIZE);
+ continue;
+ }
+
+@@ -643,6 +645,8 @@ void __init pti_init(void)
+ */
+ void pti_finalize(void)
+ {
++ if (!boot_cpu_has(X86_FEATURE_PTI))
++ return;
+ /*
+ * We need to clone everything (again) that maps parts of the
+ * kernel image.
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 87fc49daa2b4..256fa1ccc2bd 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -232,6 +232,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
+
+ /* release the tag's ownership to the req cloned from */
+ spin_lock_irqsave(&fq->mq_flush_lock, flags);
++
++ if (!refcount_dec_and_test(&flush_rq->ref)) {
++ fq->rq_status = error;
++ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
++ return;
++ }
++
++ if (fq->rq_status != BLK_STS_OK)
++ error = fq->rq_status;
++
+ hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
+ if (!q->elevator) {
+ blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 7ea85ec52026..684acaa96db7 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -844,7 +844,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+ */
+ if (blk_mq_req_expired(rq, next))
+ blk_mq_rq_timed_out(rq, reserved);
+- if (refcount_dec_and_test(&rq->ref))
++
++ if (is_flush_rq(rq, hctx))
++ rq->end_io(rq, 0);
++ else if (refcount_dec_and_test(&rq->ref))
+ __blk_mq_free_request(rq);
+ }
+
+diff --git a/block/blk.h b/block/blk.h
+index 11e4ca2f2cd4..1a5b67b57e6b 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -23,6 +23,7 @@ struct blk_flush_queue {
+ unsigned int flush_queue_delayed:1;
+ unsigned int flush_pending_idx:1;
+ unsigned int flush_running_idx:1;
++ blk_status_t rq_status;
+ unsigned long flush_pending_since;
+ struct list_head flush_queue[2];
+ struct list_head flush_data_in_flight;
+@@ -123,6 +124,12 @@ static inline void __blk_get_queue(struct request_queue *q)
+ kobject_get(&q->kobj);
+ }
+
++static inline bool
++is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
++{
++ return hctx->fq->flush_rq == req;
++}
++
+ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+ int node, int cmd_size, gfp_t flags);
+ void blk_free_flush_queue(struct blk_flush_queue *q);
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index fc447410ae4d..a448cdf56718 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
+ }
+
+ if (acpi_duplicate_processor_id(pr->acpi_id)) {
+- dev_err(&device->dev,
+- "Failed to get unique processor _UID (0x%x)\n",
+- pr->acpi_id);
++ if (pr->acpi_id == 0xff)
++ dev_info_once(&device->dev,
++ "Entry not well-defined, consider updating BIOS\n");
++ else
++ dev_err(&device->dev,
++ "Failed to get unique processor _UID (0x%x)\n",
++ pr->acpi_id);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index d9ce4b162e2c..a1aa59849b96 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -369,8 +369,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
+ union acpi_object *psd = NULL;
+ struct acpi_psd_package *pdomain;
+
+- status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
+- ACPI_TYPE_PACKAGE);
++ status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
++ &buffer, ACPI_TYPE_PACKAGE);
++ if (status == AE_NOT_FOUND) /* _PSD is optional */
++ return 0;
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index e967c1173ba3..222ea3f12f41 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+- (count > uncopied_bytes))
++ (count > uncopied_bytes)) {
++ kfree(buf);
+ return -EINVAL;
++ }
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+@@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+ }
+
++ kfree(buf);
+ return count;
+ }
+
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+index c576a6fe4ebb..94ded9513c73 100644
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
+ * No IRQ known to the ACPI subsystem - maybe the BIOS /
+ * driver reported one, then use it. Exit in any case.
+ */
+- if (!acpi_pci_irq_valid(dev, pin))
++ if (!acpi_pci_irq_valid(dev, pin)) {
++ kfree(entry);
+ return 0;
++ }
+
+ if (acpi_isa_register_gsi(dev))
+ dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 021ce46e2e57..5d110b1362e7 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -81,6 +81,12 @@ enum board_ids {
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
++ /*
++ * board IDs for Intel chipsets that support more than 6 ports
++ * *and* end up needing the PCS quirk.
++ */
++ board_ahci_pcs7,
++
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+@@ -236,6 +242,12 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_vt8251_ops,
+ },
++ [board_ahci_pcs7] = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ };
+
+ static const struct pci_device_id ahci_pci_tbl[] = {
+@@ -280,26 +292,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+@@ -639,30 +651,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+
+-static int ahci_pci_reset_controller(struct ata_host *host)
+-{
+- struct pci_dev *pdev = to_pci_dev(host->dev);
+- int rc;
+-
+- rc = ahci_reset_controller(host);
+- if (rc)
+- return rc;
+-
+- if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+- struct ahci_host_priv *hpriv = host->private_data;
+- u16 tmp16;
+-
+- /* configure PCS */
+- pci_read_config_word(pdev, 0x92, &tmp16);
+- if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+- tmp16 |= hpriv->port_map;
+- pci_write_config_word(pdev, 0x92, tmp16);
+- }
+- }
+-
+- return 0;
+-}
+-
+ static void ahci_pci_init_controller(struct ata_host *host)
+ {
+ struct ahci_host_priv *hpriv = host->private_data;
+@@ -865,7 +853,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
+
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+ ahci_pci_init_controller(host);
+@@ -900,7 +888,7 @@ static int ahci_pci_device_resume(struct device *dev)
+ ahci_mcp89_apple_enable(pdev);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+@@ -1635,6 +1623,34 @@ update_policy:
+ ap->target_lpm_policy = policy;
+ }
+
++static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
++{
++ const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
++ u16 tmp16;
++
++ /*
++ * Only apply the 6-port PCS quirk for known legacy platforms.
++ */
++ if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
++ return;
++ if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
++ return;
++
++ /*
++ * port_map is determined from PORTS_IMPL PCI register which is
++ * implemented as write or write-once register. If the register
++ * isn't programmed, ahci automatically generates it from number
++ * of ports, which is good enough for PCS programming. It is
++ * otherwise expected that platform firmware enables the ports
++ * before the OS boots.
++ */
++ pci_read_config_word(pdev, PCS_6, &tmp16);
++ if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
++ tmp16 |= hpriv->port_map;
++ pci_write_config_word(pdev, PCS_6, tmp16);
++ }
++}
++
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ unsigned int board_id = ent->driver_data;
+@@ -1747,6 +1763,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
++ /*
++ * If platform firmware failed to enable ports, try to enable
++ * them here.
++ */
++ ahci_intel_pcs_quirk(pdev, hpriv);
++
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+@@ -1856,7 +1878,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (rc)
+ return rc;
+
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 6a1515f0da40..9290e787abdc 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -261,6 +261,8 @@ enum {
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
++ PCS_6 = 0x92, /* 6 port PCS */
++ PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+diff --git a/drivers/base/soc.c b/drivers/base/soc.c
+index 10b280f30217..7e91894a380b 100644
+--- a/drivers/base/soc.c
++++ b/drivers/base/soc.c
+@@ -157,6 +157,7 @@ out2:
+ out1:
+ return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL_GPL(soc_device_register);
+
+ /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+ void soc_device_unregister(struct soc_device *soc_dev)
+@@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
+ device_unregister(&soc_dev->dev);
+ early_soc_dev_attr = NULL;
+ }
++EXPORT_SYMBOL_GPL(soc_device_unregister);
+
+ static int __init soc_bus_register(void)
+ {
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index cef8e00c9d9d..126c2c514673 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1719,6 +1719,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ case LOOP_SET_FD:
+ case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
++ case LOOP_SET_DIRECT_IO:
+ err = lo_ioctl(bdev, mode, cmd, arg);
+ break;
+ default:
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index fa60f265ee50..b1c7009de1f4 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -353,8 +353,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ }
+ config = nbd->config;
+
+- if (!mutex_trylock(&cmd->lock))
++ if (!mutex_trylock(&cmd->lock)) {
++ nbd_config_put(nbd);
+ return BLK_EH_RESET_TIMER;
++ }
+
+ if (config->num_connections > 1) {
+ dev_err_ratelimited(nbd_to_dev(nbd),
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index aaf9e5afaad4..0ef7cb0448e8 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
+ size_t size = min_t(size_t, 16, rng_buffer_size());
+
+ mutex_lock(&reading_mutex);
+- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
++ bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0)
+ add_device_randomness(rng_buffer, bytes_read);
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 7b4e4de778e4..54b86490d9ca 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+ }
+ #endif
+
++static inline bool should_stop_iteration(void)
++{
++ if (need_resched())
++ cond_resched();
++ return fatal_signal_pending(current);
++}
++
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ p += sz;
+ count -= sz;
+ read += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ kfree(bounce);
+
+@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ read += sz;
+ low_count -= sz;
+ count -= sz;
++ if (should_stop_iteration()) {
++ count = 0;
++ break;
++ }
+ }
+ }
+
+@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ buf += sz;
+ read += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
+@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
+ buf += sz;
+ virtr += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
+diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
+index c25658b26598..24a9658348d7 100644
+--- a/drivers/devfreq/exynos-bus.c
++++ b/drivers/devfreq/exynos-bus.c
+@@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev)
+ if (ret < 0)
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+- if (bus->regulator)
+- regulator_disable(bus->regulator);
+-
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
++ if (bus->regulator)
++ regulator_disable(bus->regulator);
+ }
+
+ /*
+@@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
++ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+@@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+- /* Parse the device-tree to get the resource information */
+- ret = exynos_bus_parse_of(np, bus);
+- if (ret < 0)
+- return ret;
+-
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+- if (!profile) {
+- ret = -ENOMEM;
+- goto err;
+- }
++ if (!profile)
++ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+- goto passive;
++ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
++ if (ret < 0)
++ return ret;
+ }
+
++ /* Parse the device-tree to get the resource information */
++ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+- goto err;
++ goto err_reg;
++
++ if (passive)
++ goto passive;
+
+ /* Initialize the struct profile and governor data for parent device */
+ profile->polling_ms = 50;
+@@ -510,6 +510,9 @@ out:
+ err:
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
++err_reg:
++ if (!passive)
++ regulator_disable(bus->regulator);
+
+ return ret;
+ }
+diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
+index 3bc29acbd54e..8cfb69749d49 100644
+--- a/drivers/devfreq/governor_passive.c
++++ b/drivers/devfreq/governor_passive.c
+@@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
+ static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+ {
+- struct device *dev = devfreq->dev.parent;
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent = (struct devfreq *)p_data->parent;
+@@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ p_data->this = devfreq;
+
+ nb->notifier_call = devfreq_passive_notifier_call;
+- ret = devm_devfreq_register_notifier(dev, parent, nb,
++ ret = devfreq_register_notifier(parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ case DEVFREQ_GOV_STOP:
+- devm_devfreq_unregister_notifier(dev, parent, nb,
+- DEVFREQ_TRANSITION_NOTIFIER);
++ WARN_ON(devfreq_unregister_notifier(parent, nb,
++ DEVFREQ_TRANSITION_NOTIFIER));
+ break;
+ default:
+ break;
+diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
+index 2b11d967acd0..9d782cc95c6a 100644
+--- a/drivers/dma/bcm2835-dma.c
++++ b/drivers/dma/bcm2835-dma.c
+@@ -898,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+- if (rc)
++ if (rc) {
++ dev_err(&pdev->dev, "Unable to set DMA mask\n");
+ return rc;
++ }
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
+index a410657f7bcd..012584cf3c17 100644
+--- a/drivers/dma/iop-adma.c
++++ b/drivers/dma/iop-adma.c
+@@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
+ list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
+ chain_node) {
+ pr_debug("\tcookie: %d slot: %d busy: %d "
+- "this_desc: %#x next_desc: %#x ack: %d\n",
++ "this_desc: %#x next_desc: %#llx ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy,
+- iter->async_tx.phys, iop_desc_get_next_desc(iter),
++ iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+@@ -315,9 +315,9 @@ retry:
+ int i;
+ dev_dbg(iop_chan->device->common.dev,
+ "allocated slot: %d "
+- "(desc %p phys: %#x) slots_per_op %d\n",
++ "(desc %p phys: %#llx) slots_per_op %d\n",
+ iter->idx, iter->hw_desc,
+- iter->async_tx.phys, slots_per_op);
++ (u64)iter->async_tx.phys, slots_per_op);
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op)
+@@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ return NULL;
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
+
+- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
+ __func__, len);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev,
+- "%s src_cnt: %d len: %u flags: %lx\n",
++ "%s src_cnt: %d len: %zu flags: %lx\n",
+ __func__, src_cnt, len, flags);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
+ if (unlikely(!len))
+ return NULL;
+
+- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
+ __func__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev,
+- "%s src_cnt: %d len: %u flags: %lx\n",
++ "%s src_cnt: %d len: %zu flags: %lx\n",
+ __func__, src_cnt, len, flags);
+
+ if (dmaf_p_disabled_continue(flags))
+@@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ return NULL;
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
+ __func__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index ceabdea40ae0..982631d4e1f8 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
+
+ ecc->default_queue = info->default_queue;
+
+- for (i = 0; i < ecc->num_slots; i++)
+- edma_write_slot(ecc, i, &dummy_paramset);
+-
+ if (info->rsv) {
+ /* Set the reserved slots in inuse list */
+ rsv_slots = info->rsv->rsv_slots;
+@@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
+ }
+ }
+
++ for (i = 0; i < ecc->num_slots; i++) {
++ /* Reset only unused - not reserved - paRAM slots */
++ if (!test_bit(i, ecc->slot_inuse))
++ edma_write_slot(ecc, i, &dummy_paramset);
++ }
++
+ /* Clear the xbar mapped channels in unused list */
+ xbar_chans = info->xbar_chans;
+ if (xbar_chans) {
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 5762c3c383f2..56de378ad13d 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
+ struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int irq = irq_desc_get_irq(desc);
++ unsigned long bits;
+
+ dberr = (irq == edac->db_irq) ? 1 : 0;
+ sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
+@@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
+
+ regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
+
+- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
++ bits = irq_status;
++ for_each_set_bit(bit, &bits, 32) {
+ irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index e2addb2bca29..94265e438514 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2501,13 +2501,6 @@ static void decode_umc_error(int node_id, struct mce *m)
+ goto log_error;
+ }
+
+- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+- err.err_code = ERR_NORM_ADDR;
+- goto log_error;
+- }
+-
+- error_address_to_page_and_offset(sys_addr, &err);
+-
+ if (!(m->status & MCI_STATUS_SYNDV)) {
+ err.err_code = ERR_SYND;
+ goto log_error;
+@@ -2524,6 +2517,13 @@ static void decode_umc_error(int node_id, struct mce *m)
+
+ err.csrow = m->synd & 0x7;
+
++ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
++ err.err_code = ERR_NORM_ADDR;
++ goto log_error;
++ }
++
++ error_address_to_page_and_offset(sys_addr, &err);
++
+ log_error:
+ __log_ecc_error(mci, &err, ecc_type);
+ }
+@@ -3101,12 +3101,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
+ static inline void
+ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+ {
+- u8 i, ecc_en = 1, cpk_en = 1;
++ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
+
+ for (i = 0; i < NUM_UMCS; i++) {
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
+ cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
++
++ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
++ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
+ }
+ }
+
+@@ -3114,8 +3117,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+ if (ecc_en) {
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+- if (cpk_en)
++ if (!cpk_en)
++ return;
++
++ if (dev_x4)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
++ else if (dev_x16)
++ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
++ else
++ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
+ }
+ }
+
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 7d3edd713932..f59511bd9926 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -1246,9 +1246,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ if (p > e->location)
+ *(p - 1) = '\0';
+
+- /* Report the error via the trace interface */
+- grain_bits = fls_long(e->grain) + 1;
++ /* Sanity-check driver-supplied grain value. */
++ if (WARN_ON_ONCE(!e->grain))
++ e->grain = 1;
++
++ grain_bits = fls_long(e->grain - 1);
+
++ /* Report the error via the trace interface */
+ if (IS_ENABLED(CONFIG_RAS))
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer,
+diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
+index 903a4f1fadcc..0153c730750e 100644
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -268,11 +268,14 @@ static u64 get_sideband_reg_base_addr(void)
+ }
+ }
+
++#define DNV_MCHBAR_SIZE 0x8000
++#define DNV_SB_PORT_SIZE 0x10000
+ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+ {
+ struct pci_dev *pdev;
+ char *base;
+ u64 addr;
++ unsigned long size;
+
+ if (op == 4) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+@@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
+ addr = get_mem_ctrl_hub_base_addr();
+ if (!addr)
+ return -ENODEV;
++ size = DNV_MCHBAR_SIZE;
+ } else {
+ /* MMIO via sideband register base address */
+ addr = get_sideband_reg_base_addr();
+ if (!addr)
+ return -ENODEV;
+ addr += (port << 16);
++ size = DNV_SB_PORT_SIZE;
+ }
+
+- base = ioremap((resource_size_t)addr, 0x10000);
++ base = ioremap((resource_size_t)addr, size);
+ if (!base)
+ return -ENODEV;
+
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 8f952f2f1a29..09119e3f5c01 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
+ struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+ struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
++ /*
++ * Ideally channel must be free by now unless OS timeout last
++ * request and platform continued to process the same, wait
++ * until it releases the shared memory, otherwise we may endup
++ * overwriting its response with new message payload or vice-versa
++ */
++ spin_until_cond(ioread32(&mem->channel_status) &
++ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &mem->channel_status);
+ iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index 6090d25dce85..4045098ddb86 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -402,6 +402,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ printk(
+ "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+ pfx, pcie->bridge.secondary_status, pcie->bridge.control);
++
++ /* Fatal errors call __ghes_panic() before AER handler prints this */
++ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
++ (gdata->error_severity & CPER_SEV_FATAL)) {
++ struct aer_capability_regs *aer;
++
++ aer = (struct aer_capability_regs *)pcie->aer_info;
++ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
++ pfx, aer->uncor_status, aer->uncor_mask);
++ printk("%saer_uncor_severity: 0x%08x\n",
++ pfx, aer->uncor_severity);
++ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
++ aer->header_log.dw0, aer->header_log.dw1,
++ aer->header_log.dw2, aer->header_log.dw3);
++ }
+ }
+
+ static void cper_print_tstamp(const char *pfx,
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index e778af766fae..98c987188835 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -18,6 +18,7 @@
+ #include <linux/init.h>
+ #include <linux/cpumask.h>
+ #include <linux/export.h>
++#include <linux/dma-direct.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
++ dma_addr_t ptr_dma;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+@@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
++ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
++ ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
+
+ /* Fill source vmid detail */
+ src = ptr;
+@@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+- dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
++ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 221de241535a..3b07a316680c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1462,6 +1462,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ }
+
+ static const struct backlight_ops amdgpu_dm_backlight_ops = {
++ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index b52ccab428a9..c7c505095402 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4052,6 +4052,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
++ if (data->frame_time_x2 < 280) {
++ pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
++ data->frame_time_x2 = 280;
++ }
++
+ display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 34e45b97629e..2f2fb1966958 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -694,8 +694,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
+
+ if (resource->caps.flags & POWER_METER_CAN_CAP) {
+ if (!can_cap_in_hardware()) {
+- dev_err(&resource->acpi_dev->dev,
+- "Ignoring unsafe software power cap!\n");
++ dev_warn(&resource->acpi_dev->dev,
++ "Ignoring unsafe software power cap!\n");
+ goto skip_unsafe_cap;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index b75ff144b570..e6f351c92c02 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -203,6 +203,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
+ if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
+ /* We got a NACKIE */
+ readb(riic->base + RIIC_ICDRR); /* dummy read */
++ riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
+ riic->err = -ENXIO;
+ } else if (riic->bytes_left) {
+ return IRQ_NONE;
+diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
+index 0307405491e0..f208a25d0e4f 100644
+--- a/drivers/infiniband/hw/hfi1/mad.c
++++ b/drivers/infiniband/hw/hfi1/mad.c
+@@ -2326,7 +2326,7 @@ struct opa_port_status_req {
+ __be32 vl_select_mask;
+ };
+
+-#define VL_MASK_ALL 0x000080ff
++#define VL_MASK_ALL 0x00000000000080ffUL
+
+ struct opa_port_status_rsp {
+ __u8 port_num;
+@@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
+ }
+
+ static void a0_portstatus(struct hfi1_pportdata *ppd,
+- struct opa_port_status_rsp *rsp, u32 vl_select_mask)
++ struct opa_port_status_rsp *rsp)
+ {
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+- u32 vl_all_mask = VL_MASK_ALL;
++ unsigned long vl_all_mask = VL_MASK_ALL;
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+- 8 * sizeof(vl_all_mask)) {
++ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+@@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ (struct opa_port_status_req *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_port_status_rsp *rsp;
+- u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
++ unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ unsigned long vl;
+ size_t response_data_size;
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u8 port_num = req->port_num;
+- u8 num_vls = hweight32(vl_select_mask);
++ u8 num_vls = hweight64(vl_select_mask);
+ struct _vls_pctrs *vlinfo;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+@@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+
+ hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
+
+- rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
++ rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+@@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
+@@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ vfi++;
+ }
+
+- a0_portstatus(ppd, rsp, vl_select_mask);
++ a0_portstatus(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+@@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+ return error_counter_summary;
+ }
+
+-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
+- u32 vl_select_mask)
++static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
+ {
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+- u32 vl_all_mask = VL_MASK_ALL;
++ unsigned long vl_all_mask = VL_MASK_ALL;
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+- 8 * sizeof(vl_all_mask)) {
++ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+@@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ u64 port_mask;
+ u8 port_num;
+ unsigned long vl;
+- u32 vl_select_mask;
++ unsigned long vl_select_mask;
+ int vfi;
+ u16 link_width;
+ u16 link_speed;
+@@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(req->vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+@@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ vfi++;
+ }
+
+- a0_datacounters(ppd, rsp, vl_select_mask);
++ a0_datacounters(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+@@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ struct _vls_ectrs *vlinfo;
+ unsigned long vl;
+ u64 port_mask, tmp;
+- u32 vl_select_mask;
++ unsigned long vl_select_mask;
+ int vfi;
+
+ req = (struct opa_port_error_counters64_msg *)pmp->data;
+@@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ vlinfo = &rsp->vls[0];
+ vfi = 0;
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(req->vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+@@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u64 portn = be64_to_cpu(req->port_select_mask[3]);
+ u32 counter_select = be32_to_cpu(req->counter_select_mask);
+- u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
++ unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+ unsigned long vl;
+
+ if ((nports != 1) || (portn != 1 << port)) {
+@@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ if (counter_select & CS_UNCORRECTABLE_ERRORS)
+ write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 53eccc0da8fd..c05eae93170e 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -6370,6 +6370,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
++ kfree(mpi);
+ return;
+ }
+
+diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
+index ab5eba6edf82..e13ea199f589 100644
+--- a/drivers/iommu/Makefile
++++ b/drivers/iommu/Makefile
+@@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+ obj-$(CONFIG_IOMMU_IOVA) += iova.o
+ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
+ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
++obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
+ obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
+ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 69c269dc4f1b..1f2ed44de243 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2563,7 +2563,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
+
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+- ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
++ ret = iommu_map_page(domain, bus_addr, phys_addr,
++ PAGE_SIZE, prot,
++ GFP_ATOMIC | __GFP_NOWARN);
+ if (ret)
+ goto out_unmap;
+
+diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
+new file mode 100644
+index 000000000000..12d540d9b59b
+--- /dev/null
++++ b/drivers/iommu/amd_iommu.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef AMD_IOMMU_H
++#define AMD_IOMMU_H
++
++int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
++
++#ifdef CONFIG_DMI
++void amd_iommu_apply_ivrs_quirks(void);
++#else
++static void amd_iommu_apply_ivrs_quirks(void) { }
++#endif
++
++#endif
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 66b4800bcdd8..1e9a5da562f0 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -39,6 +39,7 @@
+ #include <asm/irq_remapping.h>
+
+ #include <linux/crash_dump.h>
++#include "amd_iommu.h"
+ #include "amd_iommu_proto.h"
+ #include "amd_iommu_types.h"
+ #include "irq_remapping.h"
+@@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ set_iommu_for_device(iommu, devid);
+ }
+
+-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
++int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+ {
+ struct devid_map *entry;
+ struct list_head *list;
+@@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ if (ret)
+ return ret;
+
++ amd_iommu_apply_ivrs_quirks();
++
+ /*
+ * First save the recommended feature enable bits from ACPI
+ */
+diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
+new file mode 100644
+index 000000000000..c235f79b7a20
+--- /dev/null
++++ b/drivers/iommu/amd_iommu_quirks.c
+@@ -0,0 +1,92 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/*
++ * Quirks for AMD IOMMU
++ *
++ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
++ */
++
++#ifdef CONFIG_DMI
++#include <linux/dmi.h>
++
++#include "amd_iommu.h"
++
++#define IVHD_SPECIAL_IOAPIC 1
++
++struct ivrs_quirk_entry {
++ u8 id;
++ u16 devid;
++};
++
++enum {
++ DELL_INSPIRON_7375 = 0,
++ DELL_LATITUDE_5495,
++ LENOVO_IDEAPAD_330S_15ARR,
++};
++
++static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
++ /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
++ [DELL_INSPIRON_7375] = {
++ { .id = 4, .devid = 0xa0 },
++ { .id = 5, .devid = 0x2 },
++ {}
++ },
++ /* ivrs_ioapic[4]=00:14.0 */
++ [DELL_LATITUDE_5495] = {
++ { .id = 4, .devid = 0xa0 },
++ {}
++ },
++ /* ivrs_ioapic[32]=00:14.0 */
++ [LENOVO_IDEAPAD_330S_15ARR] = {
++ { .id = 32, .devid = 0xa0 },
++ {}
++ },
++ {}
++};
++
++static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
++{
++ const struct ivrs_quirk_entry *i;
++
++ for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
++ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
++
++ return 0;
++}
++
++static const struct dmi_system_id ivrs_quirks[] __initconst = {
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Dell Inspiron 7375",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
++ },
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Dell Latitude 5495",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
++ },
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Lenovo ideapad 330S-15ARR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
++ },
++ {}
++};
++
++void __init amd_iommu_apply_ivrs_quirks(void)
++{
++ dmi_check_system(ivrs_quirks);
++}
++#endif
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 9a576ae837dc..da4516fbf542 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -580,7 +580,9 @@ void queue_iova(struct iova_domain *iovad,
+
+ spin_unlock_irqrestore(&fq->lock, flags);
+
+- if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
++ /* Avoid false sharing as much as possible. */
++ if (!atomic_read(&iovad->fq_timer_on) &&
++ !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+ mod_timer(&iovad->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+ }
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index a73337b74f41..db588a79a9f0 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -764,6 +764,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
+
+ if (sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
+ if (!sk)
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index 17d73db1456e..e4cb3811e82a 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -177,6 +177,7 @@ err_activate:
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ led_set_brightness(led_cdev, LED_OFF);
++ kfree(event);
+
+ return ret;
+ }
+diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
+index 2a9009fe5545..18edc8bdc9f7 100644
+--- a/drivers/leds/leds-lp5562.c
++++ b/drivers/leds/leds-lp5562.c
+@@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
+ {
+ const struct firmware *fw = chip->fw;
+
+- if (fw->size > LP5562_PROGRAM_LENGTH) {
++ /*
++	 * the firmware is encoded in ASCII hex characters, with 2 chars
++ * per byte
++ */
++ if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 73f5319295bc..c12cd809ab19 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -105,8 +105,14 @@ struct closure_syncer {
+
+ static void closure_sync_fn(struct closure *cl)
+ {
+- cl->s->done = 1;
+- wake_up_process(cl->s->task);
++ struct closure_syncer *s = cl->s;
++ struct task_struct *p;
++
++ rcu_read_lock();
++ p = READ_ONCE(s->task);
++ s->done = 1;
++ wake_up_process(p);
++ rcu_read_unlock();
+ }
+
+ void __sched __closure_sync(struct closure *cl)
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 17c6a73c536c..4d36373e1c0f 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -505,6 +505,7 @@ check_again:
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ blk_rq_unprep_clone(clone);
++ blk_mq_cleanup_rq(clone);
+ tio->ti->type->release_clone_rq(clone, &tio->info);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index fb5d702e43b5..a8fbaa384e9a 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1770,8 +1770,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ if (!(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_BITMAP))
+ rdev->saved_raid_disk = -1;
+- } else
+- set_bit(In_sync, &rdev->flags);
++ } else {
++ /*
++ * If the array is FROZEN, then the device can't
++			 * be in_sync with the rest of the array.
++ */
++ if (!test_bit(MD_RECOVERY_FROZEN,
++ &mddev->recovery))
++ set_bit(In_sync, &rdev->flags);
++ }
+ rdev->raid_disk = role;
+ break;
+ }
+@@ -4116,7 +4123,7 @@ array_state_show(struct mddev *mddev, char *page)
+ {
+ enum array_state st = inactive;
+
+- if (mddev->pers)
++ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
+ switch(mddev->ro) {
+ case 1:
+ st = readonly;
+@@ -5671,9 +5678,6 @@ int md_run(struct mddev *mddev)
+ md_update_sb(mddev, 0);
+
+ md_new_event(mddev);
+- sysfs_notify_dirent_safe(mddev->sysfs_state);
+- sysfs_notify_dirent_safe(mddev->sysfs_action);
+- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ return 0;
+
+ abort:
+@@ -5687,6 +5691,7 @@ static int do_md_run(struct mddev *mddev)
+ {
+ int err;
+
++ set_bit(MD_NOT_READY, &mddev->flags);
+ err = md_run(mddev);
+ if (err)
+ goto out;
+@@ -5707,9 +5712,14 @@ static int do_md_run(struct mddev *mddev)
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ mddev->changed = 1;
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
++ sysfs_notify_dirent_safe(mddev->sysfs_state);
++ sysfs_notify_dirent_safe(mddev->sysfs_action);
++ sysfs_notify(&mddev->kobj, NULL, "degraded");
+ out:
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ return err;
+ }
+
+@@ -8797,6 +8807,7 @@ void md_check_recovery(struct mddev *mddev)
+
+ if (mddev_trylock(mddev)) {
+ int spares = 0;
++ bool try_set_sync = mddev->safemode != 0;
+
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+@@ -8842,7 +8853,7 @@ void md_check_recovery(struct mddev *mddev)
+ }
+ }
+
+- if (!mddev->external && !mddev->in_sync) {
++ if (try_set_sync && !mddev->external && !mddev->in_sync) {
+ spin_lock(&mddev->lock);
+ set_in_sync(mddev);
+ spin_unlock(&mddev->lock);
+@@ -8948,7 +8959,8 @@ void md_reap_sync_thread(struct mddev *mddev)
+ /* resync has finished, collect result */
+ md_unregister_thread(&mddev->sync_thread);
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
++ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
++ mddev->degraded != mddev->raid_disks) {
+ /* success...*/
+ /* activate any spares */
+ if (mddev->pers->spare_active(mddev)) {
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 325cb2136a49..4f89463e0b01 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -243,6 +243,9 @@ enum mddev_flags {
+ MD_UPDATING_SB, /* md_check_recovery is updating the metadata
+ * without explicitly holding reconfig_mutex.
+ */
++ MD_NOT_READY, /* do_md_run() is active, so 'array_state'
++ * must not report that array is ready yet
++ */
+ };
+
+ enum mddev_sb_flags {
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index f4daa56d204d..43fa7dbf844b 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -26,6 +26,9 @@
+ #include "raid0.h"
+ #include "raid5.h"
+
++static int default_layout = 0;
++module_param(default_layout, int, 0644);
++
+ #define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+@@ -146,6 +149,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ }
+ pr_debug("md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
++
++ if (conf->nr_strip_zones == 1) {
++ conf->layout = RAID0_ORIG_LAYOUT;
++ } else if (default_layout == RAID0_ORIG_LAYOUT ||
++ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
++ conf->layout = default_layout;
++ } else {
++ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
++ mdname(mddev));
++ pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
++ err = -ENOTSUPP;
++ goto abort;
++ }
+ /*
+ * now since we have the hard sector sizes, we can make sure
+ * chunk size is a multiple of that sector size
+@@ -555,10 +571,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+
+ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ {
++ struct r0conf *conf = mddev->private;
+ struct strip_zone *zone;
+ struct md_rdev *tmp_dev;
+ sector_t bio_sector;
+ sector_t sector;
++ sector_t orig_sector;
+ unsigned chunk_sects;
+ unsigned sectors;
+
+@@ -592,8 +610,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ bio = split;
+ }
+
++ orig_sector = sector;
+ zone = find_zone(mddev->private, &sector);
+- tmp_dev = map_sector(mddev, zone, sector, &sector);
++ switch (conf->layout) {
++ case RAID0_ORIG_LAYOUT:
++ tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
++ break;
++ case RAID0_ALT_MULTIZONE_LAYOUT:
++ tmp_dev = map_sector(mddev, zone, sector, &sector);
++ break;
++ default:
++ WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
++ bio_io_error(bio);
++ return true;
++ }
++
+ bio_set_dev(bio, tmp_dev->bdev);
+ bio->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
+index 540e65d92642..3816e5477db1 100644
+--- a/drivers/md/raid0.h
++++ b/drivers/md/raid0.h
+@@ -8,11 +8,25 @@ struct strip_zone {
+ int nb_dev; /* # of devices attached to the zone */
+ };
+
++/* Linux 3.14 (20d0189b101) made an unintended change to
++ * the RAID0 layout for multi-zone arrays (where devices aren't all
++ * the same size).
++ * RAID0_ORIG_LAYOUT restores the original layout
++ * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
++ * The layouts are identical when there is only one zone (all
++ * devices the same size).
++ */
++
++enum r0layout {
++ RAID0_ORIG_LAYOUT = 1,
++ RAID0_ALT_MULTIZONE_LAYOUT = 2,
++};
+ struct r0conf {
+ struct strip_zone *strip_zone;
+ struct md_rdev **devlist; /* lists of rdevs, pointed to
+ * by strip_zone->dev */
+ int nr_strip_zones;
++ enum r0layout layout;
+ };
+
+ #endif
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index fa47249fa3e4..6929d110d804 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -434,19 +434,21 @@ static void raid1_end_write_request(struct bio *bio)
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+- if (!test_bit(Faulty, &rdev->flags))
+- /* This is the only remaining device,
+- * We need to retry the write without
+- * FailFast
+- */
+- set_bit(R1BIO_WriteError, &r1_bio->state);
+- else {
+- /* Finished with this branch */
+- r1_bio->bios[mirror] = NULL;
+- to_put = bio;
+- }
+- } else
++ }
++
++ /*
++ * When the device is faulty, it is not necessary to
++ * handle write error.
++	 * For failfast, this is the only remaining device,
++	 * so we need to retry the write without FailFast.
++ */
++ if (!test_bit(Faulty, &rdev->flags))
+ set_bit(R1BIO_WriteError, &r1_bio->state);
++ else {
++ /* Finished with this branch */
++ r1_bio->bios[mirror] = NULL;
++ to_put = bio;
++ }
+ } else {
+ /*
+ * Set R1BIO_Uptodate in our master bio, so that we
+@@ -3103,6 +3105,13 @@ static int raid1_run(struct mddev *mddev)
+ !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+ test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ mddev->degraded++;
++ /*
++ * RAID1 needs at least one disk in active
++ */
++ if (conf->raid_disks - mddev->degraded < 1) {
++ ret = -EINVAL;
++ goto abort;
++ }
+
+ if (conf->raid_disks - mddev->degraded == 1)
+ mddev->recovery_cp = MaxSector;
+@@ -3136,8 +3145,12 @@ static int raid1_run(struct mddev *mddev)
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ md_unregister_thread(&mddev->thread);
+- raid1_free(mddev, conf);
++ goto abort;
+ }
++ return 0;
++
++abort:
++ raid1_free(mddev, conf);
+ return ret;
+ }
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index a147619498df..4a5aad26ded7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2540,7 +2540,8 @@ static void raid5_end_read_request(struct bio * bi)
+ int set_bad = 0;
+
+ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+- atomic_inc(&rdev->read_errors);
++ if (!(bi->bi_status == BLK_STS_PROTECTION))
++ atomic_inc(&rdev->read_errors);
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
+@@ -2572,7 +2573,9 @@ static void raid5_end_read_request(struct bio * bi)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
+ if (retry)
+- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
++ if (sh->qd_idx >= 0 && sh->pd_idx == i)
++ set_bit(R5_ReadError, &sh->dev[i].flags);
++ else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ } else
+@@ -5721,7 +5724,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ do_flush = false;
+ }
+
+- set_bit(STRIPE_HANDLE, &sh->state);
++ if (!sh->batch_head)
++ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_opf & REQ_SYNC) &&
+diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
+index dd2078b27a41..2424680f71c3 100644
+--- a/drivers/media/cec/cec-notifier.c
++++ b/drivers/media/cec/cec-notifier.c
+@@ -123,6 +123,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
+ {
+ mutex_lock(&n->lock);
+ n->callback = NULL;
++ n->cec_adap->notifier = NULL;
++ n->cec_adap = NULL;
+ mutex_unlock(&n->lock);
+ cec_notifier_put(n);
+ }
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index c4e7ebfe4d29..8a61150ee249 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -164,6 +164,9 @@ static void dvb_frontend_free(struct kref *ref)
+
+ static void dvb_frontend_put(struct dvb_frontend *fe)
+ {
++ /* call detach before dropping the reference count */
++ if (fe->ops.detach)
++ fe->ops.detach(fe);
+ /*
+ * Check if the frontend was registered, as otherwise
+ * kref was not initialized yet.
+@@ -3035,7 +3038,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
+ dvb_frontend_invoke_release(fe, fe->ops.release_sec);
+ dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
+- dvb_frontend_invoke_release(fe, fe->ops.detach);
+ dvb_frontend_put(fe);
+ }
+ EXPORT_SYMBOL(dvb_frontend_detach);
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 3c8778570331..04dc2f4bc7aa 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
+ if (npads) {
+ dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
+ GFP_KERNEL);
+- if (!dvbdev->pads)
++ if (!dvbdev->pads) {
++ kfree(dvbdev->entity);
+ return -ENOMEM;
++ }
+ }
+
+ switch (type) {
+diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
+index 29836c1a40e9..ee830c76e4b3 100644
+--- a/drivers/media/dvb-frontends/dvb-pll.c
++++ b/drivers/media/dvb-frontends/dvb-pll.c
+@@ -18,6 +18,7 @@
+
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/idr.h>
+ #include <linux/dvb/frontend.h>
+ #include <asm/types.h>
+
+@@ -43,8 +44,7 @@ struct dvb_pll_priv {
+ };
+
+ #define DVB_PLL_MAX 64
+-
+-static unsigned int dvb_pll_devcount;
++static DEFINE_IDA(pll_ida);
+
+ static int debug;
+ module_param(debug, int, 0644);
+@@ -796,6 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ struct dvb_pll_priv *priv = NULL;
+ int ret;
+ const struct dvb_pll_desc *desc;
++ int nr;
+
+ b1 = kmalloc(1, GFP_KERNEL);
+ if (!b1)
+@@ -804,9 +805,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ b1[0] = 0;
+ msg.buf = b1;
+
+- if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
+- (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
+- pll_desc_id = id[dvb_pll_devcount];
++ nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
++ if (nr < 0) {
++ kfree(b1);
++ return NULL;
++ }
++
++ if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
++ pll_desc_id = id[nr];
+
+ BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
+
+@@ -817,24 +823,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer (i2c, &msg, 1);
+- if (ret != 1) {
+- kfree(b1);
+- return NULL;
+- }
++ if (ret != 1)
++ goto out;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
+- if (!priv) {
+- kfree(b1);
+- return NULL;
+- }
++ if (!priv)
++ goto out;
+
+ priv->pll_i2c_address = pll_addr;
+ priv->i2c = i2c;
+ priv->pll_desc = desc;
+- priv->nr = dvb_pll_devcount++;
++ priv->nr = nr;
+
+ memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+@@ -867,6 +869,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ kfree(b1);
+
+ return fe;
++out:
++ kfree(b1);
++ ida_simple_remove(&pll_ida, nr);
++
++ return NULL;
+ }
+ EXPORT_SYMBOL(dvb_pll_attach);
+
+@@ -903,9 +910,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+ static int dvb_pll_remove(struct i2c_client *client)
+ {
+- struct dvb_frontend *fe;
++ struct dvb_frontend *fe = i2c_get_clientdata(client);
++ struct dvb_pll_priv *priv = fe->tuner_priv;
+
+- fe = i2c_get_clientdata(client);
++ ida_simple_remove(&pll_ida, priv->nr);
+ dvb_pll_release(fe);
+ return 0;
+ }
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index d5c0ffc55d46..a3bbef682fb8 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2787,9 +2787,14 @@ static int ov5640_probe(struct i2c_client *client,
+ /* request optional power down pin */
+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
++ if (IS_ERR(sensor->pwdn_gpio))
++ return PTR_ERR(sensor->pwdn_gpio);
++
+ /* request optional reset pin */
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
++ if (IS_ERR(sensor->reset_gpio))
++ return PTR_ERR(sensor->reset_gpio);
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
+
+diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
+index 1722cdab0daf..34343bc10007 100644
+--- a/drivers/media/i2c/ov5645.c
++++ b/drivers/media/i2c/ov5645.c
+@@ -53,6 +53,8 @@
+ #define OV5645_CHIP_ID_HIGH_BYTE 0x56
+ #define OV5645_CHIP_ID_LOW 0x300b
+ #define OV5645_CHIP_ID_LOW_BYTE 0x45
++#define OV5645_IO_MIPI_CTRL00 0x300e
++#define OV5645_PAD_OUTPUT00 0x3019
+ #define OV5645_AWB_MANUAL_CONTROL 0x3406
+ #define OV5645_AWB_MANUAL_ENABLE BIT(0)
+ #define OV5645_AEC_PK_MANUAL 0x3503
+@@ -63,6 +65,7 @@
+ #define OV5645_ISP_VFLIP BIT(2)
+ #define OV5645_TIMING_TC_REG21 0x3821
+ #define OV5645_SENSOR_MIRROR BIT(1)
++#define OV5645_MIPI_CTRL00 0x4800
+ #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
+ #define OV5645_TEST_PATTERN_MASK 0x3
+ #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
+@@ -129,7 +132,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
+ { 0x3503, 0x07 },
+ { 0x3002, 0x1c },
+ { 0x3006, 0xc3 },
+- { 0x300e, 0x45 },
+ { 0x3017, 0x00 },
+ { 0x3018, 0x00 },
+ { 0x302e, 0x0b },
+@@ -358,7 +360,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
+ { 0x3a1f, 0x14 },
+ { 0x0601, 0x02 },
+ { 0x3008, 0x42 },
+- { 0x3008, 0x02 }
++ { 0x3008, 0x02 },
++ { OV5645_IO_MIPI_CTRL00, 0x40 },
++ { OV5645_MIPI_CTRL00, 0x24 },
++ { OV5645_PAD_OUTPUT00, 0x70 }
+ };
+
+ static const struct reg_value ov5645_setting_sxga[] = {
+@@ -745,13 +750,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
+ goto exit;
+ }
+
+- ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+- OV5645_SYSTEM_CTRL0_STOP);
+- if (ret < 0) {
+- ov5645_set_power_off(ov5645);
+- goto exit;
+- }
++ usleep_range(500, 1000);
+ } else {
++ ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
+ ov5645_set_power_off(ov5645);
+ }
+ }
+@@ -1057,11 +1058,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
+ dev_err(ov5645->dev, "could not sync v4l2 controls\n");
+ return ret;
+ }
++
++ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
++ if (ret < 0)
++ return ret;
++
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_START);
+ if (ret < 0)
+ return ret;
+ } else {
++ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
++ if (ret < 0)
++ return ret;
++
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_STOP);
+ if (ret < 0)
+diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
+index 5bea31cd41aa..33a21d585dc9 100644
+--- a/drivers/media/i2c/ov9650.c
++++ b/drivers/media/i2c/ov9650.c
+@@ -716,6 +716,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
+ for (m = 6; m >= 0; m--)
+ if (gain >= (1 << m) * 16)
+ break;
++
++ /* Sanity check: don't adjust the gain with a negative value */
++ if (m < 0)
++ return -EINVAL;
++
+ rgain = (gain - ((1 << m) * 16)) / (1 << m);
+ rgain |= (((1 << m) - 1) << 4);
+
+diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
+index cf1e526de56a..8a1128c60680 100644
+--- a/drivers/media/pci/saa7134/saa7134-i2c.c
++++ b/drivers/media/pci/saa7134/saa7134-i2c.c
+@@ -351,7 +351,11 @@ static const struct i2c_client saa7134_client_template = {
+
+ /* ----------------------------------------------------------- */
+
+-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
++/*
++ * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
++ * demod i2c gate closed due to an address clash between this EEPROM
++ * and the demod one.
++ */
+ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+ {
+ u8 subaddr = 0x7, dmdregval;
+@@ -368,14 +372,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+- pr_debug("%s: DVB-T demod i2c gate was left closed\n",
++ pr_debug("%s: DVB-T demod i2c gate was left open\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+- pr_err("%s: EEPROM i2c gate open failure\n",
+- dev->name);
++ pr_err("%s: EEPROM i2c gate close failure\n",
++ dev->name);
+ }
+ }
+
+diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
+index 6d8e4afe9673..8c56d4c37a52 100644
+--- a/drivers/media/pci/saa7146/hexium_gemini.c
++++ b/drivers/media/pci/saa7146/hexium_gemini.c
+@@ -304,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
+ ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
+ if (ret < 0) {
+ pr_err("cannot register capture v4l2 device. skipping.\n");
++ saa7146_vv_release(dev);
++ i2c_del_adapter(&hexium->i2c_adapter);
++ kfree(hexium);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
+index 5ddb2321e9e4..0fe9be93fabe 100644
+--- a/drivers/media/platform/exynos4-is/fimc-is.c
++++ b/drivers/media/platform/exynos4-is/fimc-is.c
+@@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ is->pmu_regs = of_iomap(node, 0);
++ of_node_put(node);
+ if (!is->pmu_regs)
+ return -ENOMEM;
+
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index deb499f76412..b5993532831d 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -498,6 +498,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+ continue;
+
+ ret = fimc_md_parse_port_node(fmd, port, index);
++ of_node_put(port);
+ if (ret < 0) {
+ of_node_put(node);
+ goto rpm_put;
+@@ -531,6 +532,7 @@ static int __of_get_csis_id(struct device_node *np)
+ if (!np)
+ return -EINVAL;
+ of_property_read_u32(np, "reg", &reg);
++ of_node_put(np);
+ return reg - FIMC_INPUT_MIPI_CSI2_0;
+ }
+
+diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
+index 0273302aa741..83086eea1450 100644
+--- a/drivers/media/platform/fsl-viu.c
++++ b/drivers/media/platform/fsl-viu.c
+@@ -37,7 +37,7 @@
+ #define VIU_VERSION "0.5.1"
+
+ /* Allow building this driver with COMPILE_TEST */
+-#ifndef CONFIG_PPC
++#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
+ #define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
+ #define in_be32(a) ioread32be((void __iomem *)a)
+ #endif
+diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+index bbb24fb95b95..3deb0549b1a1 100644
+--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+@@ -118,7 +118,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
+ mutex_init(&mdp->vpulock);
+
+ /* Old dts had the components as child nodes */
+- if (of_get_next_child(dev->of_node, NULL)) {
++ node = of_get_next_child(dev->of_node, NULL);
++ if (node) {
++ of_node_put(node);
+ parent = dev->of_node;
+ dev_warn(dev, "device tree is out of date\n");
+ } else {
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 432bc7fbedc9..addd03b51748 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -722,6 +722,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
+ s_stream, mode);
+ pipe->do_propagation = true;
+ }
++
++ /* Stop at the first external sub-device. */
++ if (subdev->dev != isp->dev)
++ break;
+ }
+
+ return 0;
+@@ -836,6 +840,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
+ &subdev->entity);
+ failure = -ETIMEDOUT;
+ }
++
++ /* Stop at the first external sub-device. */
++ if (subdev->dev != isp->dev)
++ break;
+ }
+
+ return failure;
+diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
+index 77b73e27a274..412438dce285 100644
+--- a/drivers/media/platform/omap3isp/ispccdc.c
++++ b/drivers/media/platform/omap3isp/ispccdc.c
+@@ -2605,6 +2605,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ int ret;
+
+ /* Register the subdev and video node. */
++ ccdc->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
+index e062939d0d05..47b0d3fe87d8 100644
+--- a/drivers/media/platform/omap3isp/ispccp2.c
++++ b/drivers/media/platform/omap3isp/ispccp2.c
+@@ -1034,6 +1034,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ ccp2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
+index a4d3d030e81e..e45292a1bf6c 100644
+--- a/drivers/media/platform/omap3isp/ispcsi2.c
++++ b/drivers/media/platform/omap3isp/ispcsi2.c
+@@ -1201,6 +1201,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ csi2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
+index 3195f7c8b8b7..591c6de498f8 100644
+--- a/drivers/media/platform/omap3isp/isppreview.c
++++ b/drivers/media/platform/omap3isp/isppreview.c
+@@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ prev->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &prev->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
+index 0b6a87508584..2035e3c6a9de 100644
+--- a/drivers/media/platform/omap3isp/ispresizer.c
++++ b/drivers/media/platform/omap3isp/ispresizer.c
+@@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ res->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &res->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
+index 47353fee26c3..bfa2d0504646 100644
+--- a/drivers/media/platform/omap3isp/ispstat.c
++++ b/drivers/media/platform/omap3isp/ispstat.c
+@@ -1029,6 +1029,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
+ int omap3isp_stat_register_entities(struct ispstat *stat,
+ struct v4l2_device *vdev)
+ {
++ stat->subdev.dev = vdev->mdev->dev;
++
+ return v4l2_device_register_subdev(vdev, &stat->subdev);
+ }
+
+diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
+index 0d1467028811..5a30f1d84fe1 100644
+--- a/drivers/media/platform/rcar_fdp1.c
++++ b/drivers/media/platform/rcar_fdp1.c
+@@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+- dev_err(&pdev->dev, "FCP not found (%ld)\n",
++ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
+index 26289adaf658..a5634ca85a31 100644
+--- a/drivers/media/platform/vsp1/vsp1_dl.c
++++ b/drivers/media/platform/vsp1/vsp1_dl.c
+@@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
+
+ /* Get a default body for our list. */
+ dl->body0 = vsp1_dl_body_get(dlm->pool);
+- if (!dl->body0)
++ if (!dl->body0) {
++ kfree(dl);
+ return NULL;
++ }
+
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
+
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
+index 313a95f195a2..19e381dd5808 100644
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c
+@@ -743,7 +743,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+ /* start radio */
+ retval = si470x_start_usb(radio);
+ if (retval < 0)
+- goto err_all;
++ goto err_buf;
+
+ /* set initial frequency */
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+@@ -758,6 +758,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+
+ return 0;
+ err_all:
++ usb_kill_urb(radio->int_in_urb);
++err_buf:
+ kfree(radio->buffer);
+ err_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+@@ -831,6 +833,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
+ mutex_lock(&radio->lock);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ video_unregister_device(&radio->videodev);
++ usb_kill_urb(radio->int_in_urb);
+ usb_set_intfdata(intf, NULL);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
+diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
+index 7daac8bab83b..6f3030b2054d 100644
+--- a/drivers/media/rc/iguanair.c
++++ b/drivers/media/rc/iguanair.c
+@@ -424,6 +424,10 @@ static int iguanair_probe(struct usb_interface *intf,
+ int ret, pipein, pipeout;
+ struct usb_host_interface *idesc;
+
++ idesc = intf->altsetting;
++ if (idesc->desc.bNumEndpoints < 2)
++ return -ENODEV;
++
+ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
+ if (!ir || !rc) {
+@@ -438,18 +442,13 @@ static int iguanair_probe(struct usb_interface *intf,
+ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
+ ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
+
+- if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
++ if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
++ !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
++ !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- idesc = intf->altsetting;
+-
+- if (idesc->desc.bNumEndpoints < 2) {
+- ret = -ENODEV;
+- goto out;
+- }
+-
+ ir->rc = rc;
+ ir->dev = &intf->dev;
+ ir->udev = udev;
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 1041c056854d..f23a220352f7 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1835,12 +1835,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
+ break;
+ /* iMON VFD, MCE IR */
+ case 0x46:
+- case 0x7e:
+ case 0x9e:
+ dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ allowed_protos = RC_PROTO_BIT_RC6_MCE;
+ break;
++ /* iMON VFD, iMON or MCE IR */
++ case 0x7e:
++ dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
++ detected_display_type = IMON_DISPLAY_TYPE_VFD;
++ allowed_protos |= RC_PROTO_BIT_RC6_MCE;
++ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index 4c0c8008872a..f1dfb8409432 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -42,21 +42,22 @@
+ #include <linux/pm_wakeup.h>
+ #include <media/rc-core.h>
+
+-#define DRIVER_VERSION "1.94"
++#define DRIVER_VERSION "1.95"
+ #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
+ #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
+ "device driver"
+ #define DRIVER_NAME "mceusb"
+
++#define USB_TX_TIMEOUT 1000 /* in milliseconds */
+ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
+ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
+
+ /* MCE constants */
+-#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
++#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
+ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */
+-#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
+-#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
+-#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
++#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
++#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
++ /* Actual format is 0x80 + num_bytes */
+ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
+ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
+ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
+@@ -609,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ if (len <= skip)
+ return;
+
+- dev_dbg(dev, "%cx data: %*ph (length=%d)",
+- (out ? 't' : 'r'),
+- min(len, buf_len - offset), buf + offset, len);
++ dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
++ (out ? 't' : 'r'), offset,
++ min(len, buf_len - offset), buf + offset, len, buf_len);
+
+ inout = out ? "Request" : "Got";
+
+@@ -733,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ case MCE_RSP_CMD_ILLEGAL:
+ dev_dbg(dev, "Illegal PORT_IR command");
+ break;
++ case MCE_RSP_TX_TIMEOUT:
++ dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
++ break;
+ default:
+ dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
+ cmd, subcmd);
+@@ -747,13 +751,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ dev_dbg(dev, "End of raw IR data");
+ else if ((cmd != MCE_CMD_PORT_IR) &&
+ ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
+- dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
++ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
++ cmd & MCE_PACKET_LENGTH_MASK);
+ #endif
+ }
+
+ /*
+ * Schedule work that can't be done in interrupt handlers
+- * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
++ * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
+ * Invokes mceusb_deferred_kevent() for recovering from
+ * error events specified by the kevent bit field.
+ */
+@@ -766,23 +771,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
+ dev_dbg(ir->dev, "kevent %d scheduled", kevent);
+ }
+
+-static void mce_async_callback(struct urb *urb)
++static void mce_write_callback(struct urb *urb)
+ {
+- struct mceusb_dev *ir;
+- int len;
+-
+ if (!urb)
+ return;
+
+- ir = urb->context;
++ complete(urb->context);
++}
++
++/*
++ * Write (TX/send) data to MCE device USB endpoint out.
++ * Used for IR blaster TX and MCE device commands.
++ *
++ * Return: The number of bytes written (> 0) or errno (< 0).
++ */
++static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
++{
++ int ret;
++ struct urb *urb;
++ struct device *dev = ir->dev;
++ unsigned char *buf_out;
++ struct completion tx_done;
++ unsigned long expire;
++ unsigned long ret_wait;
++
++ mceusb_dev_printdata(ir, data, size, 0, size, true);
++
++ urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (unlikely(!urb)) {
++ dev_err(dev, "Error: mce write couldn't allocate urb");
++ return -ENOMEM;
++ }
++
++ buf_out = kmalloc(size, GFP_KERNEL);
++ if (!buf_out) {
++ usb_free_urb(urb);
++ return -ENOMEM;
++ }
++
++ init_completion(&tx_done);
++
++ /* outbound data */
++ if (usb_endpoint_xfer_int(ir->usb_ep_out))
++ usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
++ buf_out, size, mce_write_callback, &tx_done,
++ ir->usb_ep_out->bInterval);
++ else
++ usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
++ buf_out, size, mce_write_callback, &tx_done);
++ memcpy(buf_out, data, size);
++
++ ret = usb_submit_urb(urb, GFP_KERNEL);
++ if (ret) {
++ dev_err(dev, "Error: mce write submit urb error = %d", ret);
++ kfree(buf_out);
++ usb_free_urb(urb);
++ return ret;
++ }
++
++ expire = msecs_to_jiffies(USB_TX_TIMEOUT);
++ ret_wait = wait_for_completion_timeout(&tx_done, expire);
++ if (!ret_wait) {
++ dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
++ expire, USB_TX_TIMEOUT);
++ usb_kill_urb(urb);
++ ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
++ } else {
++ ret = urb->status;
++ }
++ if (ret >= 0)
++ ret = urb->actual_length; /* bytes written */
+
+ switch (urb->status) {
+ /* success */
+ case 0:
+- len = urb->actual_length;
+-
+- mceusb_dev_printdata(ir, urb->transfer_buffer, len,
+- 0, len, true);
+ break;
+
+ case -ECONNRESET:
+@@ -792,140 +854,135 @@ static void mce_async_callback(struct urb *urb)
+ break;
+
+ case -EPIPE:
+- dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
++ dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
+ urb->status);
+ mceusb_defer_kevent(ir, EVENT_TX_HALT);
+ break;
+
+ default:
+- dev_err(ir->dev, "Error: request urb status = %d", urb->status);
++ dev_err(ir->dev, "Error: mce write urb status = %d",
++ urb->status);
+ break;
+ }
+
+- /* the transfer buffer and urb were allocated in mce_request_packet */
+- kfree(urb->transfer_buffer);
+- usb_free_urb(urb);
+-}
+-
+-/* request outgoing (send) usb packet - used to initialize remote */
+-static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
+- int size)
+-{
+- int res;
+- struct urb *async_urb;
+- struct device *dev = ir->dev;
+- unsigned char *async_buf;
++ dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
++ ret, ret_wait, expire, USB_TX_TIMEOUT,
++ urb->actual_length, urb->status);
+
+- async_urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (unlikely(!async_urb)) {
+- dev_err(dev, "Error, couldn't allocate urb!");
+- return;
+- }
+-
+- async_buf = kmalloc(size, GFP_KERNEL);
+- if (!async_buf) {
+- usb_free_urb(async_urb);
+- return;
+- }
+-
+- /* outbound data */
+- if (usb_endpoint_xfer_int(ir->usb_ep_out))
+- usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
+- async_buf, size, mce_async_callback, ir,
+- ir->usb_ep_out->bInterval);
+- else
+- usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
+- async_buf, size, mce_async_callback, ir);
+-
+- memcpy(async_buf, data, size);
+-
+- dev_dbg(dev, "send request called (size=%#x)", size);
++ kfree(buf_out);
++ usb_free_urb(urb);
+
+- res = usb_submit_urb(async_urb, GFP_ATOMIC);
+- if (res) {
+- dev_err(dev, "send request FAILED! (res=%d)", res);
+- kfree(async_buf);
+- usb_free_urb(async_urb);
+- return;
+- }
+- dev_dbg(dev, "send request complete (res=%d)", res);
++ return ret;
+ }
+
+-static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
++static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
+ {
+ int rsize = sizeof(DEVICE_RESUME);
+
+ if (ir->need_reset) {
+ ir->need_reset = false;
+- mce_request_packet(ir, DEVICE_RESUME, rsize);
++ mce_write(ir, DEVICE_RESUME, rsize);
+ msleep(10);
+ }
+
+- mce_request_packet(ir, data, size);
++ mce_write(ir, data, size);
+ msleep(10);
+ }
+
+-/* Send data out the IR blaster port(s) */
++/*
++ * Transmit IR out the MCE device IR blaster port(s).
++ *
++ * Convert IR pulse/space sequence from LIRC to MCE format.
++ * Break up a long IR sequence into multiple parts (MCE IR data packets).
++ *
++ * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
++ * Pulses and spaces are implicit by their position.
++ * The first IR sample, txbuf[0], is always a pulse.
++ *
++ * u8 irbuf[] consists of multiple IR data packets for the MCE device.
++ * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
++ * An IR sample is a 1-bit pulse/space flag with a 7-bit time
++ * in MCE time units (50usec).
++ *
++ * Return: The number of IR samples sent (> 0) or errno (< 0).
++ */
+ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
+ {
+ struct mceusb_dev *ir = dev->priv;
+- int i, length, ret = 0;
+- int cmdcount = 0;
+- unsigned char cmdbuf[MCE_CMDBUF_SIZE];
+-
+- /* MCE tx init header */
+- cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
+- cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
+- cmdbuf[cmdcount++] = ir->tx_mask;
++ u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
++ u8 irbuf[MCE_IRBUF_SIZE];
++ int ircount = 0;
++ unsigned int irsample;
++ int i, length, ret;
+
+ /* Send the set TX ports command */
+- mce_async_out(ir, cmdbuf, cmdcount);
+- cmdcount = 0;
+-
+- /* Generate mce packet data */
+- for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
+- txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
+-
+- do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
+-
+- /* Insert mce packet header every 4th entry */
+- if ((cmdcount < MCE_CMDBUF_SIZE) &&
+- (cmdcount % MCE_CODE_LENGTH) == 0)
+- cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
+-
+- /* Insert mce packet data */
+- if (cmdcount < MCE_CMDBUF_SIZE)
+- cmdbuf[cmdcount++] =
+- (txbuf[i] < MCE_PULSE_BIT ?
+- txbuf[i] : MCE_MAX_PULSE_LENGTH) |
+- (i & 1 ? 0x00 : MCE_PULSE_BIT);
+- else {
+- ret = -EINVAL;
+- goto out;
++ cmdbuf[2] = ir->tx_mask;
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
++
++ /* Generate mce IR data packet */
++ for (i = 0; i < count; i++) {
++ irsample = txbuf[i] / MCE_TIME_UNIT;
++
++ /* loop to support long pulses/spaces > 6350us (127*50us) */
++ while (irsample > 0) {
++ /* Insert IR header every 30th entry */
++ if (ircount % MCE_PACKET_SIZE == 0) {
++ /* Room for IR header and one IR sample? */
++ if (ircount >= MCE_IRBUF_SIZE - 1) {
++ /* Send near full buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
++ ircount = 0;
++ }
++ irbuf[ircount++] = MCE_IRDATA_HEADER;
+ }
+
+- } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
+- (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
+- }
+-
+- /* Check if we have room for the empty packet at the end */
+- if (cmdcount >= MCE_CMDBUF_SIZE) {
+- ret = -EINVAL;
+- goto out;
+- }
++ /* Insert IR sample */
++ if (irsample <= MCE_MAX_PULSE_LENGTH) {
++ irbuf[ircount] = irsample;
++ irsample = 0;
++ } else {
++ irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
++ irsample -= MCE_MAX_PULSE_LENGTH;
++ }
++ /*
++ * Even i = IR pulse
++ * Odd i = IR space
++ */
++ irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
++ ircount++;
++
++ /* IR buffer full? */
++ if (ircount >= MCE_IRBUF_SIZE) {
++ /* Fix packet length in last header */
++ length = ircount % MCE_PACKET_SIZE;
++ if (length > 0)
++ irbuf[ircount - length] -=
++ MCE_PACKET_SIZE - length;
++ /* Send full buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
++ ircount = 0;
++ }
++ }
++ } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
+
+ /* Fix packet length in last header */
+- length = cmdcount % MCE_CODE_LENGTH;
+- cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
++ length = ircount % MCE_PACKET_SIZE;
++ if (length > 0)
++ irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
+
+- /* All mce commands end with an empty packet (0x80) */
+- cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
++ /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
++ irbuf[ircount++] = MCE_IRDATA_TRAILER;
+
+- /* Transmit the command to the mce device */
+- mce_async_out(ir, cmdbuf, cmdcount);
++ /* Send final buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
+
+-out:
+- return ret ? ret : count;
++ return count;
+ }
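/*
 * Illustrative, standalone sketch (not part of the patch) of the packet
 * layout described in the mceusb_tx_ir() comment above: each MCE IR data
 * packet is one header byte followed by up to 30 samples, a sample being a
 * 7-bit duration in 50 usec units with bit 7 set for pulses, and the stream
 * ends with an empty 0x80 packet.  That the header carries the payload count
 * in its low bits is inferred from the "fix packet length" adjustment in the
 * function above; all names and values here are restated assumptions, not
 * the driver's own definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT_US    50              /* one MCE time unit */
#define MAX_SAMPLE      0x7f            /* 7-bit duration field */
#define PULSE_BIT       0x80            /* set for pulses, clear for spaces */
#define SAMPLES_PER_PKT 30              /* payload bytes per full packet */
#define PKT_HEADER(n)   (0x80 | (n))    /* header encodes the payload length */
#define IRDATA_TRAILER  0x80            /* empty packet terminates the stream */

/*
 * Encode usec durations (txbuf[0] is a pulse) into out[].  Returns the number
 * of bytes written, or 0 if out[] is too small.
 */
static size_t encode_mce(const unsigned *txbuf, size_t count,
                         uint8_t *out, size_t outlen)
{
        size_t used = 0, pkt_start = 0, in_pkt = 0;

        for (size_t i = 0; i < count; i++) {
                unsigned units = txbuf[i] / TIME_UNIT_US;

                while (units > 0) {    /* long durations span several samples */
                        unsigned chunk = units > MAX_SAMPLE ? MAX_SAMPLE : units;

                        if (in_pkt == 0) {              /* start a new packet */
                                if (used >= outlen)
                                        return 0;
                                pkt_start = used;
                                out[used++] = PKT_HEADER(SAMPLES_PER_PKT);
                        }
                        if (used >= outlen)
                                return 0;
                        out[used++] = (uint8_t)chunk | ((i & 1) ? 0 : PULSE_BIT);
                        if (++in_pkt == SAMPLES_PER_PKT)
                                in_pkt = 0;
                        units -= chunk;
                }
        }

        if (in_pkt)     /* shrink the header of a final, partial packet */
                out[pkt_start] = PKT_HEADER((unsigned)in_pkt);
        if (used >= outlen)
                return 0;
        out[used++] = IRDATA_TRAILER;
        return used;
}

int main(void)
{
        unsigned demo[] = { 9000, 4500, 560, 560, 560, 1690 }; /* arbitrary demo values */
        uint8_t buf[128];
        size_t n = encode_mce(demo, sizeof(demo) / sizeof(demo[0]),
                              buf, sizeof(buf));

        for (size_t i = 0; i < n; i++)
                printf("%02x%c", (unsigned)buf[i], i + 1 == n ? '\n' : ' ');
        return 0;
}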
+
+ /* Sets active IR outputs -- mce devices typically have two */
+@@ -965,7 +1022,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+ cmdbuf[2] = MCE_CMD_SIG_END;
+ cmdbuf[3] = MCE_IRDATA_TRAILER;
+ dev_dbg(ir->dev, "disabling carrier modulation");
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ return 0;
+ }
+
+@@ -979,7 +1036,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+ carrier);
+
+ /* Transmit new carrier to mce device */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ return 0;
+ }
+ }
+@@ -1002,10 +1059,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
+ cmdbuf[2] = units >> 8;
+ cmdbuf[3] = units;
+
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+
+ /* get receiver timeout value */
+- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
++ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+
+ return 0;
+ }
+@@ -1030,7 +1087,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
+ ir->wideband_rx_enabled = false;
+ cmdbuf[2] = 1; /* port 1 is long range receiver */
+ }
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ /* response from device sets ir->learning_active */
+
+ return 0;
+@@ -1053,7 +1110,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
+ ir->carrier_report_enabled = true;
+ if (!ir->learning_active) {
+ cmdbuf[2] = 2; /* port 2 is short range receiver */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ } else {
+ ir->carrier_report_enabled = false;
+@@ -1064,7 +1121,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
+ */
+ if (ir->learning_active && !ir->wideband_rx_enabled) {
+ cmdbuf[2] = 1; /* port 1 is long range receiver */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ }
+
+@@ -1143,6 +1200,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+ }
+ break;
+ case MCE_RSP_CMD_ILLEGAL:
++ case MCE_RSP_TX_TIMEOUT:
+ ir->need_reset = true;
+ break;
+ default:
+@@ -1280,7 +1338,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
+ {
+ /* If we get no reply or an illegal command reply, its ver 1, says MS */
+ ir->emver = 1;
+- mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
++ mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
+ }
+
+ static void mceusb_gen1_init(struct mceusb_dev *ir)
+@@ -1326,10 +1384,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
+ dev_dbg(dev, "set handshake - retC = %d", ret);
+
+ /* device resume */
+- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+
+ /* get hw/sw revision? */
+- mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
++ mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
+
+ kfree(data);
+ }
+@@ -1337,13 +1395,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
+ static void mceusb_gen2_init(struct mceusb_dev *ir)
+ {
+ /* device resume */
+- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+
+ /* get wake version (protocol, key, address) */
+- mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
++ mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+
+ /* unknown what this one actually returns... */
+- mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
++ mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
+ }
+
+ static void mceusb_get_parameters(struct mceusb_dev *ir)
+@@ -1357,24 +1415,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
+ ir->num_rxports = 2;
+
+ /* get number of tx and rx ports */
+- mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
++ mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+
+ /* get the carrier and frequency */
+- mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
++ mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
+
+ if (ir->num_txports && !ir->flags.no_tx)
+ /* get the transmitter bitmask */
+- mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
++ mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
+
+ /* get receiver timeout value */
+- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
++ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+
+ /* get receiver sensor setting */
+- mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
++ mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+
+ for (i = 0; i < ir->num_txports; i++) {
+ cmdbuf[2] = i;
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ }
+
+@@ -1383,7 +1441,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
+ if (ir->emver < 2)
+ return;
+
+- mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
++ mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
+ }
+
+ /*
+diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
+index e42efd9d382e..d37b85d2bc75 100644
+--- a/drivers/media/rc/mtk-cir.c
++++ b/drivers/media/rc/mtk-cir.c
+@@ -44,6 +44,11 @@
+ /* Fields containing pulse width data */
+ #define MTK_WIDTH_MASK (GENMASK(7, 0))
+
++/* IR threshold */
++#define MTK_IRTHD 0x14
++#define MTK_DG_CNT_MASK (GENMASK(12, 8))
++#define MTK_DG_CNT(x) ((x) << 8)
++
+ /* Bit to enable interrupt */
+ #define MTK_IRINT_EN BIT(0)
+
+@@ -409,6 +414,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
+ mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
+ ir->data->fields[MTK_HW_PERIOD].reg);
+
++ /* Set de-glitch counter */
++ mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
++
+ /* Enable IR and PWM */
+ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
+ val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
+diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
+index f5b04594e209..4c191fcd3a7f 100644
+--- a/drivers/media/usb/cpia2/cpia2_usb.c
++++ b/drivers/media/usb/cpia2/cpia2_usb.c
+@@ -685,6 +685,10 @@ static int submit_urbs(struct camera_data *cam)
+ if (!urb) {
+ for (j = 0; j < i; j++)
+ usb_free_urb(cam->sbuf[j].urb);
++ for (j = 0; j < NUM_SBUF; j++) {
++ kfree(cam->sbuf[j].data);
++ cam->sbuf[j].data = NULL;
++ }
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 091389fdf89e..c8d79502827b 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2442,9 +2442,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+ 8, 0x0486,
+ };
+
++ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
++ return -ENODEV;
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
++ if (!i2c)
++ return -ENODEV;
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
+ return -ENODEV;
+ dib0700_set_i2c_speed(adap->dev, 1500);
+@@ -2520,10 +2524,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+ 0, 0x00ef,
+ 8, 0x0406,
+ };
++ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
++ return -ENODEV;
+ i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
++ if (!i2c)
++ return -ENODEV;
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
+ return -ENODEV;
+
+diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
+index 0af74383083d..ae793dac4964 100644
+--- a/drivers/media/usb/dvb-usb/pctv452e.c
++++ b/drivers/media/usb/dvb-usb/pctv452e.c
+@@ -913,14 +913,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
+ &a->dev->i2c_adap);
+ if (!a->fe_adap[0].fe)
+ return -ENODEV;
+-
+- /*
+- * dvb_frontend will call dvb_detach for both stb0899_detach
+- * and stb0899_release but we only do dvb_attach(stb0899_attach).
+- * Increment the module refcount instead.
+- */
+- symbol_get(stb0899_attach);
+-
+ if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
+ &a->dev->i2c_adap)) == NULL)
+ err("Cannot attach lnbp22\n");
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 87b887b7604e..3f59a98dbf9a 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
+ dev->dev_next->disconnected = 1;
+ dev_info(&dev->intf->dev, "Disconnecting %s\n",
+ dev->dev_next->name);
+- flush_request_modules(dev->dev_next);
+ }
+
+ dev->disconnected = 1;
+diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
+index 989ae997f66d..89b9293b31be 100644
+--- a/drivers/media/usb/gspca/konica.c
++++ b/drivers/media/usb/gspca/konica.c
+@@ -123,6 +123,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, 2);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
+index bedc04a72e97..bde4441f935e 100644
+--- a/drivers/media/usb/gspca/nw80x.c
++++ b/drivers/media/usb/gspca/nw80x.c
+@@ -1581,6 +1581,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ return;
+ }
+ if (len == 1)
+diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
+index 10fcbe9e8614..cb41e61d50dd 100644
+--- a/drivers/media/usb/gspca/ov519.c
++++ b/drivers/media/usb/gspca/ov519.c
+@@ -2083,6 +2083,11 @@ static int reg_r(struct sd *sd, u16 index)
+ } else {
+ gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+
+ return ret;
+@@ -2111,6 +2116,11 @@ static int reg_r8(struct sd *sd,
+ } else {
+ gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, 8);
+ }
+
+ return ret;
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index d06dc0755b9a..9e3326b66c79 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -642,6 +642,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
+ if (ret < 0) {
+ pr_err("read failed %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+ return gspca_dev->usb_buf[0];
+ }
+diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
+index 3d1364d2f83e..4d4ae22e9640 100644
+--- a/drivers/media/usb/gspca/ov534_9.c
++++ b/drivers/media/usb/gspca/ov534_9.c
+@@ -1154,6 +1154,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ return 0;
+ }
+ return gspca_dev->usb_buf[0];
+ }
+diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
+index 477da0664b7d..40b87717bb5c 100644
+--- a/drivers/media/usb/gspca/se401.c
++++ b/drivers/media/usb/gspca/se401.c
+@@ -111,6 +111,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
+ pr_err("read req failed req %#04x error %d\n",
+ req, err);
+ gspca_dev->usb_err = err;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
+index cfa2a04d9f3f..efca54ee0f35 100644
+--- a/drivers/media/usb/gspca/sn9c20x.c
++++ b/drivers/media/usb/gspca/sn9c20x.c
+@@ -132,6 +132,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
+ }
+ },
++ {
++ .ident = "MSI MS-1039",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
++ }
++ },
+ {
+ .ident = "MSI MS-1632",
+ .matches = {
+@@ -918,6 +925,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
+ if (unlikely(result < 0 || result != length)) {
+ pr_err("Read register %02x failed %d\n", reg, result);
+ gspca_dev->usb_err = result;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
+index 5f3f2979540a..22de65d840dd 100644
+--- a/drivers/media/usb/gspca/sonixb.c
++++ b/drivers/media/usb/gspca/sonixb.c
+@@ -462,6 +462,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ dev_err(gspca_dev->v4l2_dev.dev,
+ "Error reading register %02x: %d\n", value, res);
+ gspca_dev->usb_err = res;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
+index df8d8482b795..fa108ce000ad 100644
+--- a/drivers/media/usb/gspca/sonixj.c
++++ b/drivers/media/usb/gspca/sonixj.c
+@@ -1171,6 +1171,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
+index d25924e430f3..a20eb8580db2 100644
+--- a/drivers/media/usb/gspca/spca1528.c
++++ b/drivers/media/usb/gspca/spca1528.c
+@@ -80,6 +80,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
+index d7cbcf2b3947..3521f5ff428e 100644
+--- a/drivers/media/usb/gspca/sq930x.c
++++ b/drivers/media/usb/gspca/sq930x.c
+@@ -434,6 +434,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r %04x failed %d\n", value, ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
+index 437a3367ab97..26eae69a2562 100644
+--- a/drivers/media/usb/gspca/sunplus.c
++++ b/drivers/media/usb/gspca/sunplus.c
+@@ -264,6 +264,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
+index 52d071659634..6e32264d3825 100644
+--- a/drivers/media/usb/gspca/vc032x.c
++++ b/drivers/media/usb/gspca/vc032x.c
+@@ -2915,6 +2915,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+ static void reg_r(struct gspca_dev *gspca_dev,
+diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
+index abfab3de1866..ef0a839f9b8a 100644
+--- a/drivers/media/usb/gspca/w996Xcf.c
++++ b/drivers/media/usb/gspca/w996Xcf.c
+@@ -143,6 +143,11 @@ static int w9968cf_read_sb(struct sd *sd)
+ } else {
+ pr_err("Read SB reg [01] failed\n");
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(sd->gspca_dev.usb_buf, 0, 2);
+ }
+
+ udelay(W9968CF_I2C_BUS_DELAY);
+diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
+index 29ac7fc5b039..3316a17c141b 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-core.c
++++ b/drivers/media/usb/hdpvr/hdpvr-core.c
+@@ -141,6 +141,7 @@ static int device_authorization(struct hdpvr_device *dev)
+
+ dev->fw_ver = dev->usbc_buf[1];
+
++ dev->usbc_buf[46] = '\0';
+ v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
+ dev->fw_ver, &dev->usbc_buf[2]);
+
+@@ -275,6 +276,7 @@ static int hdpvr_probe(struct usb_interface *interface,
+ #endif
+ size_t buffer_size;
+ int i;
++ int dev_num;
+ int retval = -ENOMEM;
+
+ /* allocate memory for our device state and initialize it */
+@@ -372,8 +374,17 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ #endif
+
++ dev_num = atomic_inc_return(&dev_nr);
++ if (dev_num >= HDPVR_MAX) {
++ v4l2_err(&dev->v4l2_dev,
++ "max device number reached, device register failed\n");
++ atomic_dec(&dev_nr);
++ retval = -ENODEV;
++ goto reg_fail;
++ }
++
+ retval = hdpvr_register_videodev(dev, &interface->dev,
+- video_nr[atomic_inc_return(&dev_nr)]);
++ video_nr[dev_num]);
+ if (retval < 0) {
+ v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
+ goto reg_fail;
+diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+index 44ca66cb9b8f..f34efa7c61b4 100644
+--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
++++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+@@ -329,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
+
+ dprintk("%s\n", __func__);
+
+- b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
++ b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
+index b299a24d33f9..d206f2de80d2 100644
+--- a/drivers/mmc/core/sdio_irq.c
++++ b/drivers/mmc/core/sdio_irq.c
+@@ -35,6 +35,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
+ {
+ struct mmc_card *card = host->card;
+ int i, ret, count;
++ bool sdio_irq_pending = host->sdio_irq_pending;
+ unsigned char pending;
+ struct sdio_func *func;
+
+@@ -42,13 +43,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
+ if (mmc_card_suspended(card))
+ return 0;
+
++ /* Clear the flag to indicate that we have processed the IRQ. */
++ host->sdio_irq_pending = false;
++
+ /*
+ * Optimization, if there is only 1 function interrupt registered
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
+ */
+ func = card->sdio_single_irq;
+- if (func && host->sdio_irq_pending) {
++ if (func && sdio_irq_pending) {
+ func->irq_handler(func);
+ return 1;
+ }
+@@ -100,7 +104,6 @@ void sdio_run_irqs(struct mmc_host *host)
+ {
+ mmc_claim_host(host);
+ if (host->sdio_irqs) {
+- host->sdio_irq_pending = true;
+ process_sdio_pending_irqs(host);
+ if (host->ops->ack_sdio_irq)
+ host->ops->ack_sdio_irq(host);
+@@ -119,6 +122,7 @@ void sdio_irq_work(struct work_struct *work)
+
+ void sdio_signal_irq(struct mmc_host *host)
+ {
++ host->sdio_irq_pending = true;
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ }
+ EXPORT_SYMBOL_GPL(sdio_signal_irq);
+@@ -164,7 +168,6 @@ static int sdio_irq_thread(void *_host)
+ if (ret)
+ break;
+ ret = process_sdio_pending_irqs(host);
+- host->sdio_irq_pending = false;
+ mmc_release_host(host);
+
+ /*
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 942da07c9eb8..22c454c7aaca 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -3486,6 +3486,10 @@ int dw_mci_runtime_resume(struct device *dev)
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(host->slot, true);
+
++ /* Re-enable SDIO interrupts. */
++ if (sdio_irq_claimed(host->slot->mmc))
++ __dw_mci_enable_sdio_irq(host->slot, 1);
++
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c749d3dc1d36..eb33b892b484 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1713,7 +1713,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+- else if (timing == MMC_TIMING_UHS_SDR25)
++ else if (timing == MMC_TIMING_SD_HS ||
++ timing == MMC_TIMING_MMC_HS ||
++ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index 8459115d9d4e..553776cc1d29 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
+ static void arcnet_rx(struct net_device *dev, int bufnum)
+ {
+ struct arcnet_local *lp = netdev_priv(dev);
+- struct archdr pkt;
++ union {
++ struct archdr pkt;
++ char buf[512];
++ } rxdata;
+ struct arc_rfc1201 *soft;
+ int length, ofs;
+
+- soft = &pkt.soft.rfc1201;
++ soft = &rxdata.pkt.soft.rfc1201;
+
+- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
+- if (pkt.hard.offset[0]) {
+- ofs = pkt.hard.offset[0];
++ lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
++ if (rxdata.pkt.hard.offset[0]) {
++ ofs = rxdata.pkt.hard.offset[0];
+ length = 256 - ofs;
+ } else {
+- ofs = pkt.hard.offset[1];
++ ofs = rxdata.pkt.hard.offset[1];
+ length = 512 - ofs;
+ }
+
+ /* get the full header, if possible */
+- if (sizeof(pkt.soft) <= length) {
+- lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
++ if (sizeof(rxdata.pkt.soft) <= length) {
++ lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
+ } else {
+- memset(&pkt.soft, 0, sizeof(pkt.soft));
++ memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
+ lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
+ }
+
+ arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
+- bufnum, pkt.hard.source, pkt.hard.dest, length);
++ bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length + ARC_HDR_SIZE;
+@@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+ if (arc_proto_map[soft->proto]->is_ip) {
+ if (BUGLVL(D_PROTO)) {
+ struct ArcProto
+- *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
++ *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
+ *newp = arc_proto_map[soft->proto];
+
+ if (oldp != newp) {
+ arc_printk(D_PROTO, dev,
+ "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
+- soft->proto, pkt.hard.source,
++ soft->proto, rxdata.pkt.hard.source,
+ newp->suffix, oldp->suffix);
+ }
+ }
+@@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+ lp->default_proto[0] = soft->proto;
+
+ /* in striking contrast, the following isn't a hack. */
+- lp->default_proto[pkt.hard.source] = soft->proto;
++ lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
+ }
+ /* call the protocol-specific receiver. */
+- arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
++ arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
+ }
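/*
 * Standalone sketch (not part of the patch) of the pattern adopted in
 * arcnet_rx() above: overlay the small header struct on a 512-byte buffer
 * with a union, so that copying up to 512 bytes from the card can never
 * overrun the on-stack object.  struct fake_hdr is a stand-in, not the real
 * struct archdr, and copy_from_card() here is a plain memcpy().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_hdr {               /* stand-in for the hardware + soft header */
        uint8_t source;
        uint8_t dest;
        uint8_t offset[2];
        uint8_t proto;
};

static uint8_t card_buf[512];   /* pretend "card memory" */

static void copy_from_card(void *dst, size_t off, size_t len)
{
        memcpy(dst, card_buf + off, len);
}

int main(void)
{
        union {
                struct fake_hdr pkt;
                uint8_t buf[512];       /* backing store for long copies */
        } rxdata;

        memset(card_buf, 0xab, sizeof(card_buf));
        card_buf[0] = 0x01;             /* source */
        card_buf[1] = 0x02;             /* dest */

        /* header first, then a copy far larger than the header itself */
        copy_from_card(&rxdata.pkt, 0, sizeof(rxdata.pkt));
        copy_from_card(rxdata.buf, 0, sizeof(rxdata.buf)); /* 512 bytes fit */

        printf("from %02x to %02x\n",
               (unsigned)rxdata.pkt.source, (unsigned)rxdata.pkt.dest);
        return 0;
}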
+
+ static void null_rx(struct net_device *dev, int bufnum,
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index cdae0efde8e6..7998a73b6a0f 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+ else
+ phy_reg |= 0xFA;
+ e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
++
++ if (speed == SPEED_1000) {
++ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
++ &phy_reg);
++
++ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
++
++ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
++ phy_reg);
++ }
+ }
+ hw->phy.ops.release(hw);
+
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index eb09c755fa17..1502895eb45d 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -210,7 +210,7 @@
+
+ /* PHY Power Management Control */
+ #define HV_PM_CTRL PHY_REG(770, 17)
+-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
++#define HV_PM_CTRL_K1_CLK_REQ 0x200
+ #define HV_PM_CTRL_K1_ENABLE 0x4000
+
+ #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 4e04985fb430..055562c930fb 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2566,6 +2566,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ return;
++ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
++ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
++ return;
++ }
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v] &&
+@@ -2580,6 +2584,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ }
+ }
+ }
++ clear_bit(__I40E_VF_DISABLE, pf->state);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index 15dea48e0195..d6f8a41c3e35 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -3122,7 +3122,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
+ skb_put(skb, len);
+
+ if (dev->features & NETIF_F_RXCSUM) {
+- skb->csum = csum;
++ skb->csum = le16_to_cpu(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 0e820cf92f8a..231ed508c240 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1642,6 +1642,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
++ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+ { 0, }
+ };
+
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
+index e57d23746585..22c572a09b32 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
+@@ -259,6 +259,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
++ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+
+@@ -291,6 +292,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ err = nfp_repr_init(app, repr,
+ port_id, port, priv->nn->dp.netdev);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+@@ -373,6 +375,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
++ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+
+@@ -382,11 +385,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
++ kfree(repr_priv);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+ err = nfp_port_init_phy_port(app->pf, app, port, i);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+@@ -399,6 +404,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ err = nfp_repr_init(app, repr,
+ cmsg_port_id, port, priv->nn->dp.netdev);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
+index 08381ef8bdb4..41d30f55c946 100644
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1371,13 +1371,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
+ pldat->dma_buff_base_p = dma_handle;
+
+ netdev_dbg(ndev, "IO address space :%pR\n", res);
+- netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
++ netdev_dbg(ndev, "IO address size :%zd\n",
++ (size_t)resource_size(res));
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
+ pldat->net_base);
+ netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
+- netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
+- netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
+- pldat->dma_buff_base_p);
++ netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
++ netdev_dbg(ndev, "DMA buffer P address :%pad\n",
++ &pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
+ pldat->dma_buff_base_v);
+
+@@ -1424,8 +1425,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_out_unregister_netdev;
+
+- netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
+- res->start, ndev->irq);
++ netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
++ (unsigned long)res->start, ndev->irq);
+
+ phydev = ndev->phydev;
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2c971357e66c..0dc92d2faa64 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1238,6 +1238,7 @@ deliver:
+ macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
+
++ skb_orphan(skb);
+ ret = gro_cells_receive(&macsec->gro_cells, skb);
+ if (ret == NET_RX_SUCCESS)
+ count_rx(dev, skb->len);
+diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
+index 2b1e336961f9..bf4070ef6b84 100644
+--- a/drivers/net/phy/national.c
++++ b/drivers/net/phy/national.c
+@@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
+
+ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
+ {
++ u16 lb_dis = BIT(1);
++
+ if (disable)
+- ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
++ ns_exp_write(phydev, 0x1c0,
++ ns_exp_read(phydev, 0x1c0) | lb_dis);
+ else
+ ns_exp_write(phydev, 0x1c0,
+- ns_exp_read(phydev, 0x1c0) & 0xfffe);
++ ns_exp_read(phydev, 0x1c0) & ~lb_dis);
+
+ pr_debug("10BASE-T HDX loopback %s\n",
+- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
++ (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
+ }
+
+ static int ns_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 02ad03a2fab7..3e014ecffef8 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1419,6 +1419,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ netif_wake_queue(ppp->dev);
+ else
+ netif_stop_queue(ppp->dev);
++ } else {
++ kfree_skb(skb);
+ }
+ ppp_xmit_unlock(ppp);
+ }
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 1eaec648bd1f..f53e3e4e25f3 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
+ u8 ep;
+
+ for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+-
+ e = intf->cur_altsetting->endpoint + ep;
++
++ /* ignore endpoints which cannot transfer data */
++ if (!usb_endpoint_maxp(&e->desc))
++ continue;
++
+ switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(&e->desc)) {
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 10854977c55f..84b354f76dea 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -112,6 +112,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
+ int intr = 0;
+
+ e = alt->endpoint + ep;
++
++ /* ignore endpoints which cannot transfer data */
++ if (!usb_endpoint_maxp(&e->desc))
++ continue;
++
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_INT:
+ if (!usb_endpoint_dir_in(&e->desc))
+@@ -351,6 +356,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
+ {
+ enum usb_device_speed speed = dev->udev->speed;
+
++ if (!dev->rx_urb_size || !dev->hard_mtu)
++ goto insanity;
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+@@ -367,6 +374,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
+ dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ default:
++insanity:
+ dev->rx_qlen = dev->tx_qlen = 4;
+ }
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 3fe7605a2cca..9cb9f0544c9b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -843,11 +843,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+- * initially on version 38 and then backported to 36, 29 and
+- * 17.
++ * initially on version 38 and then backported to 29 and 17.
++ * The intention was to have it in 36 as well, but not all
++ * 8000 family got this feature enabled. The 8000 family is
++ * the only one using version 36, so skip this version
++ * entirely.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+ }
+diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
+index 3dbfce972c56..9e82ec12564b 100644
+--- a/drivers/net/wireless/marvell/libertas/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas/if_usb.c
+@@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = {
+ { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
+ { MODEL_8388, "libertas/usb8388.bin", NULL },
+ { MODEL_8388, "usb8388.bin", NULL },
+- { MODEL_8682, "libertas/usb8682.bin", NULL }
++ { MODEL_8682, "libertas/usb8682.bin", NULL },
++ { 0, NULL, NULL }
+ };
+
+ static const struct usb_device_id if_usb_table[] = {
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index f57feb8fdea4..892ef5212232 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -404,14 +404,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+- if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
++ unsigned nsid = le32_to_cpu(desc->nsids[n]);
++
++ if (ns->head->ns_id < nsid)
+ continue;
+- nvme_update_ns_ana_state(desc, ns);
++ if (ns->head->ns_id == nsid)
++ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
+ }
+ up_write(&ctrl->namespaces_rwsem);
+- WARN_ON_ONCE(n < nr_nsids);
+ return 0;
+ }
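/*
 * Standalone sketch (not part of the patch) of the walk the hunk above
 * switches nvme_update_ana_state() to: the controller's namespace list and
 * the ANA descriptor's nsids are both kept in ascending nsid order, so list
 * entries below the current nsid are skipped and an exact match triggers the
 * state update, while nsids absent from the list are simply passed over
 * instead of tripping the old WARN_ON_ONCE().  The data below is made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned ns_ids[] = { 1, 2, 4, 5, 7 }; /* namespaces the host has */
        unsigned nsids[]  = { 2, 3, 7 };       /* nsids in the ANA descriptor */
        size_t nr_ns = sizeof(ns_ids) / sizeof(ns_ids[0]);
        size_t nr_nsids = sizeof(nsids) / sizeof(nsids[0]);
        size_t n = 0;

        for (size_t i = 0; i < nr_ns && n < nr_nsids; i++) {
                if (ns_ids[i] < nsids[n])
                        continue;       /* not covered by this descriptor entry */
                if (ns_ids[i] == nsids[n])
                        printf("update ANA state of nsid %u\n", ns_ids[i]);
                n++;                    /* entry consumed, matched or not */
        }
        return 0;
}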
+
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 2008fa62a373..a8eb8784e151 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -68,9 +68,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ goto out;
+
+ host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+- data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
++ data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
++ sectors[READ]), 1000);
+ host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+- data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
++ data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
++ sectors[WRITE]), 1000);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+@@ -98,11 +100,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ if (!ns->bdev)
+ continue;
+ host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+- data_units_read +=
+- part_stat_read(ns->bdev->bd_part, sectors[READ]);
++ data_units_read += DIV_ROUND_UP(
++ part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+- data_units_written +=
+- part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
++ data_units_written += DIV_ROUND_UP(
++ part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+
+ }
+ rcu_read_unlock();
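/*
 * Standalone sketch (not part of the patch) of the unit conversion the hunks
 * above introduce: the SMART log "data units read/written" fields count
 * thousands of 512-byte units, so sector counts are divided by 1000 and
 * rounded up, which is what DIV_ROUND_UP(sectors, 1000) does.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long sectors[] = { 0, 1, 999, 1000, 1000001 };
        size_t n = sizeof(sectors) / sizeof(sectors[0]);

        for (size_t i = 0; i < n; i++)
                printf("%llu sectors -> %llu data units\n",
                       sectors[i], DIV_ROUND_UP(sectors[i], 1000ULL));
        return 0;
}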
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 7390fb8ca9d1..29df6ab29e95 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -160,6 +160,15 @@ struct dino_device
+ (struct dino_device *)__pdata; })
+
+
++/* Check if PCI device is behind a Card-mode Dino. */
++static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
++{
++ struct dino_device *dino_dev;
++
++ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
++ return is_card_dino(&dino_dev->hba.dev->id);
++}
++
+ /*
+ * Dino Configuration Space Accessor Functions
+ */
+@@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
+
++#ifdef CONFIG_TULIP
++static void pci_fixup_tulip(struct pci_dev *dev)
++{
++ if (!pci_dev_is_behind_card_dino(dev))
++ return;
++ if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
++ return;
++ pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
++ pci_name(dev));
++ /* Disable this card by zeroing the PCI resources */
++ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
++ memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
++#endif /* CONFIG_TULIP */
+
+ static void __init
+ dino_bios_init(void)
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index 088d1c2047e6..36bd2545afb6 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -685,10 +685,14 @@ static int __init pmc_core_probe(void)
+ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
+ pmcdev->map = &cnp_reg_map;
+
+- if (lpit_read_residency_count_address(&slp_s0_addr))
++ if (lpit_read_residency_count_address(&slp_s0_addr)) {
+ pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
+- else
++
++ if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
++ return -ENODEV;
++ } else {
+ pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
++ }
+
+ pmcdev->regbase = ioremap(pmcdev->base_addr,
+ pmcdev->map->regmap_length);
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 9577d8941846..f312764660e6 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4789,7 +4789,7 @@ static int __init regulator_init(void)
+ /* init early to allow our consumers to complete system booting */
+ core_initcall(regulator_init);
+
+-static int __init regulator_late_cleanup(struct device *dev, void *data)
++static int regulator_late_cleanup(struct device *dev, void *data)
+ {
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ const struct regulator_ops *ops = rdev->desc->ops;
+@@ -4838,17 +4838,8 @@ unlock:
+ return 0;
+ }
+
+-static int __init regulator_init_complete(void)
++static void regulator_init_complete_work_function(struct work_struct *work)
+ {
+- /*
+- * Since DT doesn't provide an idiomatic mechanism for
+- * enabling full constraints and since it's much more natural
+- * with DT to provide them just assume that a DT enabled
+- * system has full constraints.
+- */
+- if (of_have_populated_dt())
+- has_full_constraints = true;
+-
+ /*
+ * Regulators may had failed to resolve their input supplies
+ * when were registered, either because the input supply was
+@@ -4866,6 +4857,35 @@ static int __init regulator_init_complete(void)
+ */
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_late_cleanup);
++}
++
++static DECLARE_DELAYED_WORK(regulator_init_complete_work,
++ regulator_init_complete_work_function);
++
++static int __init regulator_init_complete(void)
++{
++ /*
++ * Since DT doesn't provide an idiomatic mechanism for
++ * enabling full constraints and since it's much more natural
++ * with DT to provide them just assume that a DT enabled
++ * system has full constraints.
++ */
++ if (of_have_populated_dt())
++ has_full_constraints = true;
++
++ /*
++ * We punt completion for an arbitrary amount of time since
++ * systems like distros will load many drivers from userspace
++ * so consumers might not always be ready yet, this is
++ * particularly an issue with laptops where this might bounce
++ * the display off then on. Ideally we'd get a notification
++ * from userspace when this happens but we don't so just wait
++ * a bit and hope we waited long enough. It'd be better if
++ * we'd only do this on systems that need it, and a kernel
++ * command line option might be useful.
++ */
++ schedule_delayed_work(&regulator_init_complete_work,
++ msecs_to_jiffies(30000));
+
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_register_fill_coupling_array);
+diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
+index b615a413ca9f..27c0a67cfd0e 100644
+--- a/drivers/regulator/lm363x-regulator.c
++++ b/drivers/regulator/lm363x-regulator.c
+@@ -33,7 +33,7 @@
+
+ /* LM3632 */
+ #define LM3632_BOOST_VSEL_MAX 0x26
+-#define LM3632_LDO_VSEL_MAX 0x29
++#define LM3632_LDO_VSEL_MAX 0x28
+ #define LM3632_VBOOST_MIN 4500000
+ #define LM3632_VLDO_MIN 4000000
+
+diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
+index d27fabae8ddd..6c629ef1bc4e 100644
+--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
+@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
+ spin_unlock(&ctlr->ms_lock);
+
+ retry:
++ memset(cdb, 0, sizeof(cdb));
++
+ data_size = rdac_failover_get(ctlr, &list, cdb);
+
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 7c1f36b69bdc..bee9cfb29152 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -216,8 +216,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *lio;
+ int rval = QLA_FUNCTION_FAILED;
+
+- if (!vha->flags.online)
+- goto done;
++ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
++ fcport->loop_id == FC_NO_LOOP_ID) {
++ ql_log(ql_log_warn, vha, 0xffff,
++ "%s: %8phC - not sending command.\n",
++ __func__, fcport->port_name);
++ return rval;
++ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+@@ -1123,8 +1128,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
++ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
++ fcport->loop_id == FC_NO_LOOP_ID) {
++ ql_log(ql_log_warn, vha, 0xffff,
++ "%s: %8phC - not sending command.\n",
++ __func__, fcport->port_name);
+ return rval;
++ }
+
+ fcport->disc_state = DSC_GPDB;
+
+@@ -1904,8 +1914,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ return;
+ }
+
+- if (fcport->disc_state == DSC_DELETE_PEND)
++ if ((fcport->disc_state == DSC_DELETE_PEND) ||
++ (fcport->disc_state == DSC_DELETED)) {
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
++ }
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+@@ -6557,8 +6570,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ }
+
+ /* Clear all async request states across all VPs. */
+- list_for_each_entry(fcport, &vha->vp_fcports, list)
++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
++ fcport->scan_state = 0;
++ }
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 02fa81f122c2..60b6019a2fca 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -4864,6 +4864,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ if (fcport) {
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
++ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 9d7feb005acf..7a1cc0b25e59 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1216,7 +1216,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
+ sess->logout_on_delete = 0;
+ sess->logo_ack_needed = 0;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+- sess->scan_state = 0;
+ }
+ }
+
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 75b926e70076..abfcc2f924ce 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1252,6 +1252,18 @@ static void scsi_initialize_rq(struct request *rq)
+ cmd->retries = 0;
+ }
+
++/*
++ * Only called when the request isn't completed by SCSI, and not freed by
++ * SCSI
++ */
++static void scsi_cleanup_rq(struct request *rq)
++{
++ if (rq->rq_flags & RQF_DONTPREP) {
++ scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
++ rq->rq_flags &= ~RQF_DONTPREP;
++ }
++}
++
+ /* Add a command to the list used by the aacraid and dpt_i2o drivers */
+ void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
+ {
+@@ -2339,6 +2351,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
+ .init_request = scsi_mq_init_request,
+ .exit_request = scsi_mq_exit_request,
+ .initialize_rq_fn = scsi_initialize_rq,
++ .cleanup_rq = scsi_cleanup_rq,
+ .map_queues = scsi_map_queues,
+ };
+
+diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
+index ceeeb3069a02..212fa06f7c57 100644
+--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
++++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
+@@ -247,7 +247,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
+ }
+
+ /* Waits for low-power LP-11 state on data and clock lanes. */
+-static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
++static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+ {
+ u32 mask, reg;
+ int ret;
+@@ -258,11 +258,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & mask) == mask, 0, 500000);
+ if (ret) {
+- v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
+- return ret;
++ v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
++ v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
+ }
+-
+- return 0;
+ }
+
+ /* Wait for active clock on the clock lane. */
+@@ -320,9 +318,7 @@ static int csi2_start(struct csi2_dev *csi2)
+ csi2_enable(csi2, true);
+
+ /* Step 5 */
+- ret = csi2_dphy_wait_stopstate(csi2);
+- if (ret)
+- goto err_assert_reset;
++ csi2_dphy_wait_stopstate(csi2);
+
+ /* Step 6 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index 9f39f0c360e0..cc1006375cac 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
+ */
+ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+ {
+- static const int default_resolutions[][2] = {
+- { 800, 600 },
+- { 1024, 768 },
+- { 1280, 1024 },
+- };
+- u32 i, right_margin;
+-
+- for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
+- if (default_resolutions[i][0] == si->lfb_width &&
+- default_resolutions[i][1] == si->lfb_height)
+- break;
+- }
+- /* If not a default resolution used for textmode, this should be fine */
+- if (i >= ARRAY_SIZE(default_resolutions))
+- return true;
+-
+- /* If the right margin is 5 times smaller then the left one, reject */
+- right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
+- if (right_margin < (bgrt_tab.image_offset_x / 5))
+- return false;
++ /*
++ * All x86 firmwares horizontally center the image (the yoffset
++ * calculations differ between boards, but xoffset is predictable).
++ */
++ u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
+
+- return true;
++ return bgrt_tab.image_offset_x == expected_xoffset;
+ }
+ #else
+ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
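/*
 * Standalone sketch (not part of the patch) putting numbers on the centering
 * check above: a 600-pixel-wide BGRT splash on an 800-pixel-wide framebuffer
 * is expected at xoffset (800 - 600) / 2 = 100, and any other reported
 * offset means the BGRT data does not match the current mode.
 */
#include <stdbool.h>
#include <stdio.h>

static bool bgrt_centered(unsigned lfb_width, unsigned bmp_width,
                          unsigned reported_xoffset)
{
        return reported_xoffset == (lfb_width - bmp_width) / 2;
}

int main(void)
{
        printf("offset 100: %s\n", bgrt_centered(800, 600, 100) ? "ok" : "reject");
        printf("offset  40: %s\n", bgrt_centered(800, 600,  40) ? "ok" : "reject");
        return 0;
}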
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 9a47e4e5dea0..e7fd0b5b9234 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1144,7 +1144,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ * (since it grows up, and may collide early with the stack
+ * growing down), and into the unused ELF_ET_DYN_BASE region.
+ */
+- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
++ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
++ loc->elf_ex.e_type == ET_DYN && !interpreter)
+ current->mm->brk = current->mm->start_brk =
+ ELF_ET_DYN_BASE;
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 79ac1ebabaf7..9fd383285f0e 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1374,6 +1374,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ struct tree_mod_elem *tm;
+ struct extent_buffer *eb = NULL;
+ struct extent_buffer *eb_root;
++ u64 eb_root_owner = 0;
+ struct extent_buffer *old;
+ struct tree_mod_root *old_root = NULL;
+ u64 old_generation = 0;
+@@ -1411,6 +1412,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ free_extent_buffer(old);
+ }
+ } else if (old_root) {
++ eb_root_owner = btrfs_header_owner(eb_root);
+ btrfs_tree_read_unlock(eb_root);
+ free_extent_buffer(eb_root);
+ eb = alloc_dummy_extent_buffer(fs_info, logical);
+@@ -1428,7 +1430,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ if (old_root) {
+ btrfs_set_header_bytenr(eb, eb->start);
+ btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+- btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
++ btrfs_set_header_owner(eb, eb_root_owner);
+ btrfs_set_header_level(eb, old_root->level);
+ btrfs_set_header_generation(eb, old_generation);
+ }
+@@ -5514,6 +5516,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ advance_left = advance_right = 0;
+
+ while (1) {
++ cond_resched();
+ if (advance_left && !left_end_reached) {
+ ret = tree_advance(fs_info, left_path, &left_level,
+ left_root_level,
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 4644f9b629a5..faca485ccd8f 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -39,6 +39,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
+ extern struct kmem_cache *btrfs_bit_radix_cachep;
+ extern struct kmem_cache *btrfs_path_cachep;
+ extern struct kmem_cache *btrfs_free_space_cachep;
++extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
+ struct btrfs_ordered_sum;
+
+ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 88c939f7aad9..e49e29288049 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7367,6 +7367,14 @@ search:
+ */
+ if ((flags & extra) && !(block_group->flags & extra))
+ goto loop;
++
++ /*
++ * This block group has different flags than we want.
++ * It's possible that we have MIXED_GROUP flag but no
++ * block group is mixed. Just skip such block group.
++ */
++ btrfs_release_block_group(block_group, delalloc);
++ continue;
+ }
+
+ have_block_group:
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 8ecf8c0e5fe6..4381e0aba8c0 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -763,7 +763,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ } else {
+ ASSERT(num_bitmaps);
+ num_bitmaps--;
+- e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
++ e->bitmap = kmem_cache_zalloc(
++ btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!e->bitmap) {
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
+@@ -1864,7 +1865,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *bitmap_info)
+ {
+ unlink_free_space(ctl, bitmap_info);
+- kfree(bitmap_info->bitmap);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
+@@ -2118,7 +2119,8 @@ new_bitmap:
+ }
+
+ /* allocate the bitmap */
+- info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
++ info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
++ GFP_NOFS);
+ spin_lock(&ctl->tree_lock);
+ if (!info->bitmap) {
+ ret = -ENOMEM;
+@@ -2130,7 +2132,8 @@ new_bitmap:
+ out:
+ if (info) {
+ if (info->bitmap)
+- kfree(info->bitmap);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep,
++ info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ }
+
+@@ -2786,7 +2789,8 @@ out:
+ if (entry->bytes == 0) {
+ ctl->free_extents--;
+ if (entry->bitmap) {
+- kfree(entry->bitmap);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep,
++ entry->bitmap);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
+ }
+@@ -3594,7 +3598,7 @@ again:
+ }
+
+ if (!map) {
+- map = kzalloc(PAGE_SIZE, GFP_NOFS);
++ map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!map) {
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return -ENOMEM;
+@@ -3624,7 +3628,7 @@ again:
+ if (info)
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ if (map)
+- kfree(map);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
+ return 0;
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 98c535ae038d..37332f83a3a9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -72,6 +72,7 @@ static struct kmem_cache *btrfs_inode_cachep;
+ struct kmem_cache *btrfs_trans_handle_cachep;
+ struct kmem_cache *btrfs_path_cachep;
+ struct kmem_cache *btrfs_free_space_cachep;
++struct kmem_cache *btrfs_free_space_bitmap_cachep;
+
+ #define S_SHIFT 12
+ static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+@@ -9361,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
+ kmem_cache_destroy(btrfs_trans_handle_cachep);
+ kmem_cache_destroy(btrfs_path_cachep);
+ kmem_cache_destroy(btrfs_free_space_cachep);
++ kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
+ }
+
+ int __init btrfs_init_cachep(void)
+@@ -9390,6 +9392,12 @@ int __init btrfs_init_cachep(void)
+ if (!btrfs_free_space_cachep)
+ goto fail;
+
++ btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
++ PAGE_SIZE, PAGE_SIZE,
++ SLAB_RED_ZONE, NULL);
++ if (!btrfs_free_space_bitmap_cachep)
++ goto fail;
++
+ return 0;
+ fail:
+ btrfs_destroy_cachep();
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 734866ab5194..3ea2008dcde3 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2796,9 +2796,6 @@ out:
+ btrfs_free_path(path);
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+- if (!btrfs_fs_closing(fs_info))
+- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+-
+ if (err > 0 &&
+ fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+@@ -2814,16 +2811,30 @@ out:
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
++ trans = NULL;
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d",
+ err);
+- goto done;
+ }
+- ret = update_qgroup_status_item(trans);
+- if (ret < 0) {
+- err = ret;
+- btrfs_err(fs_info, "fail to update qgroup status: %d", err);
++
++ mutex_lock(&fs_info->qgroup_rescan_lock);
++ if (!btrfs_fs_closing(fs_info))
++ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
++ if (trans) {
++ ret = update_qgroup_status_item(trans);
++ if (ret < 0) {
++ err = ret;
++ btrfs_err(fs_info, "fail to update qgroup status: %d",
++ err);
++ }
+ }
++ fs_info->qgroup_rescan_running = false;
++ complete_all(&fs_info->qgroup_rescan_completion);
++ mutex_unlock(&fs_info->qgroup_rescan_lock);
++
++ if (!trans)
++ return;
++
+ btrfs_end_transaction(trans);
+
+ if (btrfs_fs_closing(fs_info)) {
+@@ -2834,12 +2845,6 @@ out:
+ } else {
+ btrfs_err(fs_info, "qgroup scan failed with %d", err);
+ }
+-
+-done:
+- mutex_lock(&fs_info->qgroup_rescan_lock);
+- fs_info->qgroup_rescan_running = false;
+- mutex_unlock(&fs_info->qgroup_rescan_lock);
+- complete_all(&fs_info->qgroup_rescan_completion);
+ }
+
+ /*
+@@ -3067,6 +3072,9 @@ cleanup:
+ while ((unode = ulist_next(&reserved->range_changed, &uiter)))
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
+ unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
++ /* Also free data bytes of already reserved one */
++ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
++ orig_reserved, BTRFS_QGROUP_RSV_DATA);
+ extent_changeset_release(reserved);
+ return ret;
+ }
+@@ -3111,7 +3119,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
+ * EXTENT_QGROUP_RESERVED, we won't double free.
+ * So not need to rush.
+ */
+- ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
++ ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
+ free_start, free_start + free_len - 1,
+ EXTENT_QGROUP_RESERVED, &changeset);
+ if (ret < 0)
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 665a86f83f4b..c06845237cba 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -579,7 +579,10 @@ void ceph_evict_inode(struct inode *inode)
+ ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+
+ ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
++}
+
++void ceph_destroy_inode(struct inode *inode)
++{
+ call_rcu(&inode->i_rcu, ceph_i_callback);
+ }
+
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 02528e11bf33..ccab249a37f6 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -827,6 +827,7 @@ static int ceph_remount(struct super_block *sb, int *flags, char *data)
+
+ static const struct super_operations ceph_super_ops = {
+ .alloc_inode = ceph_alloc_inode,
++ .destroy_inode = ceph_destroy_inode,
+ .write_inode = ceph_write_inode,
+ .drop_inode = ceph_drop_inode,
+ .evict_inode = ceph_evict_inode,
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 6e968e48e5e4..8d3eabf06d66 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -855,6 +855,7 @@ extern const struct inode_operations ceph_file_iops;
+
+ extern struct inode *ceph_alloc_inode(struct super_block *sb);
+ extern void ceph_evict_inode(struct inode *inode);
++extern void ceph_destroy_inode(struct inode *inode);
+ extern int ceph_drop_inode(struct inode *inode);
+
+ extern struct inode *ceph_get_inode(struct super_block *sb,
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 64e3888f30e6..d5457015801d 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -428,6 +428,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ cifs_show_security(s, tcon->ses);
+ cifs_show_cache_flavor(s, cifs_sb);
+
++ if (tcon->no_lease)
++ seq_puts(s, ",nolease");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
+ seq_puts(s, ",multiuser");
+ else if (tcon->ses->user_name)
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 57af9bac0045..4dbae6e268d6 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -543,6 +543,7 @@ struct smb_vol {
+ bool noblocksnd:1;
+ bool noautotune:1;
+ bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
++ bool no_lease:1; /* disable requesting leases */
+ bool fsc:1; /* enable fscache */
+ bool mfsymlinks:1; /* use Minshall+French Symlinks */
+ bool multiuser:1;
+@@ -1004,6 +1005,7 @@ struct cifs_tcon {
+ bool need_reopen_files:1; /* need to reopen tcon file handles */
+ bool use_resilient:1; /* use resilient instead of durable handles */
+ bool use_persistent:1; /* use persistent instead of durable handles */
++ bool no_lease:1; /* Do not request leases on files or directories */
+ __le32 capabilities;
+ __u32 share_flags;
+ __u32 maximal_access;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index c290e231f918..966e493c82e5 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -70,7 +70,7 @@ enum {
+ Opt_user_xattr, Opt_nouser_xattr,
+ Opt_forceuid, Opt_noforceuid,
+ Opt_forcegid, Opt_noforcegid,
+- Opt_noblocksend, Opt_noautotune,
++ Opt_noblocksend, Opt_noautotune, Opt_nolease,
+ Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+ Opt_mapposix, Opt_nomapposix,
+ Opt_mapchars, Opt_nomapchars, Opt_sfu,
+@@ -129,6 +129,7 @@ static const match_table_t cifs_mount_option_tokens = {
+ { Opt_noforcegid, "noforcegid" },
+ { Opt_noblocksend, "noblocksend" },
+ { Opt_noautotune, "noautotune" },
++ { Opt_nolease, "nolease" },
+ { Opt_hard, "hard" },
+ { Opt_soft, "soft" },
+ { Opt_perm, "perm" },
+@@ -1542,6 +1543,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ case Opt_noautotune:
+ vol->noautotune = 1;
+ break;
++ case Opt_nolease:
++ vol->no_lease = 1;
++ break;
+ case Opt_hard:
+ vol->retry = 1;
+ break;
+@@ -3023,6 +3027,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ return 0;
+ if (tcon->snapshot_time != volume_info->snapshot_time)
+ return 0;
++ if (tcon->no_lease != volume_info->no_lease)
++ return 0;
+ return 1;
+ }
+
+@@ -3231,6 +3237,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
+ tcon->nocase = volume_info->nocase;
+ tcon->nohandlecache = volume_info->nohandlecache;
+ tcon->local_lease = volume_info->local_lease;
++ tcon->no_lease = volume_info->no_lease;
+ INIT_LIST_HEAD(&tcon->pending_opens);
+
+ spin_lock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 094be406cde4..f0d966da7f37 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2398,6 +2398,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+ return;
+
++ /* Check if the server granted an oplock rather than a lease */
++ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++ return smb2_set_oplock_level(cinode, oplock, epoch,
++ purge_cache);
++
+ if (oplock & SMB2_LEASE_READ_CACHING_HE) {
+ new_oplock |= CIFS_CACHE_READ_FLG;
+ strcat(message, "R");
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index cbe633f1840a..b1f5d0d28335 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2192,7 +2192,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+ iov[1].iov_len = uni_path_len;
+ iov[1].iov_base = path;
+
+- if (!server->oplocks)
++ if ((!server->oplocks) || (tcon->no_lease))
+ *oplock = SMB2_OPLOCK_LEVEL_NONE;
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
+diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
+index 50ddb795aaeb..a2db401a58ed 100644
+--- a/fs/cifs/xattr.c
++++ b/fs/cifs/xattr.c
+@@ -31,7 +31,7 @@
+ #include "cifs_fs_sb.h"
+ #include "cifs_unicode.h"
+
+-#define MAX_EA_VALUE_SIZE 65535
++#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
+ #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
+ #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
+ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 00bf0b67aae8..f81eb1785af2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3748,8 +3748,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ * illegal.
+ */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+-#ifdef EXT4_DEBUG
+- ext4_warning("Inode (%ld) finished: extent logical block %llu,"
++#ifdef CONFIG_EXT4_DEBUG
++ ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
+ " len %u; IO logical block %llu, len %u",
+ inode->i_ino, (unsigned long long)ee_block, ee_len,
+ (unsigned long long)map->m_lblk, map->m_len);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index cff6277f7a9f..a0c94c365a4c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4265,6 +4265,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++ if (ext4_has_inline_data(inode)) {
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ ret = ext4_convert_inline_data(inode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
++ if (ret)
++ return ret;
++ }
++
+ /*
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 6ee471b72a34..6d39143cfa09 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -331,7 +331,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ list_add_tail(&req->list, &fiq->pending);
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ }
+
+@@ -343,16 +343,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+ forget->forget_one.nodeid = nodeid;
+ forget->forget_one.nlookup = nlookup;
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ } else {
+ kfree(forget);
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+
+ static void flush_bg_queue(struct fuse_conn *fc)
+@@ -365,10 +365,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
+ req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+ list_del(&req->list);
+ fc->active_background++;
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ req->in.h.unique = fuse_get_unique(fiq);
+ queue_request(fiq, req);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+ }
+
+@@ -387,9 +387,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+ if (test_and_set_bit(FR_FINISHED, &req->flags))
+ goto put_request;
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ list_del_init(&req->intr_entry);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ WARN_ON(test_bit(FR_PENDING, &req->flags));
+ WARN_ON(test_bit(FR_SENT, &req->flags));
+ if (test_bit(FR_BACKGROUND, &req->flags)) {
+@@ -427,16 +427,16 @@ put_request:
+
+ static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+ {
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (test_bit(FR_FINISHED, &req->flags)) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return;
+ }
+ if (list_empty(&req->intr_entry)) {
+ list_add_tail(&req->intr_entry, &fiq->interrupts);
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ }
+
+@@ -466,16 +466,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+ if (!err)
+ return;
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ /* Request is not yet in userspace, bail out */
+ if (test_bit(FR_PENDING, &req->flags)) {
+ list_del(&req->list);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return;
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+
+ /*
+@@ -490,9 +490,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (!fiq->connected) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ } else {
+ req->in.h.unique = fuse_get_unique(fiq);
+@@ -500,7 +500,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
+ /* acquire extra reference, since request is still needed
+ after request_end() */
+ __fuse_get_request(req);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ request_wait_answer(fc, req);
+ /* Pairs with smp_wmb() in request_end() */
+@@ -633,12 +633,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
+
+ __clear_bit(FR_ISREPLY, &req->flags);
+ req->in.h.unique = unique;
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ queue_request(fiq, req);
+ err = 0;
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ return err;
+ }
+@@ -1082,12 +1082,12 @@ static int request_pending(struct fuse_iqueue *fiq)
+ * Unlike other requests this is assembled on demand, without a need
+ * to allocate a separate fuse_req structure.
+ *
+- * Called with fiq->waitq.lock held, releases it
++ * Called with fiq->lock held, releases it
+ */
+ static int fuse_read_interrupt(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes, struct fuse_req *req)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ struct fuse_in_header ih;
+ struct fuse_interrupt_in arg;
+@@ -1103,7 +1103,7 @@ __releases(fiq->waitq.lock)
+ ih.unique = req->intr_unique;
+ arg.unique = req->in.h.unique;
+
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ if (nbytes < reqsize)
+ return -EINVAL;
+
+@@ -1140,7 +1140,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
+ static int fuse_read_single_forget(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ int err;
+ struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+@@ -1154,7 +1154,7 @@ __releases(fiq->waitq.lock)
+ .len = sizeof(ih) + sizeof(arg),
+ };
+
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ kfree(forget);
+ if (nbytes < ih.len)
+ return -EINVAL;
+@@ -1172,7 +1172,7 @@ __releases(fiq->waitq.lock)
+
+ static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs, size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ int err;
+ unsigned max_forgets;
+@@ -1186,13 +1186,13 @@ __releases(fiq->waitq.lock)
+ };
+
+ if (nbytes < ih.len) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return -EINVAL;
+ }
+
+ max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+ head = dequeue_forget(fiq, max_forgets, &count);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ arg.count = count;
+ ih.len += count * sizeof(struct fuse_forget_one);
+@@ -1222,7 +1222,7 @@ __releases(fiq->waitq.lock)
+ static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
+ return fuse_read_single_forget(fiq, cs, nbytes);
+@@ -1251,16 +1251,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ unsigned reqsize;
+
+ restart:
+- spin_lock(&fiq->waitq.lock);
+- err = -EAGAIN;
+- if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
+- !request_pending(fiq))
+- goto err_unlock;
++ for (;;) {
++ spin_lock(&fiq->lock);
++ if (!fiq->connected || request_pending(fiq))
++ break;
++ spin_unlock(&fiq->lock);
+
+- err = wait_event_interruptible_exclusive_locked(fiq->waitq,
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ err = wait_event_interruptible_exclusive(fiq->waitq,
+ !fiq->connected || request_pending(fiq));
+- if (err)
+- goto err_unlock;
++ if (err)
++ return err;
++ }
+
+ if (!fiq->connected) {
+ err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+@@ -1284,7 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ req = list_entry(fiq->pending.next, struct fuse_req, list);
+ clear_bit(FR_PENDING, &req->flags);
+ list_del_init(&req->list);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ in = &req->in;
+ reqsize = in->h.len;
+@@ -1341,7 +1344,7 @@ out_end:
+ return err;
+
+ err_unlock:
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return err;
+ }
+
+@@ -2054,12 +2057,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
+ fiq = &fud->fc->iq;
+ poll_wait(file, &fiq->waitq, wait);
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (!fiq->connected)
+ mask = EPOLLERR;
+ else if (request_pending(fiq))
+ mask |= EPOLLIN | EPOLLRDNORM;
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ return mask;
+ }
+@@ -2150,15 +2153,15 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ fiq->connected = 0;
+ list_for_each_entry(req, &fiq->pending, list)
+ clear_bit(FR_PENDING, &req->flags);
+ list_splice_tail_init(&fiq->pending, &to_end);
+ while (forget_pending(fiq))
+ kfree(dequeue_forget(fiq, 1, NULL));
+- wake_up_all_locked(&fiq->waitq);
+- spin_unlock(&fiq->waitq.lock);
++ wake_up_all(&fiq->waitq);
++ spin_unlock(&fiq->lock);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ end_polls(fc);
+ wake_up_all(&fc->blocked_waitq);
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 9a22aa580fe7..96d46b3ad235 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1700,6 +1700,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
+ WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+ redirty_page_for_writepage(wbc, page);
++ unlock_page(page);
+ return 0;
+ }
+
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index cec8b8e74969..900bdcf79bfc 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -388,6 +388,9 @@ struct fuse_iqueue {
+ /** Connection established */
+ unsigned connected;
+
++ /** Lock protecting accesses to members of this structure */
++ spinlock_t lock;
++
+ /** Readers of the connection are waiting on this */
+ wait_queue_head_t waitq;
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index db9e60b7eb69..cb018315ecaf 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -585,6 +585,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
+ static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+ {
+ memset(fiq, 0, sizeof(struct fuse_iqueue));
++ spin_lock_init(&fiq->lock);
+ init_waitqueue_head(&fiq->waitq);
+ INIT_LIST_HEAD(&fiq->pending);
+ INIT_LIST_HEAD(&fiq->interrupts);
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index d14d71d8d7ee..52feccedd7a4 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1630,6 +1630,7 @@ out_unlock:
+ brelse(dibh);
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
++ buf_in_tr = false;
+ }
+ gfs2_glock_dq_uninit(rd_gh);
+ cond_resched();
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 54e5d17d7f3e..6fe303850c9e 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -230,9 +230,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
+ /* Encode an upper or lower file handle */
+ fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
+ ovl_dentry_upper(dentry), !enc_lower);
+- err = PTR_ERR(fh);
+ if (IS_ERR(fh))
+- goto fail;
++ return PTR_ERR(fh);
+
+ err = -EOVERFLOW;
+ if (fh->len > buflen)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index f0389849fd80..4f4964eeb086 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -386,7 +386,8 @@ static bool ovl_can_list(const char *s)
+ return true;
+
+ /* Never list trusted.overlay, list other trusted for superuser only */
+- return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
++ return !ovl_is_private_xattr(s) &&
++ ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ }
+
+ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 1da59c16f637..2885dce1ad49 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -114,6 +114,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+ typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+ typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+ typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
++typedef void (cleanup_rq_fn)(struct request *);
+
+
+ struct blk_mq_ops {
+@@ -165,6 +166,12 @@ struct blk_mq_ops {
+ /* Called from inside blk_get_request() */
+ void (*initialize_rq_fn)(struct request *rq);
+
++ /*
++ * Called before freeing one request which isn't completed yet,
++ * and usually for freeing the driver private data
++ */
++ cleanup_rq_fn *cleanup_rq;
++
+ map_queues_fn *map_queues;
+
+ #ifdef CONFIG_BLK_DEBUG_FS
+@@ -324,4 +331,10 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
+ for ((i) = 0; (i) < (hctx)->nr_ctx && \
+ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+
++static inline void blk_mq_cleanup_rq(struct request *rq)
++{
++ if (rq->q->mq_ops->cleanup_rq)
++ rq->q->mq_ops->cleanup_rq(rq);
++}
++
+ #endif
+diff --git a/include/linux/bug.h b/include/linux/bug.h
+index fe5916550da8..f639bd0122f3 100644
+--- a/include/linux/bug.h
++++ b/include/linux/bug.h
+@@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
+
+ #else /* !CONFIG_GENERIC_BUG */
+
++static inline void *find_bug(unsigned long bugaddr)
++{
++ return NULL;
++}
++
+ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
+ struct pt_regs *regs)
+ {
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 2ff52de1c2b8..840462ed1ec7 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -488,6 +488,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
+
+ void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
++/*
++ * May be called from host driver's system/runtime suspend/resume callbacks,
++ * to know if SDIO IRQs has been claimed.
++ */
++static inline bool sdio_irq_claimed(struct mmc_host *host)
++{
++ return host->sdio_irqs > 0;
++}
++
+ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
+ {
+ host->ops->enable_sdio_irq(host, 0);
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index dc905a4ff8d7..185d94829701 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
+ /* i_mutex must being held */
+ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+ {
+- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
++ return (ia->ia_valid & ATTR_SIZE) ||
+ (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
+ (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ }
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 714d63f60460..b8efca9dc2cb 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1505,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ /* Ensure it is not in reserved area nor out of text */
+ if (!kernel_text_address((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+- jump_label_text_reserved(p->addr, p->addr)) {
++ jump_label_text_reserved(p->addr, p->addr) ||
++ find_bug((unsigned long)p->addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 06045abd1887..d0d03223b45b 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3210,7 +3210,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ /* move first record forward until length fits into the buffer */
+ seq = dumper->cur_seq;
+ idx = dumper->cur_idx;
+- while (l > size && seq < dumper->next_seq) {
++ while (l >= size && seq < dumper->next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
+
+ l -= msg_print_text(msg, true, NULL, 0);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 795c63ca44a9..f4e050681ba1 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3066,8 +3066,36 @@ void scheduler_tick(void)
+
+ struct tick_work {
+ int cpu;
++ atomic_t state;
+ struct delayed_work work;
+ };
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE 0
++#define TICK_SCHED_REMOTE_OFFLINING 1
++#define TICK_SCHED_REMOTE_RUNNING 2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ * TICK_SCHED_REMOTE_OFFLINE
++ * | ^
++ * | |
++ * | | sched_tick_remote()
++ * | |
++ * | |
++ * +--TICK_SCHED_REMOTE_OFFLINING
++ * | ^
++ * | |
++ * sched_tick_start() | | sched_tick_stop()
++ * | |
++ * V |
++ * TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
+
+ static struct tick_work __percpu *tick_work_cpu;
+
+@@ -3080,6 +3108,7 @@ static void sched_tick_remote(struct work_struct *work)
+ struct task_struct *curr;
+ struct rq_flags rf;
+ u64 delta;
++ int os;
+
+ /*
+ * Handle the tick only if it appears the remote CPU is running in full
+@@ -3093,7 +3122,7 @@ static void sched_tick_remote(struct work_struct *work)
+
+ rq_lock_irq(rq, &rf);
+ curr = rq->curr;
+- if (is_idle_task(curr))
++ if (is_idle_task(curr) || cpu_is_offline(cpu))
+ goto out_unlock;
+
+ update_rq_clock(rq);
+@@ -3113,13 +3142,18 @@ out_requeue:
+ /*
+ * Run the remote tick once per second (1Hz). This arbitrary
+ * frequency is large enough to avoid overload but short enough
+- * to keep scheduler internal stats reasonably up to date.
++ * to keep scheduler internal stats reasonably up to date. But
++ * first update state to reflect hotplug activity if required.
+ */
+- queue_delayed_work(system_unbound_wq, dwork, HZ);
++ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++ if (os == TICK_SCHED_REMOTE_RUNNING)
++ queue_delayed_work(system_unbound_wq, dwork, HZ);
+ }
+
+ static void sched_tick_start(int cpu)
+ {
++ int os;
+ struct tick_work *twork;
+
+ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+@@ -3128,15 +3162,20 @@ static void sched_tick_start(int cpu)
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+- twork->cpu = cpu;
+- INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+- queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++ if (os == TICK_SCHED_REMOTE_OFFLINE) {
++ twork->cpu = cpu;
++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++ queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++ }
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void sched_tick_stop(int cpu)
+ {
+ struct tick_work *twork;
++ int os;
+
+ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+ return;
+@@ -3144,7 +3183,10 @@ static void sched_tick_stop(int cpu)
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+- cancel_delayed_work_sync(&twork->work);
++ /* There cannot be competing actions, but don't rely on stop-machine. */
++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++ WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++ /* Don't cancel, as this would mess up the state machine. */
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+@@ -3152,7 +3194,6 @@ int __init sched_tick_offload_init(void)
+ {
+ tick_work_cpu = alloc_percpu(struct tick_work);
+ BUG_ON(!tick_work_cpu);
+-
+ return 0;
+ }
+
+@@ -6453,10 +6494,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
+ #ifdef CONFIG_RT_GROUP_SCHED
+ if (!sched_rt_can_attach(css_tg(css), task))
+ return -EINVAL;
+-#else
+- /* We don't support RT-tasks being in separate groups */
+- if (task->sched_class != &fair_sched_class)
+- return -EINVAL;
+ #endif
+ /*
+ * Serialize against wake_up_new_task() such that if its
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 64d54acc9928..54fcff656ecd 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -118,6 +118,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+ unsigned int next_freq)
+ {
+ struct cpufreq_policy *policy = sg_policy->policy;
++ int cpu;
+
+ if (!sugov_update_next_freq(sg_policy, time, next_freq))
+ return;
+@@ -127,7 +128,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+ return;
+
+ policy->cur = next_freq;
+- trace_cpu_frequency(next_freq, smp_processor_id());
++
++ if (trace_cpu_frequency_enabled()) {
++ for_each_cpu(cpu, policy->cpus)
++ trace_cpu_frequency(next_freq, cpu);
++ }
+ }
+
+ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 72c07059ef37..ebec37cb3be9 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
+ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
+ {
+ struct rq *later_rq = NULL;
++ struct dl_bw *dl_b;
+
+ later_rq = find_lock_later_rq(p, rq);
+ if (!later_rq) {
+@@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
+ double_lock_balance(rq, later_rq);
+ }
+
++ if (p->dl.dl_non_contending || p->dl.dl_throttled) {
++ /*
++ * Inactive timer is armed (or callback is running, but
++ * waiting for us to release rq locks). In any case, when it
++ * will fire (or continue), it will see running_bw of this
++ * task migrated to later_rq (and correctly handle it).
++ */
++ sub_running_bw(&p->dl, &rq->dl);
++ sub_rq_bw(&p->dl, &rq->dl);
++
++ add_rq_bw(&p->dl, &later_rq->dl);
++ add_running_bw(&p->dl, &later_rq->dl);
++ } else {
++ sub_rq_bw(&p->dl, &rq->dl);
++ add_rq_bw(&p->dl, &later_rq->dl);
++ }
++
++ /*
++ * And we finally need to fixup root_domain(s) bandwidth accounting,
++ * since p is still hanging out in the old (now moved to default) root
++ * domain.
++ */
++ dl_b = &rq->rd->dl_bw;
++ raw_spin_lock(&dl_b->lock);
++ __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
++ raw_spin_unlock(&dl_b->lock);
++
++ dl_b = &later_rq->rd->dl_bw;
++ raw_spin_lock(&dl_b->lock);
++ __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
++ raw_spin_unlock(&dl_b->lock);
++
+ set_task_cpu(p, later_rq->cpu);
+ double_unlock_balance(later_rq, rq);
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 49ed38914669..32d2dac680a7 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -8863,9 +8863,10 @@ more_balance:
+ out_balanced:
+ /*
+ * We reach balance although we may have faced some affinity
+- * constraints. Clear the imbalance flag if it was set.
++ * constraints. Clear the imbalance flag only if other tasks got
++ * a chance to move and fix the imbalance.
+ */
+- if (sd_parent) {
++ if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
+ int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+ if (*group_imbalance)
+@@ -10078,18 +10079,18 @@ err:
+ void online_fair_sched_group(struct task_group *tg)
+ {
+ struct sched_entity *se;
++ struct rq_flags rf;
+ struct rq *rq;
+ int i;
+
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+ se = tg->se[i];
+-
+- raw_spin_lock_irq(&rq->lock);
++ rq_lock_irq(rq, &rf);
+ update_rq_clock(rq);
+ attach_entity_cfs_rq(se);
+ sync_throttle(tg, i);
+- raw_spin_unlock_irq(&rq->lock);
++ rq_unlock_irq(rq, &rf);
+ }
+ }
+
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 16f84142f2f4..44a17366c8ec 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -240,13 +240,14 @@ static void do_idle(void)
+ check_pgt_cache();
+ rmb();
+
++ local_irq_disable();
++
+ if (cpu_is_offline(cpu)) {
+- tick_nohz_idle_stop_tick_protected();
++ tick_nohz_idle_stop_tick();
+ cpuhp_report_idle_dead();
+ arch_cpu_idle_dead();
+ }
+
+- local_irq_disable();
+ arch_cpu_idle_enter();
+
+ /*
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index fdeb9bc6affb..f4255a65c44b 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -676,7 +676,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
+ enum alarmtimer_type type;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ if (!capable(CAP_WAKE_ALARM))
+ return -EPERM;
+@@ -794,7 +794,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ int ret = 0;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 76801b9b481e..d62d7ae5201c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
+ struct sighand_struct *sighand;
+ struct task_struct *p = timer->it.cpu.task;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return -EINVAL;
+
+ /*
+ * Protect against sighand release/switch in exit/exec and process/
+@@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+ u64 old_expires, new_expires, old_incr, val;
+ int ret;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return -EINVAL;
+
+ /*
+ * Use the to_ktime conversion because that clamps the maximum
+@@ -716,10 +718,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+
+ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
+ {
+- u64 now;
+ struct task_struct *p = timer->it.cpu.task;
++ u64 now;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return;
+
+ /*
+ * Easy part: convert the reload time.
+@@ -1004,12 +1007,13 @@ static void check_process_timers(struct task_struct *tsk,
+ */
+ static void posix_cpu_timer_rearm(struct k_itimer *timer)
+ {
++ struct task_struct *p = timer->it.cpu.task;
+ struct sighand_struct *sighand;
+ unsigned long flags;
+- struct task_struct *p = timer->it.cpu.task;
+ u64 now;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return;
+
+ /*
+ * Fetch the current sample and update the timer's expiry time.
+@@ -1206,7 +1210,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+ u64 now;
+ int ret;
+
+- WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
++ if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
++ return;
++
+ ret = cpu_timer_sample_group(clock_idx, tsk, &now);
+
+ if (oldval && ret != -EINVAL) {
+diff --git a/mm/compaction.c b/mm/compaction.c
+index faca45ebe62d..5079ddbec8f9 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1540,6 +1540,17 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
+ unsigned long end_pfn = zone_end_pfn(zone);
+ const bool sync = cc->mode != MIGRATE_ASYNC;
+
++ /*
++ * These counters track activities during zone compaction. Initialize
++ * them before compacting a new zone.
++ */
++ cc->total_migrate_scanned = 0;
++ cc->total_free_scanned = 0;
++ cc->nr_migratepages = 0;
++ cc->nr_freepages = 0;
++ INIT_LIST_HEAD(&cc->freepages);
++ INIT_LIST_HEAD(&cc->migratepages);
++
+ cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
+ ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
+ cc->classzone_idx);
+@@ -1703,10 +1714,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
+ {
+ enum compact_result ret;
+ struct compact_control cc = {
+- .nr_freepages = 0,
+- .nr_migratepages = 0,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .order = order,
+ .gfp_mask = gfp_mask,
+ .zone = zone,
+@@ -1719,8 +1726,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
+ .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
+ .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
+ };
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+
+ ret = compact_zone(zone, &cc);
+
+@@ -1819,8 +1824,6 @@ static void compact_node(int nid)
+ struct zone *zone;
+ struct compact_control cc = {
+ .order = -1,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .mode = MIGRATE_SYNC,
+ .ignore_skip_hint = true,
+ .whole_zone = true,
+@@ -1834,11 +1837,7 @@ static void compact_node(int nid)
+ if (!populated_zone(zone))
+ continue;
+
+- cc.nr_freepages = 0;
+- cc.nr_migratepages = 0;
+ cc.zone = zone;
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+
+ compact_zone(zone, &cc);
+
+@@ -1947,8 +1946,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
+ struct zone *zone;
+ struct compact_control cc = {
+ .order = pgdat->kcompactd_max_order,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .classzone_idx = pgdat->kcompactd_classzone_idx,
+ .mode = MIGRATE_SYNC_LIGHT,
+ .ignore_skip_hint = false,
+@@ -1972,16 +1969,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
+ COMPACT_CONTINUE)
+ continue;
+
+- cc.nr_freepages = 0;
+- cc.nr_migratepages = 0;
+- cc.total_migrate_scanned = 0;
+- cc.total_free_scanned = 0;
+- cc.zone = zone;
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+-
+ if (kthread_should_stop())
+ return;
++
++ cc.zone = zone;
+ status = compact_zone(zone, &cc);
+
+ if (status == COMPACT_SUCCESS) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ecde75f2189b..65da189a433b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2637,6 +2637,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+ !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
++
++ /*
++ * Enforce __GFP_NOFAIL allocation because callers are not
++ * prepared to see failures and likely do not have any failure
++ * handling code.
++ */
++ if (gfp & __GFP_NOFAIL) {
++ page_counter_charge(&memcg->kmem, nr_pages);
++ return 0;
++ }
+ cancel_charge(memcg, nr_pages);
+ return -ENOMEM;
+ }
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index dbddb7a409dd..a581fe2a2f1f 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -1089,9 +1089,10 @@ bool out_of_memory(struct oom_control *oc)
+ * The OOM killer does not compensate for IO-less reclaim.
+ * pagefault_out_of_memory lost its gfp context so we have to
+ * make sure exclude 0 mask - all other users should have at least
+- * ___GFP_DIRECT_RECLAIM to get here.
++ * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
++ * invoke the OOM killer even if it is a GFP_NOFS allocation.
+ */
+- if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
++ if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
+ return true;
+
+ /*
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 795fbc6c06aa..9abb18fffbc3 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1028,6 +1028,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
+ */
+ if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
+ goto out;
++
++ rc = -EPERM;
++ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
++ goto out;
++
+ rc = -ENOMEM;
+ sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
+ if (!sk)
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 5d01edf8d819..44ec492f3dc2 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -858,6 +858,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
+ break;
+
+ case SOCK_RAW:
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
+index bc6b912603f1..89819745e482 100644
+--- a/net/ieee802154/socket.c
++++ b/net/ieee802154/socket.c
+@@ -1018,6 +1018,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
+
+ switch (sock->type) {
+ case SOCK_RAW:
++ rc = -EPERM;
++ if (!capable(CAP_NET_RAW))
++ goto out;
+ proto = &ieee802154_raw_prot;
+ ops = &ieee802154_raw_ops;
+ break;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 17335a370e64..9d775b8df57d 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -219,7 +219,7 @@ static int tcp_write_timeout(struct sock *sk)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+- bool expired, do_reset;
++ bool expired = false, do_reset;
+ int retry_until;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+@@ -251,9 +251,10 @@ static int tcp_write_timeout(struct sock *sk)
+ if (tcp_out_of_resources(sk, do_reset))
+ return 1;
+ }
++ }
++ if (!expired)
+ expired = retransmits_timed_out(sk, retry_until,
+ icsk->icsk_user_timeout);
+- }
+ tcp_fastopen_active_detect_blackhole(sk, expired);
+
+ if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index ae296273ce3d..ff254e8c0c44 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -1011,10 +1011,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
+ sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+- if (sock->type == SOCK_RAW)
++ if (sock->type == SOCK_RAW) {
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ sock->ops = &llcp_rawsock_ops;
+- else
++ } else {
+ sock->ops = &llcp_sock_ops;
++ }
+
+ sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
+ if (sk == NULL)
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 0f5ce77460d4..8e396c7c8389 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2239,7 +2239,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+ [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+- [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
++ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
+ [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+ [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 86e1e37eb4e8..5c75118539bb 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -157,6 +157,7 @@ static void __qrtr_node_release(struct kref *kref)
+ list_del(&node->item);
+ mutex_unlock(&qrtr_node_lock);
+
++ cancel_work_sync(&node->work);
+ skb_queue_purge(&node->rx_queue);
+ kfree(node);
+ }
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 98635311a5a0..ea0738ceb5bb 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -134,6 +134,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
++ case ARPHRD_IP6GRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return false;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 4159bcb479c6..e217ebc693f8 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2038,8 +2038,10 @@ out:
+ void tcf_exts_destroy(struct tcf_exts *exts)
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+- tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
+- kfree(exts->actions);
++ if (exts->actions) {
++ tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
++ kfree(exts->actions);
++ }
+ exts->nr_actions = 0;
+ #endif
+ }
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index b06cc5e50412..84fdc4857771 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1308,7 +1308,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
+ }
+
+ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+- [TCA_KIND] = { .type = NLA_STRING },
++ [TCA_KIND] = { .type = NLA_NUL_STRING,
++ .len = IFNAMSIZ - 1 },
+ [TCA_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct tc_estimator) },
+ [TCA_STAB] = { .type = NLA_NESTED },
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 4dfe10b9f96c..86350fe5cfc8 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -749,7 +749,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+ struct disttable *d;
+ int i;
+
+- if (n > NETEM_DIST_MAX)
++ if (!n || n > NETEM_DIST_MAX)
+ return -EINVAL;
+
+ d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index c14e8f6e5e19..d641d81da759 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -930,6 +930,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ }
+
+ cfg80211_process_rdev_events(rdev);
++ cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
+ }
+
+ err = rdev_change_virtual_intf(rdev, dev, ntype, params);
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 6d5bbd31db7f..bd29e4e7a524 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
+ if (node == fieldtype)
+ continue;
+
+- if (!is_fptr(fieldtype))
+- return 0;
+-
+- if (code != RECORD_TYPE && code != UNION_TYPE)
++ if (code == RECORD_TYPE || code == UNION_TYPE) {
++ if (!is_pure_ops_struct(fieldtype))
++ return 0;
+ continue;
++ }
+
+- if (!is_pure_ops_struct(fieldtype))
++ if (!is_fptr(fieldtype))
+ return 0;
+ }
+
+diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
+index 743015e87a96..e240fdfcae31 100644
+--- a/sound/firewire/motu/motu.c
++++ b/sound/firewire/motu/motu.c
+@@ -255,6 +255,17 @@ static const struct snd_motu_spec motu_audio_express = {
+ .analog_out_ports = 4,
+ };
+
++static const struct snd_motu_spec motu_4pre = {
++ .name = "4pre",
++ .protocol = &snd_motu_protocol_v3,
++ .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
++ SND_MOTU_SPEC_TX_MICINST_CHUNK |
++ SND_MOTU_SPEC_TX_RETURN_CHUNK |
++ SND_MOTU_SPEC_RX_SEPARETED_MAIN,
++ .analog_in_ports = 2,
++ .analog_out_ports = 2,
++};
++
+ #define SND_MOTU_DEV_ENTRY(model, data) \
+ { \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+@@ -272,6 +283,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
+ SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
+ SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
+ SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
++ SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
+ { }
+ };
+ MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
+diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
+index e4cc8990e195..9e58633e2dea 100644
+--- a/sound/firewire/tascam/tascam-pcm.c
++++ b/sound/firewire/tascam/tascam-pcm.c
+@@ -57,6 +57,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
+ goto err_locked;
+
+ err = snd_tscm_stream_get_clock(tscm, &clock);
++ if (err < 0)
++ goto err_locked;
++
+ if (clock != SND_TSCM_CLOCK_INTERNAL ||
+ amdtp_stream_pcm_running(&tscm->rx_stream) ||
+ amdtp_stream_pcm_running(&tscm->tx_stream)) {
+diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
+index f1657a4e0621..a1308f12a65b 100644
+--- a/sound/firewire/tascam/tascam-stream.c
++++ b/sound/firewire/tascam/tascam-stream.c
+@@ -9,20 +9,37 @@
+ #include <linux/delay.h>
+ #include "tascam.h"
+
++#define CLOCK_STATUS_MASK 0xffff0000
++#define CLOCK_CONFIG_MASK 0x0000ffff
++
+ #define CALLBACK_TIMEOUT 500
+
+ static int get_clock(struct snd_tscm *tscm, u32 *data)
+ {
++ int trial = 0;
+ __be32 reg;
+ int err;
+
+- err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
+- TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
+- &reg, sizeof(reg), 0);
+- if (err >= 0)
++ while (trial++ < 5) {
++ err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
++ TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
++ &reg, sizeof(reg), 0);
++ if (err < 0)
++ return err;
++
+ *data = be32_to_cpu(reg);
++ if (*data & CLOCK_STATUS_MASK)
++ break;
+
+- return err;
++ // In intermediate state after changing clock status.
++ msleep(50);
++ }
++
++ // Still in the intermediate state.
++ if (trial >= 5)
++ return -EAGAIN;
++
++ return 0;
+ }
+
+ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+@@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+ err = get_clock(tscm, &data);
+ if (err < 0)
+ return err;
+- data &= 0x0000ffff;
++ data &= CLOCK_CONFIG_MASK;
+
+ if (rate > 0) {
+ data &= 0x000000ff;
+@@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+
+ int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
+ {
+- u32 data = 0x0;
+- unsigned int trials = 0;
++ u32 data;
+ int err;
+
+- while (data == 0x0 || trials++ < 5) {
+- err = get_clock(tscm, &data);
+- if (err < 0)
+- return err;
++ err = get_clock(tscm, &data);
++ if (err < 0)
++ return err;
+
+- data = (data & 0xff000000) >> 24;
+- }
++ data = (data & 0xff000000) >> 24;
+
+ /* Check base rate. */
+ if ((data & 0x0f) == 0x01)
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index 74244d8e2909..e858b6fa0c3a 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -443,6 +443,8 @@ static void azx_int_disable(struct hdac_bus *bus)
+ list_for_each_entry(azx_dev, &bus->stream_list, list)
+ snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
+
++ synchronize_irq(bus->irq);
++
+ /* disable SIE for all streams */
+ snd_hdac_chip_writeb(bus, INTCTL, 0);
+
+diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
+index 7f2761a2e7c8..971197c34fce 100644
+--- a/sound/i2c/other/ak4xxx-adda.c
++++ b/sound/i2c/other/ak4xxx-adda.c
+@@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
+ return err;
+
+ memset(&knew, 0, sizeof(knew));
+- knew.name = ak->adc_info[mixer_ch].selector_name;
+- if (!knew.name) {
++ if (!ak->adc_info ||
++ !ak->adc_info[mixer_ch].selector_name) {
+ knew.name = "Capture Channel";
+ knew.index = mixer_ch + ak->idx_offset * 2;
+- }
++ } else
++ knew.name = ak->adc_info[mixer_ch].selector_name;
+
+ knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ knew.info = ak4xxx_capture_source_info;
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index a41c1bec7c88..8fcb421193e0 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -877,10 +877,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
+ */
+ if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
+ hbus->response_reset = 1;
++ dev_err(chip->card->dev,
++ "No response from codec, resetting bus: last cmd=0x%08x\n",
++ bus->last_cmd[addr]);
+ return -EAGAIN; /* give a chance to retry */
+ }
+
+- dev_err(chip->card->dev,
++ dev_WARN(chip->card->dev,
+ "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
+ bus->last_cmd[addr]);
+ chip->single_cmd = 1;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 0b24c5ce2fd6..bfc45086cf79 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1455,9 +1455,9 @@ static int azx_free(struct azx *chip)
+ }
+
+ if (bus->chip_init) {
++ azx_stop_chip(chip);
+ azx_clear_irq_pending(chip);
+ azx_stop_all_streams(chip);
+- azx_stop_chip(chip);
+ }
+
+ if (bus->irq >= 0)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index e4fbfb5557ab..107ec7f3e221 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2583,6 +2583,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
+ /* precondition and allocation for Intel codecs */
+ static int alloc_intel_hdmi(struct hda_codec *codec)
+ {
++ int err;
++
+ /* requires i915 binding */
+ if (!codec->bus->core.audio_component) {
+ codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
+@@ -2591,7 +2593,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
+ return -ENODEV;
+ }
+
+- return alloc_generic_hdmi(codec);
++ err = alloc_generic_hdmi(codec);
++ if (err < 0)
++ return err;
++ /* no need to handle unsol events */
++ codec->patch_ops.unsol_event = NULL;
++ return 0;
+ }
+
+ /* parse and post-process for Intel codecs */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7f74ebee8c2d..e1b08d6f2a51 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1057,6 +1057,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
+ SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
+ SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
+ SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
++ /* blacklist -- no beep available */
++ SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
++ SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
+ {}
+ };
+
+@@ -5676,6 +5679,7 @@ enum {
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
++ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6714,6 +6718,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
++ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x04a11040 },
++ { 0x21, 0x04211020 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -6977,6 +6991,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -7141,6 +7156,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
+ {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
+ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
++ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index e97d12d578b0..9ebe77c3784a 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -46,7 +46,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
+-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
++ 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
++ 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
++);
+
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
+@@ -84,7 +87,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
+ SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
+ 4, 0, 3, 1, hpout_vol_tlv),
+ SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
+- 0, 4, 7, 0, hpmixer_gain_tlv),
++ 0, 4, 11, 0, hpmixer_gain_tlv),
+
+ SOC_ENUM("Playback Polarity", dacpol),
+ SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 60764f6201b1..18cddf1729a6 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1165,12 +1165,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
+ SGTL5000_INT_OSC_EN);
+ /* Enable VDDC charge pump */
+ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
+- } else if (vddio >= 3100 && vdda >= 3100) {
++ } else {
+ ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
+- /* VDDC use VDDIO rail */
+- lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+- lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+- SGTL5000_VDDC_MAN_ASSN_SHIFT;
++ /*
++ * if vddio == vdda the source of charge pump should be
++ * assigned manually to VDDIO
++ */
++ if (vddio == vdda) {
++ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
++ lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
++ SGTL5000_VDDC_MAN_ASSN_SHIFT;
++ }
+ }
+
+ snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
+@@ -1280,6 +1285,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
+ int ret;
+ u16 reg;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
++ unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
+
+ /* power up sgtl5000 */
+ ret = sgtl5000_set_power_regs(component);
+@@ -1305,9 +1311,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
+ reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
+ snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
+
+- snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
+- SGTL5000_HP_ZCD_EN |
+- SGTL5000_ADC_ZCD_EN);
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
++ zcd_mask, zcd_mask);
+
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_R_MASK,
+diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
+index bf92d36b8f8a..3c75dcf91741 100644
+--- a/sound/soc/codecs/tlv320aic31xx.c
++++ b/sound/soc/codecs/tlv320aic31xx.c
+@@ -1441,7 +1441,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+ aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(aic31xx->gpio_reset)) {
+- dev_err(aic31xx->dev, "not able to acquire gpio\n");
++ if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
++ dev_err(aic31xx->dev, "not able to acquire gpio\n");
+ return PTR_ERR(aic31xx->gpio_reset);
+ }
+
+@@ -1452,7 +1453,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+ ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
+ if (ret) {
+- dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
++ if (ret != -EPROBE_DEFER)
++ dev_err(aic31xx->dev,
++ "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 09b2967befd9..d83be26d6446 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
+ u32 wl = SSI_SxCCR_WL(sample_size);
+ int ret;
+
+- /*
+- * SSI is properly configured if it is enabled and running in
+- * the synchronous mode; Note that AC97 mode is an exception
+- * that should set separate configurations for STCCR and SRCCR
+- * despite running in the synchronous mode.
+- */
+- if (ssi->streams && ssi->synchronous)
+- return 0;
+-
+ if (fsl_ssi_is_i2s_master(ssi)) {
+ ret = fsl_ssi_set_bclk(substream, dai, hw_params);
+ if (ret)
+@@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
+ }
+ }
+
++ /*
++ * SSI is properly configured if it is enabled and running in
++ * the synchronous mode; Note that AC97 mode is an exception
++ * that should set separate configurations for STCCR and SRCCR
++ * despite running in the synchronous mode.
++ */
++ if (ssi->streams && ssi->synchronous)
++ return 0;
++
+ if (!fsl_ssi_is_ac97(ssi)) {
+ /*
+ * Keep the ssi->i2s_net intact while having a local variable
+diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
+index dcff13802c00..771734fd7707 100644
+--- a/sound/soc/intel/common/sst-ipc.c
++++ b/sound/soc/intel/common/sst-ipc.c
+@@ -231,6 +231,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+
+ if (ipc->ops.reply_msg_match != NULL)
+ header = ipc->ops.reply_msg_match(header, &mask);
++ else
++ mask = (u64)-1;
+
+ if (list_empty(&ipc->rx_list)) {
+ dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
+diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
+index 5d7ac2ee7a3c..faf1cba57abb 100644
+--- a/sound/soc/intel/skylake/skl-debug.c
++++ b/sound/soc/intel/skylake/skl-debug.c
+@@ -196,7 +196,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
+ memset(d->fw_read_buff, 0, FW_REG_BUF);
+
+ if (w0_stat_sz > 0)
+- __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
++ __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+
+ for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
+ ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
+index 01a050cf8775..3cef2ebfd8be 100644
+--- a/sound/soc/intel/skylake/skl-nhlt.c
++++ b/sound/soc/intel/skylake/skl-nhlt.c
+@@ -231,7 +231,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct device *dev = bus->dev;
+
+- dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
++ dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
+ nhlt->header.oem_id, nhlt->header.oem_table_id,
+ nhlt->header.oem_revision);
+
+diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
+index 051f96405346..549a137878a6 100644
+--- a/sound/soc/sh/rcar/adg.c
++++ b/sound/soc/sh/rcar/adg.c
+@@ -30,6 +30,7 @@ struct rsnd_adg {
+ struct clk *clkout[CLKOUTMAX];
+ struct clk_onecell_data onecell;
+ struct rsnd_mod mod;
++ int clk_rate[CLKMAX];
+ u32 flags;
+ u32 ckr;
+ u32 rbga;
+@@ -113,9 +114,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
+ unsigned int val, en;
+ unsigned int min, diff;
+ unsigned int sel_rate[] = {
+- clk_get_rate(adg->clk[CLKA]), /* 0000: CLKA */
+- clk_get_rate(adg->clk[CLKB]), /* 0001: CLKB */
+- clk_get_rate(adg->clk[CLKC]), /* 0010: CLKC */
++ adg->clk_rate[CLKA], /* 0000: CLKA */
++ adg->clk_rate[CLKB], /* 0001: CLKB */
++ adg->clk_rate[CLKC], /* 0010: CLKC */
+ adg->rbga_rate_for_441khz, /* 0011: RBGA */
+ adg->rbgb_rate_for_48khz, /* 0100: RBGB */
+ };
+@@ -331,7 +332,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
+ * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
+ */
+ for_each_rsnd_clk(clk, adg, i) {
+- if (rate == clk_get_rate(clk))
++ if (rate == adg->clk_rate[i])
+ return sel_table[i];
+ }
+
+@@ -398,10 +399,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+
+ for_each_rsnd_clk(clk, adg, i) {
+ ret = 0;
+- if (enable)
++ if (enable) {
+ ret = clk_prepare_enable(clk);
+- else
++
++ /*
++ * We shouldn't use clk_get_rate() under
++ * atomic context. Let's keep it when
++ * rsnd_adg_clk_enable() was called
++ */
++ adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
++ } else {
+ clk_disable_unprepare(clk);
++ }
+
+ if (ret < 0)
+ dev_warn(dev, "can't use clk %d\n", i);
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index 30e791a53352..232df04ca586 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -313,6 +313,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
+
+ if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
+ pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
++
++ if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
++ strncpy(rtd->pcm->streams[i].pcm->name,
++ rtd->pcm->streams[i].pcm->id,
++ sizeof(rtd->pcm->streams[i].pcm->name));
++ }
+ }
+
+ return 0;
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index 6173dd86c62c..18cf8404d27c 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -223,10 +223,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
+ };
+
+ static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
+- unsigned int oversample_rate,
++ unsigned long parent_rate,
++ unsigned int sampling_rate,
+ unsigned int word_size)
+ {
+- int div = oversample_rate / word_size / 2;
++ int div = parent_rate / sampling_rate / word_size / 2;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
+@@ -316,8 +317,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
+ return -EINVAL;
+ }
+
+- bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+- word_size);
++ bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
++ rate, word_size);
+ if (bclk_div < 0) {
+ dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
+ return -EINVAL;
+diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
+index ee90e6c3937c..2ae582a99b63 100644
+--- a/sound/soc/uniphier/aio-cpu.c
++++ b/sound/soc/uniphier/aio-cpu.c
+@@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
+ {
+ struct uniphier_aio *aio = uniphier_priv(dai);
+
+- reset_control_assert(aio->chip->rst);
+- clk_disable_unprepare(aio->chip->clk);
++ aio->chip->num_wup_aios--;
++ if (!aio->chip->num_wup_aios) {
++ reset_control_assert(aio->chip->rst);
++ clk_disable_unprepare(aio->chip->clk);
++ }
+
+ return 0;
+ }
+@@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+ if (!aio->chip->active)
+ return 0;
+
+- ret = clk_prepare_enable(aio->chip->clk);
+- if (ret)
+- return ret;
++ if (!aio->chip->num_wup_aios) {
++ ret = clk_prepare_enable(aio->chip->clk);
++ if (ret)
++ return ret;
+
+- ret = reset_control_deassert(aio->chip->rst);
+- if (ret)
+- goto err_out_clock;
++ ret = reset_control_deassert(aio->chip->rst);
++ if (ret)
++ goto err_out_clock;
++ }
+
+ aio_iecout_set_enable(aio->chip, true);
+ aio_chip_init(aio->chip);
+@@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+
+ ret = aio_init(sub);
+ if (ret)
+- goto err_out_clock;
++ goto err_out_reset;
+
+ if (!sub->setting)
+ continue;
+@@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+ aio_port_reset(sub);
+ aio_src_reset(sub);
+ }
++ aio->chip->num_wup_aios++;
+
+ return 0;
+
++err_out_reset:
++ if (!aio->chip->num_wup_aios)
++ reset_control_assert(aio->chip->rst);
+ err_out_clock:
+- clk_disable_unprepare(aio->chip->clk);
++ if (!aio->chip->num_wup_aios)
++ clk_disable_unprepare(aio->chip->clk);
+
+ return ret;
+ }
+@@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
+ return PTR_ERR(chip->rst);
+
+ chip->num_aios = chip->chip_spec->num_dais;
++ chip->num_wup_aios = chip->num_aios;
+ chip->aios = devm_kcalloc(dev,
+ chip->num_aios, sizeof(struct uniphier_aio),
+ GFP_KERNEL);
+diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
+index ca6ccbae0ee8..a7ff7e556429 100644
+--- a/sound/soc/uniphier/aio.h
++++ b/sound/soc/uniphier/aio.h
+@@ -285,6 +285,7 @@ struct uniphier_aio_chip {
+
+ struct uniphier_aio *aios;
+ int num_aios;
++ int num_wup_aios;
+ struct uniphier_aio_pll *plls;
+ int num_plls;
+
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 35c57a4204a8..13ea63c959d3 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -464,6 +464,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
+ }
+ ep = get_endpoint(alts, 1)->bEndpointAddress;
+ if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
++ get_endpoint(alts, 0)->bSynchAddress != 0 &&
+ ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
+ (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+ dev_err(&dev->dev,
+diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
+index 57aaeaf8e192..edba4d93e9e6 100644
+--- a/tools/include/uapi/asm/bitsperlong.h
++++ b/tools/include/uapi/asm/bitsperlong.h
+@@ -1,22 +1,22 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #if defined(__i386__) || defined(__x86_64__)
+-#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
+ #elif defined(__aarch64__)
+-#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
+ #elif defined(__powerpc__)
+-#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
+ #elif defined(__s390__)
+-#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
+ #elif defined(__sparc__)
+-#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
+ #elif defined(__mips__)
+-#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
+ #elif defined(__ia64__)
+-#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
+ #elif defined(__riscv)
+-#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
+ #elif defined(__alpha__)
+-#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+ #else
+ #include <asm-generic/bitsperlong.h>
+ #endif
+diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
+index 0b4e833088a4..95a43ccb6dd0 100644
+--- a/tools/lib/traceevent/Makefile
++++ b/tools/lib/traceevent/Makefile
+@@ -55,15 +55,15 @@ set_plugin_dir := 1
+
+ # Set plugin_dir to preffered global plugin location
+ # If we install under $HOME directory we go under
+-# $(HOME)/.traceevent/plugins
++# $(HOME)/.local/lib/traceevent/plugins
+ #
+ # We dont set PLUGIN_DIR in case we install under $HOME
+ # directory, because by default the code looks under:
+-# $(HOME)/.traceevent/plugins by default.
++# $(HOME)/.local/lib/traceevent/plugins by default.
+ #
+ ifeq ($(plugin_dir),)
+ ifeq ($(prefix),$(HOME))
+-override plugin_dir = $(HOME)/.traceevent/plugins
++override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
+ set_plugin_dir := 0
+ else
+ override plugin_dir = $(libdir)/traceevent/plugins
+diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
+index f17e25097e1e..52874eb94ace 100644
+--- a/tools/lib/traceevent/event-plugin.c
++++ b/tools/lib/traceevent/event-plugin.c
+@@ -16,7 +16,7 @@
+ #include "event-parse.h"
+ #include "event-utils.h"
+
+-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
++#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
+
+ static struct registered_plugin_options {
+ struct registered_plugin_options *next;
+diff --git a/tools/perf/perf.c b/tools/perf/perf.c
+index a11cb006f968..80f8ae8b1366 100644
+--- a/tools/perf/perf.c
++++ b/tools/perf/perf.c
+@@ -439,6 +439,9 @@ int main(int argc, const char **argv)
+
+ srandom(time(NULL));
+
++ /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
++ config_exclusive_filename = getenv("PERF_CONFIG");
++
+ err = perf_config(perf_default_config, NULL);
+ if (err)
+ return err;
+diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+index 4ce276efe6b4..fe223fc5c1f8 100755
+--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+@@ -29,6 +29,10 @@ if [ $err -ne 0 ] ; then
+ exit $err
+ fi
+
++# Do not use whatever ~/.perfconfig file, it may change the output
++# via trace.{show_timestamp,show_prefix,etc}
++export PERF_CONFIG=/dev/null
++
+ trace_open_vfs_getname
+ err=$?
+ rm -f ${file}
+diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
+index 1be3b4cf0827..82346ca06f17 100644
+--- a/tools/perf/trace/beauty/ioctl.c
++++ b/tools/perf/trace/beauty/ioctl.c
+@@ -22,7 +22,7 @@
+ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
+ {
+ static const char *ioctl_tty_cmd[] = {
+- "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
++ [_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+ "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
+ "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
+ "TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 54c34c107cab..0c70788593c8 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -2184,8 +2184,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
+ /* On s390 the socket_id number is not related to the numbers of cpus.
+ * The socket_id number might be higher than the numbers of cpus.
+ * This depends on the configuration.
++ * AArch64 is the same.
+ */
+- if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
++ if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
++ || !strncmp(ph->env.arch, "aarch64", 7)))
+ do_core_id_test = false;
+
+ for (i = 0; i < (u32)cpu_nr; i++) {
+diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
+index 7ffe562e7ae7..2627b038b6f2 100644
+--- a/tools/perf/util/xyarray.h
++++ b/tools/perf/util/xyarray.h
+@@ -2,6 +2,7 @@
+ #ifndef _PERF_XYARRAY_H_
+ #define _PERF_XYARRAY_H_ 1
+
++#include <linux/compiler.h>
+ #include <sys/types.h>
+
+ struct xyarray {
+@@ -10,7 +11,7 @@ struct xyarray {
+ size_t entries;
+ size_t max_x;
+ size_t max_y;
+- char contents[];
++ char contents[] __aligned(8);
+ };
+
+ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);