author		Mike Pagano <mpagano@gentoo.org>	2019-12-31 12:48:28 -0500
committer	Mike Pagano <mpagano@gentoo.org>	2019-12-31 12:48:28 -0500
commit		26acee3a7d09b52f0931bbb00de5253f7ce5701c (patch)
tree		e155fca2e803a48a4492bf4da2258c7264e473f0 /1006_linux-5.4.7.patch
parent		Add CONFIG selections for GENTOO_LINUX_INIT_SYSTEMD (diff)
download	linux-patches-26acee3a7d09b52f0931bbb00de5253f7ce5701c.tar.gz
		linux-patches-26acee3a7d09b52f0931bbb00de5253f7ce5701c.tar.bz2
		linux-patches-26acee3a7d09b52f0931bbb00de5253f7ce5701c.zip
Linux patch 5.4.7 (tag: 5.4-8)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1006_linux-5.4.7.patch')
-rw-r--r--	1006_linux-5.4.7.patch	13632
1 file changed, 13632 insertions, 0 deletions
diff --git a/1006_linux-5.4.7.patch b/1006_linux-5.4.7.patch
new file mode 100644
index 00000000..213068aa
--- /dev/null
+++ b/1006_linux-5.4.7.patch
@@ -0,0 +1,13632 @@
+diff --git a/Makefile b/Makefile
+index 20ec7c20279e..0e2e0a034064 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+index d1eae47b83f6..82f7ae030600 100644
+--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
++++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+@@ -160,12 +160,12 @@
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+- /* Used by DSS */
++ /* Used by DSS and is the "zerov_regulator" trigger for SoC off mode */
+ vcsi: VCSI {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <1000>;
+- regulator-boot-on;
++ regulator-always-on;
+ };
+
+ vdac: VDAC {
+diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
+index c9f72b2665f1..43ae4e0c968f 100644
+--- a/arch/arm64/kernel/psci.c
++++ b/arch/arm64/kernel/psci.c
+@@ -81,7 +81,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
+
+ static int cpu_psci_cpu_kill(unsigned int cpu)
+ {
+- int err, i;
++ int err;
++ unsigned long start, end;
+
+ if (!psci_ops.affinity_info)
+ return 0;
+@@ -91,16 +92,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
+ * while it is dying. So, try again a few times.
+ */
+
+- for (i = 0; i < 10; i++) {
++ start = jiffies;
++ end = start + msecs_to_jiffies(100);
++ do {
+ err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+ if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+- pr_info("CPU%d killed.\n", cpu);
++ pr_info("CPU%d killed (polled %d ms)\n", cpu,
++ jiffies_to_msecs(jiffies - start));
+ return 0;
+ }
+
+- msleep(10);
+- pr_info("Retrying again to check for CPU kill\n");
+- }
++ usleep_range(100, 1000);
++ } while (time_before(jiffies, end));
+
+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+ cpu, err);
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 46822afc57e0..01a515e0171e 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -2360,8 +2360,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
+ if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
+ return NULL;
+
++ if (!index_to_params(id, &params))
++ return NULL;
++
+ table = get_target_table(vcpu->arch.target, true, &num);
+- r = find_reg_by_id(id, &params, table, num);
++ r = find_reg(&params, table, num);
+ if (!r)
+ r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index 9228f7386220..fb842965d541 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -218,13 +218,14 @@
+ * ordering will be done by smp_llsc_mb() and friends.
+ */
+ #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+-#define __WEAK_LLSC_MB " sync \n"
+-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+-#define __LLSC_CLOBBER
++# define __WEAK_LLSC_MB sync
++# define smp_llsc_mb() \
++ __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
++# define __LLSC_CLOBBER
+ #else
+-#define __WEAK_LLSC_MB " \n"
+-#define smp_llsc_mb() do { } while (0)
+-#define __LLSC_CLOBBER "memory"
++# define __WEAK_LLSC_MB
++# define smp_llsc_mb() do { } while (0)
++# define __LLSC_CLOBBER "memory"
+ #endif
+
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
+index b83b0397462d..110220705e97 100644
+--- a/arch/mips/include/asm/futex.h
++++ b/arch/mips/include/asm/futex.h
+@@ -16,6 +16,7 @@
+ #include <asm/barrier.h>
+ #include <asm/compiler.h>
+ #include <asm/errno.h>
++#include <asm/sync.h>
+ #include <asm/war.h>
+
+ #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+@@ -32,7 +33,7 @@
+ " .set arch=r4000 \n" \
+ "2: sc $1, %2 \n" \
+ " beqzl $1, 1b \n" \
+- __WEAK_LLSC_MB \
++ __stringify(__WEAK_LLSC_MB) " \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+@@ -50,19 +51,19 @@
+ "i" (-EFAULT) \
+ : "memory"); \
+ } else if (cpu_has_llsc) { \
+- loongson_llsc_mb(); \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set push \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
++ " " __SYNC(full, loongson3_war) " \n" \
+ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
+ " .set pop \n" \
+ " " insn " \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ "2: "user_sc("$1", "%2")" \n" \
+ " beqz $1, 1b \n" \
+- __WEAK_LLSC_MB \
++ __stringify(__WEAK_LLSC_MB) " \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+@@ -147,7 +148,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ " .set arch=r4000 \n"
+ "2: sc $1, %2 \n"
+ " beqzl $1, 1b \n"
+- __WEAK_LLSC_MB
++ __stringify(__WEAK_LLSC_MB) " \n"
+ "3: \n"
+ " .insn \n"
+ " .set pop \n"
+@@ -164,13 +165,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+- loongson_llsc_mb();
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
++ " " __SYNC(full, loongson3_war) " \n"
+ "1: "user_ll("%1", "%3")" \n"
+ " bne %1, %z4, 3f \n"
+ " .set pop \n"
+@@ -178,8 +179,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ "2: "user_sc("$1", "%2")" \n"
+ " beqz $1, 1b \n"
+- __WEAK_LLSC_MB
+- "3: \n"
++ "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
+ " .insn \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+@@ -194,7 +194,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ "i" (-EFAULT)
+ : "memory");
+- loongson_llsc_mb();
+ } else
+ return -ENOSYS;
+
+diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
+index 93a9dce31f25..813dfe5f45a5 100644
+--- a/arch/mips/include/asm/pgtable-64.h
++++ b/arch/mips/include/asm/pgtable-64.h
+@@ -18,10 +18,12 @@
+ #include <asm/fixmap.h>
+
+ #define __ARCH_USE_5LEVEL_HACK
+-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
++#if CONFIG_PGTABLE_LEVELS == 2
+ #include <asm-generic/pgtable-nopmd.h>
+-#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
++#elif CONFIG_PGTABLE_LEVELS == 3
+ #include <asm-generic/pgtable-nopud.h>
++#else
++#include <asm-generic/5level-fixup.h>
+ #endif
+
+ /*
+@@ -216,6 +218,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+ return pgd_val(pgd);
+ }
+
++#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd))
++#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))
++
+ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+ {
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
+index 1434fa60f3db..94e9ce994494 100644
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -51,6 +51,7 @@ choice
+ select MIPS_GIC
+ select COMMON_CLK
+ select CLKSRC_MIPS_GIC
++ select HAVE_PCI if PCI_MT7621
+ endchoice
+
+ choice
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index e9a960e28f3c..cac95a3f30c2 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -36,10 +36,12 @@
+ #endif
+
+ #ifdef CONFIG_PPC_PSERIES
++DECLARE_STATIC_KEY_FALSE(shared_processor);
++
+ #define vcpu_is_preempted vcpu_is_preempted
+ static inline bool vcpu_is_preempted(int cpu)
+ {
+- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
++ if (!static_branch_unlikely(&shared_processor))
+ return false;
+ return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+ }
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 5645bc9cbc09..add67498c126 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs)
+
+ trace_irq_entry(regs);
+
+- check_stack_overflow();
+-
+ /*
+ * Query the platform PIC for the interrupt & ack it.
+ *
+@@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs)
+ irqsp = hardirq_ctx[raw_smp_processor_id()];
+ sirqsp = softirq_ctx[raw_smp_processor_id()];
+
++ check_stack_overflow();
++
+ /* Already there ? */
+ if (unlikely(cursp == irqsp || cursp == sirqsp)) {
+ __do_irq(regs);
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 0496e66aaa56..c6fbbd29bd87 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ ld r7, VCPU_GPR(R7)(r4)
+ bne ret_to_ultra
+
+- lwz r0, VCPU_CR(r4)
++ ld r0, VCPU_CR(r4)
+ mtcr r0
+
+ ld r0, VCPU_GPR(R0)(r4)
+@@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ * R3 = UV_RETURN
+ */
+ ret_to_ultra:
+- lwz r0, VCPU_CR(r4)
++ ld r0, VCPU_CR(r4)
+ mtcr r0
+
+ ld r0, VCPU_GPR(R3)(r4)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 0a40201f315f..0c8421dd01ab 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -74,6 +74,9 @@
+ #include "pseries.h"
+ #include "../../../../drivers/pci/pci.h"
+
++DEFINE_STATIC_KEY_FALSE(shared_processor);
++EXPORT_SYMBOL_GPL(shared_processor);
++
+ int CMO_PrPSP = -1;
+ int CMO_SecPSP = -1;
+ unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
+@@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void)
+
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ vpa_init(boot_cpuid);
++
++ if (lppaca_shared_proc(get_lppaca()))
++ static_branch_enable(&shared_processor);
++
+ ppc_md.power_save = pseries_lpar_idle;
+ ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
+ #ifdef CONFIG_PCI_IOV
+diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
+index d39e0f079217..686fe7aa192f 100644
+--- a/arch/s390/crypto/sha_common.c
++++ b/arch/s390/crypto/sha_common.c
+@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bsize = crypto_shash_blocksize(desc->tfm);
+ u64 bits;
+- unsigned int n, mbl_offset;
++ unsigned int n;
++ int mbl_offset;
+
+ n = ctx->count % bsize;
+ bits = ctx->count * 8;
+- mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
++ mbl_offset = s390_crypto_shash_parmsize(ctx->func);
+ if (mbl_offset < 0)
+ return -EINVAL;
+
++ mbl_offset = mbl_offset / sizeof(u32);
++
+ /* set total msg bit length (mbl) in CPACF parmblock */
+ switch (ctx->func) {
+ case CPACF_KLMD_SHA_1:
+diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
+index bccb8f4a63e2..77606c4acd58 100644
+--- a/arch/s390/include/asm/pgalloc.h
++++ b/arch/s390/include/asm/pgalloc.h
+@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ return (p4d_t *) table;
+ }
+-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
++
++static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
++{
++ if (!mm_p4d_folded(mm))
++ crst_table_free(mm, (unsigned long *) p4d);
++}
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+ crst_table_init(table, _REGION3_ENTRY_EMPTY);
+ return (pud_t *) table;
+ }
+-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
++
++static inline void pud_free(struct mm_struct *mm, pud_t *pud)
++{
++ if (!mm_pud_folded(mm))
++ crst_table_free(mm, (unsigned long *) pud);
++}
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+ {
+@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ {
++ if (mm_pmd_folded(mm))
++ return;
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ crst_table_free(mm, (unsigned long *) pmd);
+ }
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 64539c221672..2dc9eb4e1acc 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -10,8 +10,9 @@
+ #ifndef _ASM_S390_TIMEX_H
+ #define _ASM_S390_TIMEX_H
+
+-#include <asm/lowcore.h>
++#include <linux/preempt.h>
+ #include <linux/time64.h>
++#include <asm/lowcore.h>
+
+ /* The value of the TOD clock for 1.1.1970. */
+ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+@@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8);
+ /**
+ * get_clock_monotonic - returns current time in clock rate units
+ *
+- * The caller must ensure that preemption is disabled.
+ * The clock and tod_clock_base get changed via stop_machine.
+- * Therefore preemption must be disabled when calling this
+- * function, otherwise the returned value is not guaranteed to
+- * be monotonic.
++ * Therefore preemption must be disabled, otherwise the returned
++ * value is not guaranteed to be monotonic.
+ */
+ static inline unsigned long long get_tod_clock_monotonic(void)
+ {
+- return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
++ unsigned long long tod;
++
++ preempt_disable_notrace();
++ tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
++ preempt_enable_notrace();
++ return tod;
+ }
+
+ /**
+diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
+index 7abe6ae261b4..f304802ecf7b 100644
+--- a/arch/s390/kernel/dis.c
++++ b/arch/s390/kernel/dis.c
+@@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
+ ptr += sprintf(ptr, "%%c%i", value);
+ else if (operand->flags & OPERAND_VR)
+ ptr += sprintf(ptr, "%%v%i", value);
+- else if (operand->flags & OPERAND_PCREL)
+- ptr += sprintf(ptr, "%lx", (signed int) value
+- + addr);
+- else if (operand->flags & OPERAND_SIGNED)
++ else if (operand->flags & OPERAND_PCREL) {
++ void *pcrel = (void *)((int)value + addr);
++
++ ptr += sprintf(ptr, "%px", pcrel);
++ } else if (operand->flags & OPERAND_SIGNED)
+ ptr += sprintf(ptr, "%i", value);
+ else
+ ptr += sprintf(ptr, "%u", value);
+@@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs)
+ else
+ *ptr++ = ' ';
+ addr = regs->psw.addr + start - 32;
+- ptr += sprintf(ptr, "%016lx: ", addr);
++ ptr += sprintf(ptr, "%px: ", (void *)addr);
+ if (start + opsize >= end)
+ break;
+ for (i = 0; i < opsize; i++)
+@@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
+ opsize = insn_length(*code);
+ if (opsize > len)
+ break;
+- ptr += sprintf(ptr, "%p: ", code);
++ ptr += sprintf(ptr, "%px: ", code);
+ for (i = 0; i < opsize; i++)
+ ptr += sprintf(ptr, "%02x", code[i]);
+ *ptr++ = '\t';
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 48d48b6187c0..0eb1d1cc53a8 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = {
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+ };
+
+-static int __hw_perf_event_init(struct perf_event *event)
++static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+ {
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+@@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event)
+ int err = 0;
+ u64 ev;
+
+- switch (attr->type) {
++ switch (type) {
+ case PERF_TYPE_RAW:
+ /* Raw events are used to access counters directly,
+ * hence do not permit excludes */
+@@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event)
+
+ static int cpumf_pmu_event_init(struct perf_event *event)
+ {
++ unsigned int type = event->attr.type;
+ int err;
+
+- switch (event->attr.type) {
+- case PERF_TYPE_HARDWARE:
+- case PERF_TYPE_HW_CACHE:
+- case PERF_TYPE_RAW:
+- err = __hw_perf_event_init(event);
+- break;
+- default:
++ if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
++ err = __hw_perf_event_init(event, type);
++ else if (event->pmu->type == type)
++ /* Registered as unknown PMU */
++ err = __hw_perf_event_init(event, PERF_TYPE_RAW);
++ else
+ return -ENOENT;
+- }
+
+ if (unlikely(err) && event->destroy)
+ event->destroy(event);
+@@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void)
+ return -ENODEV;
+
+ cpumf_pmu.attr_groups = cpumf_cf_event_group();
+- rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
++ rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
+ if (rc)
+ pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
+ return rc;
+diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
+index 2654e348801a..e949ab832ed7 100644
+--- a/arch/s390/kernel/perf_cpum_cf_diag.c
++++ b/arch/s390/kernel/perf_cpum_cf_diag.c
+@@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event)
+ int err = -ENOENT;
+
+ debug_sprintf_event(cf_diag_dbg, 5,
+- "%s event %p cpu %d config %#llx "
++ "%s event %p cpu %d config %#llx type:%u "
+ "sample_type %#llx cf_diag_events %d\n", __func__,
+- event, event->cpu, attr->config, attr->sample_type,
+- atomic_read(&cf_diag_events));
++ event, event->cpu, attr->config, event->pmu->type,
++ attr->sample_type, atomic_read(&cf_diag_events));
+
+ if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
+- event->attr.type != PERF_TYPE_RAW)
++ event->attr.type != event->pmu->type)
+ goto out;
+
+ /* Raw events are used to access counters directly,
+@@ -693,7 +693,7 @@ static int __init cf_diag_init(void)
+ }
+ debug_register_view(cf_diag_dbg, &debug_sprintf_view);
+
+- rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
++ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
+ if (rc) {
+ debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
+ debug_unregister(cf_diag_dbg);
+diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
+index fcb6c2e92b07..1e75cc983546 100644
+--- a/arch/s390/kernel/perf_event.c
++++ b/arch/s390/kernel/perf_event.c
+@@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
+ struct unwind_state state;
++ unsigned long addr;
+
+- unwind_for_each_frame(&state, current, regs, 0)
+- perf_callchain_store(entry, state.ip);
++ unwind_for_each_frame(&state, current, regs, 0) {
++ addr = unwind_get_return_address(&state);
++ if (!addr || perf_callchain_store(entry, addr))
++ return;
++ }
+ }
+
+ /* Perf definitions for PMU event attributes in sysfs */
+diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
+index 1864a8bb9622..59ad7997fed1 100644
+--- a/arch/s390/mm/maccess.c
++++ b/arch/s390/mm/maccess.c
+@@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size)
+ spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
+ }
+
+-static int __memcpy_real(void *dest, void *src, size_t count)
++static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
+ {
+ register unsigned long _dest asm("2") = (unsigned long) dest;
+ register unsigned long _len1 asm("3") = (unsigned long) count;
+@@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count)
+ return rc;
+ }
+
+-static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
+- unsigned long count)
++static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
++ unsigned long src,
++ unsigned long count)
+ {
+ int irqs_disabled, rc;
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+- flags = __arch_local_irq_stnsm(0xf8UL);
++ flags = arch_local_irq_save();
+ irqs_disabled = arch_irqs_disabled_flags(flags);
+ if (!irqs_disabled)
+ trace_hardirqs_off();
++ __arch_local_irq_stnsm(0xf8); // disable DAT
+ rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
++ if (flags & PSW_MASK_DAT)
++ __arch_local_irq_stosm(0x04); // enable DAT
+ if (!irqs_disabled)
+ trace_hardirqs_on();
+ __arch_local_irq_ssm(flags);
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index ce88211b9c6c..c8c16b5eed6b 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -23,6 +23,7 @@
+ #include <linux/filter.h>
+ #include <linux/init.h>
+ #include <linux/bpf.h>
++#include <linux/mm.h>
+ #include <asm/cacheflush.h>
+ #include <asm/dis.h>
+ #include <asm/facility.h>
+@@ -1369,7 +1370,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ }
+
+ memset(&jit, 0, sizeof(jit));
+- jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
++ jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
+ if (jit.addrs == NULL) {
+ fp = orig_fp;
+ goto out;
+@@ -1422,7 +1423,7 @@ skip_init_ctx:
+ if (!fp->is_func || extra_pass) {
+ bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
+ free_addrs:
+- kfree(jit.addrs);
++ kvfree(jit.addrs);
+ kfree(jit_data);
+ fp->aux->jit_data = NULL;
+ }
+diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
+index 96f0246ad2f2..82b63208135a 100644
+--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
++++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
+@@ -134,7 +134,7 @@ enum {
+ GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
+ GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
+ GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
+- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
++ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
+ GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
+ GPIO_FN_ET0_ETXD3_A,
+ GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
+diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
+index 0acf5ee45a21..ef5638f641f2 100644
+--- a/arch/x86/include/asm/crash.h
++++ b/arch/x86/include/asm/crash.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_CRASH_H
+ #define _ASM_X86_CRASH_H
+
++struct kimage;
++
+ int crash_load_segments(struct kimage *image);
+ int crash_copy_backup_region(struct kimage *image);
+ int crash_setup_memmap_entries(struct kimage *image,
+diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
+index 0c47aa82e2e2..28183ee3cc42 100644
+--- a/arch/x86/include/asm/fixmap.h
++++ b/arch/x86/include/asm/fixmap.h
+@@ -156,7 +156,7 @@ extern pte_t *kmap_pte;
+ extern pte_t *pkmap_page_table;
+
+ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
+-void native_set_fixmap(enum fixed_addresses idx,
++void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
+
+ #ifndef CONFIG_PARAVIRT_XXL
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index e046a405743d..90eb70df0b18 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -48,12 +48,13 @@
+ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
+ * named __ia32_sys_*()
+ */
+-#define SYSCALL_DEFINE0(sname) \
+- SYSCALL_METADATA(_##sname, 0); \
+- asmlinkage long __x64_sys_##sname(void); \
+- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+- SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
+- asmlinkage long __x64_sys_##sname(void)
++
++#define SYSCALL_DEFINE0(sname) \
++ SYSCALL_METADATA(_##sname, 0); \
++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
++ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
++ SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+
+ #define COND_SYSCALL(name) \
+ cond_syscall(__x64_sys_##name); \
+@@ -181,11 +182,11 @@
+ * macros to work correctly.
+ */
+ #ifndef SYSCALL_DEFINE0
+-#define SYSCALL_DEFINE0(sname) \
+- SYSCALL_METADATA(_##sname, 0); \
+- asmlinkage long __x64_sys_##sname(void); \
+- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+- asmlinkage long __x64_sys_##sname(void)
++#define SYSCALL_DEFINE0(sname) \
++ SYSCALL_METADATA(_##sname, 0); \
++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
++ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
++ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+ #endif
+
+ #ifndef COND_SYSCALL
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index d6af97fd170a..f0262cb5657a 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1727,9 +1727,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+
+ static inline bool ioapic_irqd_mask(struct irq_data *data)
+ {
+- /* If we are moving the irq we need to mask it */
++ /* If we are moving the IRQ we need to mask it */
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+- mask_ioapic_irq(data);
++ if (!irqd_irq_masked(data))
++ mask_ioapic_irq(data);
+ return true;
+ }
+ return false;
+@@ -1766,7 +1767,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
+ */
+ if (!io_apic_level_ack_pending(data->chip_data))
+ irq_move_masked_irq(data);
+- unmask_ioapic_irq(data);
++ /* If the IRQ is masked in the core, leave it: */
++ if (!irqd_irq_masked(data))
++ unmask_ioapic_irq(data);
+ }
+ }
+ #else
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 6ea7fdc82f3c..259f3f4e2e5f 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -266,10 +266,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
+ smca_set_misc_banks_map(bank, cpu);
+
+ /* Return early if this bank was already initialized. */
+- if (smca_banks[bank].hwid)
++ if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
+ return;
+
+- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
++ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+ pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
+ return;
+ }
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 743370ee4983..aecb15ba66cd 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -814,8 +814,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
+
++ m->bank = i;
+ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+- m->bank = i;
+ mce_read_aux(m, i);
+ *msg = tmp;
+ return 1;
+diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
+index 6e2becf547c5..bc441d68d060 100644
+--- a/arch/x86/kernel/cpu/mce/therm_throt.c
++++ b/arch/x86/kernel/cpu/mce/therm_throt.c
+@@ -188,7 +188,7 @@ static void therm_throt_process(bool new_event, int event, int level)
+ /* if we just entered the thermal event */
+ if (new_event) {
+ if (event == THERMAL_THROTTLING_EVENT)
+- pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
++ pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+ this_cpu,
+ level == CORE_LEVEL ? "Core" : "Package",
+ state->count);
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index 4cba91ec8049..606711f5ebf8 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __initdata = {
+ */
+ { PCI_VENDOR_ID_INTEL, 0x0f00,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
++ { PCI_VENDOR_ID_INTEL, 0x3e20,
++ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_INTEL, 0x3ec4,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+ { PCI_VENDOR_ID_BROADCOM, 0x4331,
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 53dbcca9af09..b1d5a8c94a57 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
+ entry->edx |= F(SPEC_CTRL);
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ entry->edx |= F(INTEL_STIBP);
+- if (boot_cpu_has(X86_FEATURE_SSBD))
++ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ entry->edx |= F(SPEC_CTRL_SSBD);
+ /*
+ * We emulate ARCH_CAPABILITIES in software even
+@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->ebx |= F(AMD_IBRS);
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ entry->ebx |= F(AMD_STIBP);
+- if (boot_cpu_has(X86_FEATURE_SSBD))
++ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++ boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ entry->ebx |= F(AMD_SSBD);
+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ entry->ebx |= F(AMD_SSB_NO);
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index e0b85930dd77..0a0e9112f284 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -333,7 +333,7 @@ AVXcode: 1
+ 06: CLTS
+ 07: SYSRET (o64)
+ 08: INVD
+-09: WBINVD
++09: WBINVD | WBNOINVD (F3)
+ 0a:
+ 0b: UD2 (1B)
+ 0c:
+@@ -364,7 +364,7 @@ AVXcode: 1
+ # a ModR/M byte.
+ 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+ 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+-1c:
++1c: Grp20 (1A),(1C)
+ 1d:
+ 1e:
+ 1f: NOP Ev
+@@ -792,6 +792,8 @@ f3: Grp17 (1A)
+ f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+ f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+ f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
++f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
++f9: MOVDIRI My,Gy
+ EndTable
+
+ Table: 3-byte opcode 2 (0x0f 0x3a)
+@@ -943,9 +945,9 @@ GrpTable: Grp6
+ EndTable
+
+ GrpTable: Grp7
+-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
++0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
++1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
++2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
+ 3: LIDT Ms
+ 4: SMSW Mw/Rv
+ 5: rdpkru (110),(11B) | wrpkru (111),(11B)
+@@ -1020,7 +1022,7 @@ GrpTable: Grp15
+ 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+ 4: XSAVE | ptwrite Ey (F3),(11B)
+ 5: XRSTOR | lfence (11B)
+-6: XSAVEOPT | clwb (66) | mfence (11B)
++6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
+ 7: clflush | clflushopt (66) | sfence (11B)
+ EndTable
+
+@@ -1051,6 +1053,10 @@ GrpTable: Grp19
+ 6: vscatterpf1qps/d Wx (66),(ev)
+ EndTable
+
++GrpTable: Grp20
++0: cldemote Mb
++EndTable
++
+ # AMD's Prefetch Group
+ GrpTable: GrpP
+ 0: PREFETCH
+diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
+index f98a0c956764..9b41391867dc 100644
+--- a/arch/x86/math-emu/fpu_system.h
++++ b/arch/x86/math-emu/fpu_system.h
+@@ -107,6 +107,8 @@ static inline bool seg_writable(struct desc_struct *d)
+ #define FPU_access_ok(y,z) if ( !access_ok(y,z) ) \
+ math_abort(FPU_info,SIGSEGV)
+ #define FPU_abort math_abort(FPU_info, SIGSEGV)
++#define FPU_copy_from_user(to, from, n) \
++ do { if (copy_from_user(to, from, n)) FPU_abort; } while (0)
+
+ #undef FPU_IGNORE_CODE_SEGV
+ #ifdef FPU_IGNORE_CODE_SEGV
+@@ -122,7 +124,7 @@ static inline bool seg_writable(struct desc_struct *d)
+ #define FPU_code_access_ok(z) FPU_access_ok((void __user *)FPU_EIP,z)
+ #endif
+
+-#define FPU_get_user(x,y) get_user((x),(y))
+-#define FPU_put_user(x,y) put_user((x),(y))
++#define FPU_get_user(x,y) do { if (get_user((x),(y))) FPU_abort; } while (0)
++#define FPU_put_user(x,y) do { if (put_user((x),(y))) FPU_abort; } while (0)
+
+ #endif
+diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
+index f3779743d15e..fe6246ff9887 100644
+--- a/arch/x86/math-emu/reg_ld_str.c
++++ b/arch/x86/math-emu/reg_ld_str.c
+@@ -85,7 +85,7 @@ int FPU_load_extended(long double __user *s, int stnr)
+
+ RE_ENTRANT_CHECK_OFF;
+ FPU_access_ok(s, 10);
+- __copy_from_user(sti_ptr, s, 10);
++ FPU_copy_from_user(sti_ptr, s, 10);
+ RE_ENTRANT_CHECK_ON;
+
+ return FPU_tagof(sti_ptr);
+@@ -1126,9 +1126,9 @@ void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
+ /* Copy all registers in stack order. */
+ RE_ENTRANT_CHECK_OFF;
+ FPU_access_ok(s, 80);
+- __copy_from_user(register_base + offset, s, other);
++ FPU_copy_from_user(register_base + offset, s, other);
+ if (offset)
+- __copy_from_user(register_base, s + other, offset);
++ FPU_copy_from_user(register_base, s + other, offset);
+ RE_ENTRANT_CHECK_ON;
+
+ for (i = 0; i < 8; i++) {
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 3e4b9035bb9a..7bd2c3a52297 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -643,8 +643,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
+ fixmaps_set++;
+ }
+
+-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
+- pgprot_t flags)
++void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
++ phys_addr_t phys, pgprot_t flags)
+ {
+ /* Sanitize 'prot' against any unsupported bits: */
+ pgprot_val(flags) &= __default_kernel_pte_mask;
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index e01267f99183..27ca68621137 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
+ return HRTIMER_NORESTART;
+ }
+
+-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
++static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+ {
+ struct ioc *ioc = iocg->ioc;
+ struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+ /* clear or maintain depending on the overage */
+ if (time_before_eq64(vtime, now->vnow)) {
+ blkcg_clear_delay(blkg);
+- return;
++ return false;
+ }
+ if (!atomic_read(&blkg->use_delay) &&
+ time_before_eq64(vtime, now->vnow + vmargin))
+- return;
++ return false;
+
+ /* use delay */
+ if (cost) {
+@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+ oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
+ if (hrtimer_is_queued(&iocg->delay_timer) &&
+ abs(oexpires - expires) <= margin_ns / 4)
+- return;
++ return true;
+
+ hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
+ margin_ns / 4, HRTIMER_MODE_ABS);
++ return true;
+ }
+
+ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
+@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
+ */
+ if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
+ atomic64_add(abs_cost, &iocg->abs_vdebt);
+- iocg_kick_delay(iocg, &now, cost);
++ if (iocg_kick_delay(iocg, &now, cost))
++ blkcg_schedule_throttle(rqos->q,
++ (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+ return;
+ }
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 9e524044d312..29472fb795f3 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -309,6 +309,7 @@ config CRYPTO_AEGIS128
+ config CRYPTO_AEGIS128_SIMD
+ bool "Support SIMD acceleration for AEGIS-128"
+ depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
++ depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
+ default y
+
+ config CRYPTO_AEGIS128_AESNI_SSE2
+diff --git a/crypto/Makefile b/crypto/Makefile
+index fcb1ee679782..aa740c8492b9 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -93,7 +93,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
+ aegis128-y := aegis128-core.o
+
+ ifeq ($(ARCH),arm)
+-CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp
++CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv8-a -mfloat-abi=softfp
+ CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8
+ aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o
+ endif
+diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
+index 76d2ce3a1b5b..5154e280ada2 100644
+--- a/crypto/asymmetric_keys/asym_tpm.c
++++ b/crypto/asymmetric_keys/asym_tpm.c
+@@ -486,6 +486,7 @@ static int tpm_key_encrypt(struct tpm_key *tk,
+ if (ret < 0)
+ goto error_free_tfm;
+
++ ret = -ENOMEM;
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index 364b9df9d631..d7f43d4ea925 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -184,6 +184,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
++ ret = -ENOMEM;
+ req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ goto error_free_tfm;
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 4a2cde2c536a..ce93a355bd1c 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -78,6 +78,17 @@ static const struct dmi_system_id lid_blacklst[] = {
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
++ {
++ /*
++ * Medion Akoya E2215T, notification of the LID device only
++ * happens on close, not on open and _LID always returns closed.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
++ },
++ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
++ },
+ {}
+ };
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 28c492be0a57..74c9b3032d46 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6708,6 +6708,9 @@ void ata_host_detach(struct ata_host *host)
+ {
+ int i;
+
++ /* Ensure ata_port probe has completed */
++ async_synchronize_full();
++
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_detach(host->ports[i]);
+
+diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
+index 37e5ae387400..4a66888e7253 100644
+--- a/drivers/base/firmware_loader/builtin/Makefile
++++ b/drivers/base/firmware_loader/builtin/Makefile
+@@ -8,7 +8,8 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
+ obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+
+ FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
+-FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
++comma := ,
++FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
+ ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
+ ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
+ PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index f6f77eaa7217..ef6e251857c8 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -417,18 +417,20 @@ out_free_page:
+ return ret;
+ }
+
+-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
++static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
++ int mode)
+ {
+ /*
+- * We use punch hole to reclaim the free space used by the
+- * image a.k.a. discard. However we do not support discard if
+- * encryption is enabled, because it may give an attacker
+- * useful information.
++ * We use fallocate to manipulate the space mappings used by the image
++ * a.k.a. discard/zerorange. However we do not support this if
++ * encryption is enabled, because it may give an attacker useful
++ * information.
+ */
+ struct file *file = lo->lo_backing_file;
+- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+ int ret;
+
++ mode |= FALLOC_FL_KEEP_SIZE;
++
+ if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+ ret = -EOPNOTSUPP;
+ goto out;
+@@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
+ switch (req_op(rq)) {
+ case REQ_OP_FLUSH:
+ return lo_req_flush(lo, rq);
+- case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_ZEROES:
+- return lo_discard(lo, rq, pos);
++ /*
++ * If the caller doesn't want deallocation, call zeroout to
++ * write zeroes the range. Otherwise, punch them out.
++ */
++ return lo_fallocate(lo, rq, pos,
++ (rq->cmd_flags & REQ_NOUNMAP) ?
++ FALLOC_FL_ZERO_RANGE :
++ FALLOC_FL_PUNCH_HOLE);
++ case REQ_OP_DISCARD:
++ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
+ case REQ_OP_WRITE:
+ if (lo->transfer)
+ return lo_write_transfer(lo, rq, pos);
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 57532465fb83..b4607dd96185 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
+ mutex_unlock(&nbd->config_lock);
+ ret = wait_event_interruptible(config->recv_wq,
+ atomic_read(&config->recv_threads) == 0);
+- if (ret) {
++ if (ret)
+ sock_shutdown(nbd);
+- flush_workqueue(nbd->recv_workq);
+- }
++ flush_workqueue(nbd->recv_workq);
++
+ mutex_lock(&nbd->config_lock);
+ nbd_bdev_reset(bdev);
+ /* user requested, ignore socket errors */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a9c35ebb30f8..23e606aaaea4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3807,8 +3807,8 @@ static int btusb_probe(struct usb_interface *intf,
+ btusb_check_needs_reset_resume(intf);
+ }
+
+-#ifdef CONFIG_BT_HCIBTUSB_RTL
+- if (id->driver_info & BTUSB_REALTEK) {
++ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
++ (id->driver_info & BTUSB_REALTEK)) {
+ hdev->setup = btrtl_setup_realtek;
+ hdev->shutdown = btrtl_shutdown_realtek;
+ hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+@@ -3819,7 +3819,6 @@ static int btusb_probe(struct usb_interface *intf,
+ */
+ set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+ }
+-#endif
+
+ if (id->driver_info & BTUSB_AMP) {
+ /* AMP controllers do not support SCO packets */
+diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
+index 38b719017186..648e39ce6bd9 100644
+--- a/drivers/char/hw_random/omap3-rom-rng.c
++++ b/drivers/char/hw_random/omap3-rom-rng.c
+@@ -121,7 +121,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
+ {
+ cancel_delayed_work_sync(&idle_work);
+ hwrng_unregister(&omap3_rom_rng_ops);
+- clk_disable_unprepare(rng_clk);
++ if (!rng_idle)
++ clk_disable_unprepare(rng_clk);
+ return 0;
+ }
+
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 2aab80e19ae0..3c8a559506e8 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -448,6 +448,8 @@ enum ipmi_stat_indexes {
+
+ #define IPMI_IPMB_NUM_SEQ 64
+ struct ipmi_smi {
++ struct module *owner;
++
+ /* What interface number are we? */
+ int intf_num;
+
+@@ -1220,6 +1222,11 @@ int ipmi_create_user(unsigned int if_num,
+ if (rv)
+ goto out_kfree;
+
++ if (!try_module_get(intf->owner)) {
++ rv = -ENODEV;
++ goto out_kfree;
++ }
++
+ /* Note that each existing user holds a refcount to the interface. */
+ kref_get(&intf->refcount);
+
+@@ -1349,6 +1356,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ }
+
+ kref_put(&intf->refcount, intf_free);
++ module_put(intf->owner);
+ }
+
+ int ipmi_destroy_user(struct ipmi_user *user)
+@@ -2459,7 +2467,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
+ * been recently fetched, this will just use the cached data. Otherwise
+ * it will run a new fetch.
+ *
+- * Except for the first time this is called (in ipmi_register_smi()),
++ * Except for the first time this is called (in ipmi_add_smi()),
+ * this will always return good data;
+ */
+ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
+@@ -3377,10 +3385,11 @@ static void redo_bmc_reg(struct work_struct *work)
+ kref_put(&intf->refcount, intf_free);
+ }
+
+-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+- void *send_info,
+- struct device *si_dev,
+- unsigned char slave_addr)
++int ipmi_add_smi(struct module *owner,
++ const struct ipmi_smi_handlers *handlers,
++ void *send_info,
++ struct device *si_dev,
++ unsigned char slave_addr)
+ {
+ int i, j;
+ int rv;
+@@ -3406,7 +3415,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+ return rv;
+ }
+
+-
++ intf->owner = owner;
+ intf->bmc = &intf->tmp_bmc;
+ INIT_LIST_HEAD(&intf->bmc->intfs);
+ mutex_init(&intf->bmc->dyn_mutex);
+@@ -3514,7 +3523,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+
+ return rv;
+ }
+-EXPORT_SYMBOL(ipmi_register_smi);
++EXPORT_SYMBOL(ipmi_add_smi);
+
+ static void deliver_smi_err_response(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg,
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 2ec47a69a2a6..b23b0b999232 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -61,6 +61,12 @@ static void tpm_dev_async_work(struct work_struct *work)
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
++ ret = tpm_try_get_ops(priv->chip);
++ if (ret) {
++ priv->response_length = ret;
++ goto out;
++ }
++
+ ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer));
+ tpm_put_ops(priv->chip);
+@@ -68,6 +74,7 @@ static void tpm_dev_async_work(struct work_struct *work)
+ priv->response_length = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
++out:
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+ }
+@@ -204,6 +211,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
++ tpm_put_ops(priv->chip);
+ mutex_unlock(&priv->buffer_mutex);
+ return size;
+ }
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 270f43acbb77..f528fc39ea6b 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -899,13 +899,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+- goto out_err;
++ goto err_start;
+ }
+
+ /* Take control of the TPM's interrupt hardware and shut it off */
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+- goto out_err;
++ goto err_start;
+
+ intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
+ TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+@@ -914,21 +914,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+
+ rc = tpm_chip_start(chip);
+ if (rc)
+- goto out_err;
++ goto err_start;
++
+ rc = tpm2_probe(chip);
+- tpm_chip_stop(chip);
+ if (rc)
+- goto out_err;
++ goto err_probe;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+- goto out_err;
++ goto err_probe;
+
+ priv->manufacturer_id = vendor;
+
+ rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
+ if (rc < 0)
+- goto out_err;
++ goto err_probe;
+
+ dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
+@@ -937,13 +937,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ probe = probe_itpm(chip);
+ if (probe < 0) {
+ rc = -ENODEV;
+- goto out_err;
++ goto err_probe;
+ }
+
+ /* Figure out the capabilities */
+ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+ if (rc < 0)
+- goto out_err;
++ goto err_probe;
+
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+@@ -977,10 +977,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+- goto out_err;
++ goto err_probe;
+ }
+
+- tpm_chip_start(chip);
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+ if (irq) {
+ tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+@@ -991,18 +990,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ } else {
+ tpm_tis_probe_irq(chip, intmask);
+ }
+- tpm_chip_stop(chip);
+ }
+
++ tpm_chip_stop(chip);
++
+ rc = tpm_chip_register(chip);
+ if (rc)
+- goto out_err;
+-
+- if (chip->ops->clk_enable != NULL)
+- chip->ops->clk_enable(chip, false);
++ goto err_start;
+
+ return 0;
+-out_err:
++
++err_probe:
++ tpm_chip_stop(chip);
++
++err_start:
+ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ chip->ops->clk_enable(chip, false);
+
+diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
+index 388bdb94f841..d3486ee79ab5 100644
+--- a/drivers/clk/imx/clk-composite-8m.c
++++ b/drivers/clk/imx/clk-composite-8m.c
+@@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
+ mux->reg = reg;
+ mux->shift = PCG_PCS_SHIFT;
+ mux->mask = PCG_PCS_MASK;
++ mux->lock = &imx_ccm_lock;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+@@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(const char *name,
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
++ gate->lock = &imx_ccm_lock;
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, &clk_mux_ops, div_hw,
+diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
+index 2022d9bead91..a0f650150367 100644
+--- a/drivers/clk/imx/clk-imx7ulp.c
++++ b/drivers/clk/imx/clk-imx7ulp.c
+@@ -40,6 +40,7 @@ static const struct clk_div_table ulp_div_table[] = {
+ { .val = 5, .div = 16, },
+ { .val = 6, .div = 32, },
+ { .val = 7, .div = 64, },
++ { /* sentinel */ },
+ };
+
+ static const int pcc2_uart_clk_ids[] __initconst = {
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index 7a815ec76aa5..d43b4a3c0de8 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -153,7 +153,7 @@ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
+ {
+ u32 val;
+
+- return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0,
++ return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
+ LOCK_TIMEOUT_US);
+ }
+
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index bc19d6c16aaa..a7db4f22a077 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2634,6 +2634,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ if (cpufreq_disabled())
+ return -ENODEV;
+
++ /*
++ * The cpufreq core depends heavily on the availability of device
++ * structure, make sure they are available before proceeding further.
++ */
++ if (!get_cpu_device(0))
++ return -EPROBE_DEFER;
++
+ if (!driver_data || !driver_data->verify || !driver_data->init ||
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target) ||
+diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+index eca32e443716..9907a165135b 100644
+--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
++++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+@@ -25,7 +25,7 @@
+ static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+
+ /**
+- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
++ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
+ * @versions: Set to the value parsed from efuse
+ *
+ * Returns 0 if success.
+@@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
+ return PTR_ERR(speedbin);
+
+ efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+- switch (efuse_value) {
+- case 0b0001:
+- *versions = 1;
+- break;
+- case 0b0011:
+- *versions = 2;
+- break;
+- default:
+- /*
+- * For other situations, we treat it as bin0.
+- * This vf table can be run for any good cpu.
+- */
++
++ /*
++ * We treat unexpected efuse values as if the SoC was from
++ * the slowest bin. Expected efuse values are 1-3, slowest
++ * to fastest.
++ */
++ if (efuse_value >= 1 && efuse_value <= 3)
++ *versions = efuse_value - 1;
++ else
+ *versions = 0;
+- break;
+- }
+
+ kfree(speedbin);
+ return 0;
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 00920a2b95ce..db99cee1991c 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -145,7 +145,7 @@ struct atmel_aes_xts_ctx {
+ u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
+ };
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ struct atmel_aes_authenc_ctx {
+ struct atmel_aes_base_ctx base;
+ struct atmel_sha_authenc_ctx *auth;
+@@ -157,7 +157,7 @@ struct atmel_aes_reqctx {
+ u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
+ };
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ struct atmel_aes_authenc_reqctx {
+ struct atmel_aes_reqctx base;
+
+@@ -486,7 +486,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
+ return (dd->flags & AES_FLAGS_ENCRYPT);
+ }
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
+ #endif
+
+@@ -515,7 +515,7 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
+
+ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+ {
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ if (dd->ctx->is_aead)
+ atmel_aes_authenc_complete(dd, err);
+ #endif
+@@ -1980,7 +1980,7 @@ static struct crypto_alg aes_xts_alg = {
+ }
+ };
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ /* authenc aead functions */
+
+ static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
+@@ -2467,7 +2467,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
+ {
+ int i;
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ if (dd->caps.has_authenc)
+ for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
+ crypto_unregister_aead(&aes_authenc_algs[i]);
+@@ -2514,7 +2514,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
+ goto err_aes_xts_alg;
+ }
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ if (dd->caps.has_authenc) {
+ for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+ err = crypto_register_aead(&aes_authenc_algs[i]);
+@@ -2526,7 +2526,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
+
+ return 0;
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ /* i = ARRAY_SIZE(aes_authenc_algs); */
+ err_aes_authenc_alg:
+ for (j = 0; j < i; j++)
+@@ -2716,7 +2716,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
+
+ atmel_aes_get_cap(aes_dd);
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
+ err = -EPROBE_DEFER;
+ goto iclk_unprepare;
+diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
+index cbd37a2edada..d6de810df44f 100644
+--- a/drivers/crypto/atmel-authenc.h
++++ b/drivers/crypto/atmel-authenc.h
+@@ -12,7 +12,7 @@
+ #ifndef __ATMEL_AUTHENC_H__
+ #define __ATMEL_AUTHENC_H__
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+
+ #include <crypto/authenc.h>
+ #include <crypto/hash.h>
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 84cb8748a795..d32626458e67 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -2212,7 +2212,7 @@ static struct ahash_alg sha_hmac_algs[] = {
+ },
+ };
+
+-#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
++#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
+ /* authenc functions */
+
+ static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index 294debd435b6..991a4425f006 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -1120,6 +1120,8 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
+ irq_name, irq);
+ return irq;
+ }
++ } else {
++ return -ENXIO;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, handler,
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+index 6536fd4bee65..7e5e092a23b3 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+@@ -72,7 +72,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ oi = 0;
+ oo = 0;
+ do {
+- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
++ todo = min(rx_cnt, ileft);
++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
+ if (todo) {
+ ileft -= todo;
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+@@ -87,7 +88,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
++ todo = min(tx_cnt, oleft);
++ todo = min_t(size_t, todo, (mo.length - oo) / 4);
+ if (todo) {
+ oleft -= todo;
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+@@ -239,7 +241,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ * todo is the number of consecutive 4byte word that we
+ * can read from current SG
+ */
+- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
++ todo = min(rx_cnt, ileft / 4);
++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
+ if (todo && !ob) {
+ writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+ todo);
+@@ -253,8 +256,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ * we need to be able to write all buf in one
+ * pass, so it is why we min() with rx_cnt
+ */
+- todo = min3(rx_cnt * 4 - ob, ileft,
+- mi.length - oi);
++ todo = min(rx_cnt * 4 - ob, ileft);
++ todo = min_t(size_t, todo, mi.length - oi);
+ memcpy(buf + ob, mi.addr + oi, todo);
+ ileft -= todo;
+ oi += todo;
+@@ -274,7 +277,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
++ dev_dbg(ss->dev,
++ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
+ mode,
+ oi, mi.length, ileft, areq->cryptlen, rx_cnt,
+ oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
+@@ -282,7 +286,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ if (!tx_cnt)
+ continue;
+ /* todo in 4bytes word */
+- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
++ todo = min(tx_cnt, oleft / 4);
++ todo = min_t(size_t, todo, (mo.length - oo) / 4);
+ if (todo) {
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oleft -= todo * 4;
+@@ -308,7 +313,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ * no more than remaining buffer
+ * no need to test against oleft
+ */
+- todo = min(mo.length - oo, obl - obo);
++ todo = min_t(size_t,
++ mo.length - oo, obl - obo);
+ memcpy(mo.addr + oo, bufo + obo, todo);
+ oleft -= todo;
+ obo += todo;
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+index fcffba5ef927..1369c5fa3087 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+@@ -272,8 +272,8 @@ static int sun4i_hash(struct ahash_request *areq)
+ */
+ while (op->len < 64 && i < end) {
+ /* how many bytes we can read from current SG */
+- in_r = min3(mi.length - in_i, end - i,
+- 64 - op->len);
++ in_r = min(end - i, 64 - op->len);
++ in_r = min_t(size_t, mi.length - in_i, in_r);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+@@ -293,8 +293,8 @@ static int sun4i_hash(struct ahash_request *areq)
+ }
+ if (mi.length - in_i > 3 && i < end) {
+ /* how many bytes we can read from current SG */
+- in_r = min3(mi.length - in_i, areq->nbytes - i,
+- ((mi.length - in_i) / 4) * 4);
++ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
++ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
+ /* how many bytes we can write in the device*/
+ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+ writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+@@ -320,8 +320,8 @@ static int sun4i_hash(struct ahash_request *areq)
+ if ((areq->nbytes - i) < 64) {
+ while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+ /* how many bytes we can read from current SG */
+- in_r = min3(mi.length - in_i, areq->nbytes - i,
+- 64 - op->len);
++ in_r = min(areq->nbytes - i, 64 - op->len);
++ in_r = min_t(size_t, mi.length - in_i, in_r);
+ memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+ op->len += in_r;
+ i += in_r;
+diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
+index 42d19205166b..673fb29fda53 100644
+--- a/drivers/crypto/virtio/virtio_crypto_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_algs.c
+@@ -105,8 +105,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+ *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+- pr_err("virtio_crypto: Unsupported key length: %d\n",
+- key_len);
+ return -EINVAL;
+ }
+ return 0;
+@@ -484,6 +482,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
++ if (!req->nbytes)
++ return 0;
++ if (req->nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+@@ -504,6 +507,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
++ if (!req->nbytes)
++ return 0;
++ if (req->nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
+index cab32cfec9c4..709670d2b553 100644
+--- a/drivers/crypto/vmx/Makefile
++++ b/drivers/crypto/vmx/Makefile
+@@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
+ vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
+
+ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+-TARGET := linux-ppc64le
++override flavour := linux-ppc64le
+ else
+-TARGET := linux-ppc64
++override flavour := linux-ppc64
+ endif
+
+ quiet_cmd_perl = PERL $@
+- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
++ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
+
+ targets += aesp8-ppc.S ghashp8-ppc.S
+
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index c1d4536ae466..cc5e56d752c8 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2936,6 +2936,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
++ dimm->grain = 64;
+ }
+ }
+
+@@ -3012,6 +3013,7 @@ static int init_csrows(struct mem_ctl_info *mci)
+ dimm = csrow->channels[j]->dimm;
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
++ dimm->grain = 64;
+ }
+ }
+
+diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
+index 296e714bf553..523dd56a798c 100644
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -231,6 +231,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+ /* Cleans the error report buffer */
+ memset(e, 0, sizeof (*e));
+ e->error_count = 1;
++ e->grain = 1;
+ strcpy(e->label, "unknown label");
+ e->msg = pvt->msg;
+ e->other_detail = pvt->other_detail;
+@@ -326,7 +327,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+
+ /* Error grain */
+ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
+- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
++ e->grain = ~mem_err->physical_addr_mask + 1;
+
+ /* Memory error location, mapped on e->location */
+ p = e->location;
+@@ -442,8 +443,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+ if (p > pvt->other_detail)
+ *(p - 1) = '\0';
+
++ /* Sanity-check driver-supplied grain value. */
++ if (WARN_ON_ONCE(!e->grain))
++ e->grain = 1;
++
++ grain_bits = fls_long(e->grain - 1);
++
+ /* Generate the trace event */
+- grain_bits = fls_long(e->grain);
+ snprintf(pvt->detail_location, sizeof(pvt->detail_location),
+ "APEI location: %s %s", e->location, e->other_detail);
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
+index dc43847ad2b0..b3d93baf4fc5 100644
+--- a/drivers/extcon/extcon-sm5502.c
++++ b/drivers/extcon/extcon-sm5502.c
+@@ -65,6 +65,10 @@ struct sm5502_muic_info {
+ /* Default value of SM5502 register to bring up MUIC device. */
+ static struct reg_data sm5502_reg_data[] = {
+ {
++ .reg = SM5502_REG_RESET,
++ .val = SM5502_REG_RESET_MASK,
++ .invert = true,
++ }, {
+ .reg = SM5502_REG_CONTROL,
+ .val = SM5502_REG_CONTROL_MASK_INT_MASK,
+ .invert = false,
+diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
+index 9dbb634d213b..ce1f1ec310c4 100644
+--- a/drivers/extcon/extcon-sm5502.h
++++ b/drivers/extcon/extcon-sm5502.h
+@@ -237,6 +237,8 @@ enum sm5502_reg {
+ #define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
+ | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+
++#define SM5502_REG_RESET_MASK (0x1)
++
+ /* SM5502 Interrupts */
+ enum sm5502_irq {
+ /* INT1 */
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index e98bbf8e56d9..34d41f67b54d 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -970,6 +970,24 @@ static int __init efi_memreserve_map_root(void)
+ return 0;
+ }
+
++static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
++{
++ struct resource *res, *parent;
++
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ if (!res)
++ return -ENOMEM;
++
++ res->name = "reserved";
++ res->flags = IORESOURCE_MEM;
++ res->start = addr;
++ res->end = addr + size - 1;
++
++ /* we expect a conflict with a 'System RAM' region */
++ parent = request_resource_conflict(&iomem_resource, res);
++ return parent ? request_resource(parent, res) : 0;
++}
++
+ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ {
+ struct linux_efi_memreserve *rsv;
+@@ -994,7 +1012,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ rsv->entry[index].size = size;
+
+ memunmap(rsv);
+- return 0;
++ return efi_mem_reserve_iomem(addr, size);
+ }
+ memunmap(rsv);
+ }
+@@ -1004,6 +1022,12 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ if (!rsv)
+ return -ENOMEM;
+
++ rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
++ if (rc) {
++ free_page((unsigned long)rsv);
++ return rc;
++ }
++
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+@@ -1020,7 +1044,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ efi_memreserve_root->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+- return 0;
++ return efi_mem_reserve_iomem(addr, size);
+ }
+
+ static int __init efi_memreserve_root_init(void)
+diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
+index 1f76740f33b6..9282239b4d95 100644
+--- a/drivers/fsi/fsi-core.c
++++ b/drivers/fsi/fsi-core.c
+@@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
+ return 0;
+ }
+
++static unsigned long aligned_access_size(size_t offset, size_t count)
++{
++ unsigned long offset_unit, count_unit;
++
++ /* Criteria:
++ *
++ * 1. Access size must be less than or equal to the maximum access
++ * width or the highest power-of-two factor of offset
++ * 2. Access size must be less than or equal to the amount specified by
++ * count
++ *
++ * The access width is optimal if we can calculate 1 to be strictly
++ * equal while still satisfying 2.
++ */
++
++ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
++ offset_unit = BIT(__builtin_ctzl(offset | 4));
++
++ /* Find 2 by the top bit of count */
++ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
++
++ /* Constrain the maximum access width to the minimum of both criteria */
++ return BIT(__builtin_ctzl(offset_unit | count_unit));
++}
++
+ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+@@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += read_len) {
+- read_len = min_t(size_t, count, 4);
+- read_len -= off & 0x3;
++ read_len = aligned_access_size(off, count - total_len);
+
+ rc = fsi_slave_read(slave, off, buf + total_len, read_len);
+ if (rc)
+@@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += write_len) {
+- write_len = min_t(size_t, count, 4);
+- write_len -= off & 0x3;
++ write_len = aligned_access_size(off, count - total_len);
+
+ rc = fsi_slave_write(slave, off, buf + total_len, write_len);
+ if (rc)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 5652cc72ed3a..81842ba8cd75 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0, i;
+
++ /* Avoid accidently unparking the sched thread during GPU reset */
++ mutex_lock(&adev->lock_reset);
++
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+@@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+ kthread_unpark(ring->sched.thread);
+ }
+
++ mutex_unlock(&adev->lock_reset);
++
+ return 0;
+ }
+
+@@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
+ if (!fences)
+ return -ENOMEM;
+
++ /* Avoid accidently unparking the sched thread during GPU reset */
++ mutex_lock(&adev->lock_reset);
++
+ /* stop the scheduler */
+ kthread_park(ring->sched.thread);
+
+@@ -1075,6 +1083,8 @@ failure:
+ /* restart the scheduler */
+ kthread_unpark(ring->sched.thread);
+
++ mutex_unlock(&adev->lock_reset);
++
+ ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
+ if (fences)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+index b66d29d5ffa2..b158230af8db 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ }
+
+ dma_fence_put(fence);
++ fence = NULL;
+
+ r = amdgpu_bo_kmap(vram_obj, &vram_map);
+ if (r) {
+@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+ }
+
+ dma_fence_put(fence);
++ fence = NULL;
+
+ r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+ if (r) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 77674a7b9616..91899d28fa72 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ __field(struct dma_fence *, fence)
+- __field(char *, ring_name)
++ __string(ring, to_amdgpu_ring(job->base.sched)->name)
+ __field(u32, num_ibs)
+ ),
+
+@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
++ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+ __entry->sched_job_id, __get_str(timeline), __entry->context,
+- __entry->seqno, __entry->ring_name, __entry->num_ibs)
++ __entry->seqno, __get_str(ring), __entry->num_ibs)
+ );
+
+ TRACE_EVENT(amdgpu_sched_run_job,
+@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
+ __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+- __field(char *, ring_name)
++ __string(ring, to_amdgpu_ring(job->base.sched)->name)
+ __field(u32, num_ibs)
+ ),
+
+@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
++ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+ __entry->sched_job_id, __get_str(timeline), __entry->context,
+- __entry->seqno, __entry->ring_name, __entry->num_ibs)
++ __entry->seqno, __get_str(ring), __entry->num_ibs)
+ );
+
+
+@@ -468,7 +468,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
+ TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+- __field(const char *,name)
++ __string(ring, sched_job->base.sched->name);
+ __field(uint64_t, id)
+ __field(struct dma_fence *, fence)
+ __field(uint64_t, ctx)
+@@ -476,14 +476,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
+ ),
+
+ TP_fast_assign(
+- __entry->name = sched_job->base.sched->name;
++ __assign_str(ring, sched_job->base.sched->name)
+ __entry->id = sched_job->base.id;
+ __entry->fence = fence;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
+- __entry->name, __entry->id,
++ __get_str(ring), __entry->id,
+ __entry->fence, __entry->ctx,
+ __entry->seqno)
+ );
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 5251352f5922..c7514f743409 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1034,10 +1034,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
+ bool vm_flush_needed = job->vm_needs_flush;
+- bool pasid_mapping_needed = id->pasid != job->pasid ||
+- !id->pasid_mapping ||
+- !dma_fence_is_signaled(id->pasid_mapping);
+ struct dma_fence *fence = NULL;
++ bool pasid_mapping_needed = false;
+ unsigned patch_offset = 0;
+ int r;
+
+@@ -1047,6 +1045,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ pasid_mapping_needed = true;
+ }
+
++ mutex_lock(&id_mgr->lock);
++ if (id->pasid != job->pasid || !id->pasid_mapping ||
++ !dma_fence_is_signaled(id->pasid_mapping))
++ pasid_mapping_needed = true;
++ mutex_unlock(&id_mgr->lock);
++
+ gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+ vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
+ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
+@@ -1086,9 +1090,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
+ }
+
+ if (pasid_mapping_needed) {
++ mutex_lock(&id_mgr->lock);
+ id->pasid = job->pasid;
+ dma_fence_put(id->pasid_mapping);
+ id->pasid_mapping = dma_fence_get(fence);
++ mutex_unlock(&id_mgr->lock);
+ }
+ dma_fence_put(fence);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 97cf0b536873..c9ba2ec6d038 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2930,7 +2930,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
+ * And it's needed by gfxoff feature.
+ */
+ if (adev->gfx.rlc.is_rlc_v2_1) {
+- gfx_v9_1_init_rlc_save_restore_list(adev);
++ if (adev->asic_type == CHIP_VEGA12)
++ gfx_v9_1_init_rlc_save_restore_list(adev);
+ gfx_v9_0_enable_save_restore_machine(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+index 10166104b8a3..d483684db95b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+@@ -398,6 +398,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+ return false;
+ }
+
++static int psp_v11_0_ring_stop(struct psp_context *psp,
++ enum psp_ring_type ring_type)
++{
++ int ret = 0;
++ struct amdgpu_device *adev = psp->adev;
++
++ /* Write the ring destroy command*/
++ if (psp_v11_0_support_vmr_ring(psp))
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
++ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
++ else
++ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
++ GFX_CTRL_CMD_ID_DESTROY_RINGS);
++
++ /* there might be handshake issue with hardware which needs delay */
++ mdelay(20);
++
++ /* Wait for response flag (bit 31) */
++ if (psp_v11_0_support_vmr_ring(psp))
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
++ 0x80000000, 0x80000000, false);
++ else
++ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
++ 0x80000000, 0x80000000, false);
++
++ return ret;
++}
++
+ static int psp_v11_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+ {
+@@ -407,6 +435,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp_v11_0_support_vmr_ring(psp)) {
++ ret = psp_v11_0_ring_stop(psp, ring_type);
++ if (ret) {
++ DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
++ return ret;
++ }
++
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+@@ -451,33 +485,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
+ return ret;
+ }
+
+-static int psp_v11_0_ring_stop(struct psp_context *psp,
+- enum psp_ring_type ring_type)
+-{
+- int ret = 0;
+- struct amdgpu_device *adev = psp->adev;
+-
+- /* Write the ring destroy command*/
+- if (psp_v11_0_support_vmr_ring(psp))
+- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+- else
+- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+- GFX_CTRL_CMD_ID_DESTROY_RINGS);
+-
+- /* there might be handshake issue with hardware which needs delay */
+- mdelay(20);
+-
+- /* Wait for response flag (bit 31) */
+- if (psp_v11_0_support_vmr_ring(psp))
+- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+- 0x80000000, 0x80000000, false);
+- else
+- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+- 0x80000000, 0x80000000, false);
+-
+- return ret;
+-}
+
+ static int psp_v11_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+index 57bb5f9e08b2..88ae27a5a03d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
+@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ si_ih_disable_interrupts(adev);
+- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
++ /* set dummy read address to dummy page address */
++ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index d985e31fcc1e..f335f73919d1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1676,7 +1676,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
+ struct kfd_dev *dev = dqm->dev;
+ struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
+ uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
+- dev->device_info->num_sdma_engines *
++ (dev->device_info->num_sdma_engines +
++ dev->device_info->num_xgmi_sdma_engines) *
+ dev->device_info->num_sdma_queues_per_engine +
+ dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index c56ac47cd318..bc47f6a44456 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
+ }
+
+ kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
++ if (unlikely(!kfd->ih_wq)) {
++ kfifo_free(&kfd->ih_fifo);
++ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
++ return -ENOMEM;
++ }
+ spin_lock_init(&kfd->interrupt_lock);
+
+ INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 4139f129eafb..4e9c15c409ba 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -940,6 +940,11 @@ static int dm_late_init(void *handle)
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
++ /* Min backlight level after ABM reduction, Don't allow below 1%
++ * 0xFFFF x 0.01 = 0x28F
++ */
++ params.min_abm_backlight = 0x28F;
++
+ /* todo will enable for navi10 */
+ if (adev->asic_type <= CHIP_RAVEN) {
+ ret = dmcu_load_iram(dmcu, params);
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+index 3e8ac303bd52..23ec283eb07b 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+@@ -320,6 +320,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+ {
++ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
++
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
+ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
+@@ -357,14 +359,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+
+- /* Both fclk and dppclk ref are run on the same scemi clock so we
+- * need to keep the same value for both
++ /* Both fclk and ref_dppclk run on the same scemi clock.
++ * So take the higher value since the DPP DTO is typically programmed
++ * such that max dppclk is 1:1 with ref_dppclk.
+ */
+ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
+ clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
+ if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
+ clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+
++ // Both fclk and ref_dppclk run on the same scemi clock.
++ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
++
+ dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+index 50984c1811bb..468c6bb0e311 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+@@ -33,7 +33,7 @@
+ #include "mp/mp_12_0_0_sh_mask.h"
+
+ #define REG(reg_name) \
+- (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
++ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+ #define FN(reg_name, field) \
+ FD(reg_name##__##field)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index ca20b150afcc..067f5579f452 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2169,8 +2169,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
+ dp_set_fec_ready(link, false);
+ }
+ #endif
+- } else
+- link->link_enc->funcs->disable_output(link->link_enc, signal);
++ } else {
++ if (signal != SIGNAL_TYPE_VIRTUAL)
++ link->link_enc->funcs->disable_output(link->link_enc, signal);
++ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST disable link only when no stream use the link */
+@@ -2217,7 +2219,7 @@ static bool dp_active_dongle_validate_timing(
+ break;
+ }
+
+- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
++ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ dongle_caps->extendedCapValid == false)
+ return true;
+
+@@ -2767,6 +2769,15 @@ void core_link_enable_stream(
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
++ /* This second call is needed to reconfigure the DIG
++ * as a workaround for the incorrect value being applied
++ * from transmitter control.
++ */
++ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
++ stream->link->link_enc->funcs->setup(
++ stream->link->link_enc,
++ pipe_ctx->stream->signal);
++
+ #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index f5742719b5d9..5a583707d198 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2545,6 +2545,7 @@ static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+ {
+ union dp_downstream_port_present ds_port = { .byte = data };
++ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+@@ -2691,6 +2692,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ * keep receiver powered all the time.*/
+ case DP_BRANCH_DEVICE_ID_0010FA:
+ case DP_BRANCH_DEVICE_ID_0080E1:
++ case DP_BRANCH_DEVICE_ID_00E04C:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+index 79438c4f1e20..a519dbc5ecb6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+@@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
+ if (pipes[i].stream != NULL &&
+ !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
+ pipes[i].stream->link != NULL &&
+- pipes[i].stream_res.stream_enc != NULL) {
++ pipes[i].stream_res.stream_enc != NULL &&
++ pipes[i].stream->link == link) {
+ udelay(100);
+
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index bf1d7bb90e0f..bb09243758fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+- dc->hwss.update_writeback(dc, stream_status, wb_info);
++ dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
+ } else {
+ /* Enable writeback pipe from scratch*/
+- dc->hwss.enable_writeback(dc, stream_status, wb_info);
++ dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index 58bd131d5b48..7700a855d77c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
++ 1, 80000);
++
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index b3ae1c41fc69..937a8ba81160 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1356,7 +1356,8 @@ bool dcn20_update_bandwidth(
+ static void dcn20_enable_writeback(
+ struct dc *dc,
+ const struct dc_stream_status *stream_status,
+- struct dc_writeback_info *wb_info)
++ struct dc_writeback_info *wb_info,
++ struct dc_state *context)
+ {
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+@@ -1373,7 +1374,7 @@ static void dcn20_enable_writeback(
+ optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
+ /* set MCIF_WB buffer and arbitration configuration */
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
++ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ /* Enable MCIF_WB */
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+ /* Enable DWB */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+index 2137e2be2140..dda90995ba93 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+@@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
+ *num_of_src_opp = 2;
+ else
+ *num_of_src_opp = 1;
++
++ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
++ if (*src_opp_id_1 == 0xf)
++ *num_of_src_opp = 1;
+ }
+
+ void optc2_set_dwb_source(struct timing_generator *optc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 6b2f2f1a1c9c..78b2cc2e122f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -1765,7 +1765,7 @@ int dcn20_populate_dml_pipes_from_context(
+ pipe_cnt = i;
+ continue;
+ }
+- if (!resource_are_streams_timing_synchronizable(
++ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream)) {
+ synchronized_vblank = false;
+@@ -2474,6 +2474,7 @@ bool dcn20_fast_validate_bw(
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe))
+ goto validate_fail;
++ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ } else
+ dcn20_split_stream_for_mpc(
+ &context->res_ctx, dc->res_pool,
+@@ -3040,7 +3041,7 @@ static void cap_soc_clocks(
+ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
+ {
+- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
++ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
+ int i;
+ int num_calculated_states = 0;
+ int min_dcfclk = 0;
+@@ -3048,6 +3049,8 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
+ if (num_states == 0)
+ return;
+
++ memset(calculated_states, 0, sizeof(calculated_states));
++
+ if (dc->bb_overrides.min_dcfclk_mhz > 0)
+ min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
+ else
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+index d1266741763b..f5f6b4a0f0aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+@@ -22,6 +22,7 @@
+ * Authors: AMD
+ *
+ */
++#include <linux/delay.h>
+ #include "dm_services.h"
+ #include "dcn20/dcn20_hubbub.h"
+ #include "dcn21_hubbub.h"
+@@ -71,30 +72,39 @@ static uint32_t convert_and_clamp(
+ void dcn21_dchvm_init(struct hubbub *hubbub)
+ {
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
++ uint32_t riommu_active;
++ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+- //TODO: Figure out interval us and retry count
+- REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
++ for (i = 0; i < 100; i++) {
++ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+- //Reflect the power status of DCHUBBUB
+- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
++ if (riommu_active)
++ break;
++ else
++ udelay(5);
++ }
++
++ if (riommu_active) {
++ //Reflect the power status of DCHUBBUB
++ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+- //Start rIOMMU prefetching
+- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
++ //Start rIOMMU prefetching
++ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+- // Enable dynamic clock gating
+- REG_UPDATE_4(DCHVM_CLK_CTRL,
+- HVM_DISPCLK_R_GATE_DIS, 0,
+- HVM_DISPCLK_G_GATE_DIS, 0,
+- HVM_DCFCLK_R_GATE_DIS, 0,
+- HVM_DCFCLK_G_GATE_DIS, 0);
++ // Enable dynamic clock gating
++ REG_UPDATE_4(DCHVM_CLK_CTRL,
++ HVM_DISPCLK_R_GATE_DIS, 0,
++ HVM_DISPCLK_G_GATE_DIS, 0,
++ HVM_DCFCLK_R_GATE_DIS, 0,
++ HVM_DCFCLK_G_GATE_DIS, 0);
+
+- //Poll until HOSTVM_PREFETCH_DONE = 1
+- //TODO: Figure out interval us and retry count
+- REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
++ //Poll until HOSTVM_PREFETCH_DONE = 1
++ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
++ }
+ }
+
+ static int hubbub21_init_dchub(struct hubbub *hubbub,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 3a938cd414ea..f6cc2d6f576d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -321,10 +321,12 @@ struct hw_sequencer_funcs {
+ struct dc_state *context);
+ void (*update_writeback)(struct dc *dc,
+ const struct dc_stream_status *stream_status,
+- struct dc_writeback_info *wb_info);
++ struct dc_writeback_info *wb_info,
++ struct dc_state *context);
+ void (*enable_writeback)(struct dc *dc,
+ const struct dc_stream_status *stream_status,
+- struct dc_writeback_info *wb_info);
++ struct dc_writeback_info *wb_info,
++ struct dc_state *context);
+ void (*disable_writeback)(struct dc *dc,
+ unsigned int dwb_pipe_inst);
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+index 18961707db23..9ad49da50a17 100644
+--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
++++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+@@ -31,6 +31,8 @@
+ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
+ #define DP_BRANCH_DEVICE_ID_00001A 0x00001A
+ #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
++#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
++#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
+
+ enum ddc_result {
+ DDC_RESULT_UNKNOWN = 0,
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index ec70c9b12e1a..0978c698f0f8 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -743,6 +743,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ nominal_field_rate_in_uhz =
+ mod_freesync_calc_nominal_field_rate(stream);
+
++ /* Rounded to the nearest Hz */
++ nominal_field_rate_in_uhz = 1000000ULL *
++ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
++
+ min_refresh_in_uhz = in_config->min_refresh_in_uhz;
+ max_refresh_in_uhz = in_config->max_refresh_in_uhz;
+
+@@ -996,14 +1000,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
+ const struct dc_stream_state *stream)
+ {
+ unsigned long long nominal_field_rate_in_uhz = 0;
++ unsigned int total = stream->timing.h_total * stream->timing.v_total;
+
+- /* Calculate nominal field rate for stream */
++ /* Calculate nominal field rate for stream, rounded up to nearest integer */
+ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
+ nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.h_total);
+- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
+- stream->timing.v_total);
++
++ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
+
+ return nominal_field_rate_in_uhz;
+ }
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 05e2be856037..ba1aafe40512 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
+ /* NOTE: iRAM is 256B in size */
+ struct iram_table_v_2 {
+ /* flags */
+- uint16_t flags; /* 0x00 U16 */
++ uint16_t min_abm_backlight; /* 0x00 U16 */
+
+ /* parameters for ABM2.0 algorithm */
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
+@@ -140,10 +140,10 @@ struct iram_table_v_2 {
+
+ /* For reading PSR State directly from IRAM */
+ uint8_t psr_state; /* 0xf0 */
+- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+- uint8_t dmcu_abm_feature_version; /* 0xf2 */
+- uint8_t dmcu_psr_feature_version; /* 0xf3 */
+- uint16_t dmcu_version; /* 0xf4 */
++ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
++ uint8_t dmcu_abm_feature_version; /* 0xf2 */
++ uint8_t dmcu_psr_feature_version; /* 0xf3 */
++ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint16_t blRampReduction; /* 0xf7 */
+@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
+- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+- uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+- uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+- uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+- uint8_t pad[21]; /* 0x6b U0.8 */
++ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
++ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
++ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
++ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
++ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
++ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
++ uint16_t min_abm_backlight; /* 0x6b U16 */
++ uint8_t pad[19]; /* 0x6d U0.8 */
+
+ /* parameters for crgb conversion */
+- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
++ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
++ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
++ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+
+ /* parameters for custom curve */
+ /* thresholds for brightness --> backlight */
+- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
++ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ /* offsets for brightness --> backlight */
+- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
++ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+
+ /* For reading PSR State directly from IRAM */
+- uint8_t psr_state; /* 0xf0 */
+- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+- uint8_t dmcu_abm_feature_version; /* 0xf2 */
+- uint8_t dmcu_psr_feature_version; /* 0xf3 */
+- uint16_t dmcu_version; /* 0xf4 */
+- uint8_t dmcu_state; /* 0xf6 */
+-
+- uint8_t dummy1; /* 0xf7 */
+- uint8_t dummy2; /* 0xf8 */
+- uint8_t dummy3; /* 0xf9 */
+- uint8_t dummy4; /* 0xfa */
+- uint8_t dummy5; /* 0xfb */
+- uint8_t dummy6; /* 0xfc */
+- uint8_t dummy7; /* 0xfd */
+- uint8_t dummy8; /* 0xfe */
+- uint8_t dummy9; /* 0xff */
++ uint8_t psr_state; /* 0xf0 */
++ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
++ uint8_t dmcu_abm_feature_version; /* 0xf2 */
++ uint8_t dmcu_psr_feature_version; /* 0xf3 */
++ uint16_t dmcu_version; /* 0xf4 */
++ uint8_t dmcu_state; /* 0xf6 */
++
++ uint8_t dummy1; /* 0xf7 */
++ uint8_t dummy2; /* 0xf8 */
++ uint8_t dummy3; /* 0xf9 */
++ uint8_t dummy4; /* 0xfa */
++ uint8_t dummy5; /* 0xfb */
++ uint8_t dummy6; /* 0xfc */
++ uint8_t dummy7; /* 0xfd */
++ uint8_t dummy8; /* 0xfe */
++ uint8_t dummy9; /* 0xff */
+ };
+ #pragma pack(pop)
+
+@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
+ {
+ unsigned int set = params.set;
+
+- ram_table->flags = 0x0;
++ ram_table->min_abm_backlight =
++ cpu_to_be16(params.min_abm_backlight);
+ ram_table->deviation_gain = 0xb3;
+
+ ram_table->blRampReduction =
+@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
+
+ ram_table->flags = 0x0;
+
++ ram_table->min_abm_backlight =
++ cpu_to_be16(params.min_abm_backlight);
++
+ ram_table->deviation_gain[0] = 0xb3;
+ ram_table->deviation_gain[1] = 0xa8;
+ ram_table->deviation_gain[2] = 0x98;
+@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
+ unsigned int set = params.set;
+
+ ram_table->flags = 0x0;
++
++ ram_table->min_abm_backlight =
++ cpu_to_be16(params.min_abm_backlight);
++
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
+ ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+index da5df00fedce..e54157026330 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
+ unsigned int backlight_lut_array_size;
+ unsigned int backlight_ramping_reduction;
+ unsigned int backlight_ramping_start;
++ unsigned int min_abm_backlight;
+ unsigned int set;
+ };
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index 4acf139ea014..58c091ab67b2 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -1344,7 +1344,10 @@ static int smu_suspend(void *handle)
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
++ bool baco_feature_is_enabled = false;
++
++ if(!(adev->flags & AMD_IS_APU))
++ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+
+ ret = smu_system_features_control(smu, false);
+ if (ret)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+index df6ff9252401..b068d1c7b44d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+@@ -29,7 +29,7 @@
+ #include "vega20_baco.h"
+ #include "vega20_smumgr.h"
+
+-
++#include "amdgpu_ras.h"
+
+ static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
+ {
+@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ enum BACO_STATE cur_state;
+ uint32_t data;
+
+@@ -84,10 +85,11 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+- data |= 0x80000000;
+- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+-
++ if (!ras || !ras->supported) {
++ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
++ data |= 0x80000000;
++ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
++ }
+
+ if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+index e62bfba51562..e5283dafc414 100644
+--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+@@ -183,11 +183,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+- SmuMetrics_t metrics = {0};
++ SmuMetrics_t metrics;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
++ memset(&metrics, 0, sizeof(metrics));
++
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
+ (void *)&metrics, false);
+ if (ret)
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+index 624d257da20f..52c42569a111 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+@@ -250,6 +250,7 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
+ {
+ komeda_crtc_prepare(to_kcrtc(crtc));
+ drm_crtc_vblank_on(crtc);
++ WARN_ON(drm_crtc_vblank_get(crtc));
+ komeda_crtc_do_flush(crtc, old);
+ }
+
+@@ -319,6 +320,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
+ }
+ }
+
++ drm_crtc_vblank_put(crtc);
+ drm_crtc_vblank_off(crtc);
+ komeda_crtc_unprepare(kcrtc);
+ }
+diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
+index 3c7cc5af735c..56df07cdab68 100644
+--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
++++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
+@@ -715,7 +715,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
+ /* 1.0V digital core power regulator */
+ pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
+ if (IS_ERR(pdata->dvdd10)) {
+- DRM_ERROR("DVDD10 regulator not found\n");
++ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
++ DRM_ERROR("DVDD10 regulator not found\n");
++
+ return PTR_ERR(pdata->dvdd10);
+ }
+
+@@ -1332,7 +1334,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
+
+ err = anx78xx_init_pdata(anx78xx);
+ if (err) {
+- DRM_ERROR("Failed to initialize pdata: %d\n", err);
++ if (err != -EPROBE_DEFER)
++ DRM_ERROR("Failed to initialize pdata: %d\n", err);
++
+ return err;
+ }
+
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+index 521d689413c8..1326f2c734bf 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+@@ -36,6 +36,7 @@
+ #include "dw-hdmi-cec.h"
+ #include "dw-hdmi.h"
+
++#define DDC_CI_ADDR 0x37
+ #define DDC_SEGMENT_ADDR 0x30
+
+ #define HDMI_EDID_LEN 512
+@@ -398,6 +399,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
++ if (addr == DDC_CI_ADDR)
++ /*
++ * The internal I2C controller does not support the multi-byte
++ * read and write operations needed for DDC/CI.
++ * TOFIX: Blacklist the DDC/CI address until we filter out
++ * unsupported I2C operations.
++ */
++ return -EOPNOTSUPP;
++
+ dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
+
+ for (i = 0; i < num; i++) {
+@@ -2023,7 +2033,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
+
+ /* HDMI Initialization Step E - Configure audio */
+ hdmi_clk_regenerator_update_pixel_clock(hdmi);
+- hdmi_enable_audio_clk(hdmi, true);
++ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
+ }
+
+ /* not for DVI mode */
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 6b0177112e18..3f50b8865db4 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3722,7 +3722,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ } else {
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+@@ -4191,7 +4191,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+@@ -4252,7 +4252,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
+index 1961f713aaab..f8154316a3b0 100644
+--- a/drivers/gpu/drm/drm_mipi_dbi.c
++++ b/drivers/gpu/drm/drm_mipi_dbi.c
+@@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
+ int ret;
+
+ if (mipi_dbi_command_is_read(dbi, *cmd))
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
+
+@@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
+ struct mipi_dbi_dev *dbidev = m->private;
+ u8 val, cmd = 0, parameters[64];
+ char *buf, *pos, *token;
+- unsigned int i;
+- int ret, idx;
++ int i, ret, idx;
+
+ if (!drm_dev_enter(&dbidev->drm, &idx))
+ return -ENODEV;
+diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
+index fd1fbc77871f..552ec82e9bc5 100644
+--- a/drivers/gpu/drm/drm_vblank.c
++++ b/drivers/gpu/drm/drm_vblank.c
+@@ -1581,7 +1581,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+ unsigned int flags, pipe, high_pipe;
+
+ if (!dev->irq_enabled)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+@@ -1838,7 +1838,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+ return -EOPNOTSUPP;
+
+ if (!dev->irq_enabled)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
+ if (!crtc)
+@@ -1896,7 +1896,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+ return -EOPNOTSUPP;
+
+ if (!dev->irq_enabled)
+- return -EINVAL;
++ return -EOPNOTSUPP;
+
+ crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
+ if (!crtc)
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+index bc1565f1822a..09aa73c0f2ad 100644
+--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -852,6 +852,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
+
+ static void hdmi_connector_destroy(struct drm_connector *connector)
+ {
++ struct hdmi_context *hdata = connector_to_hdmi(connector);
++
++ cec_notifier_conn_unregister(hdata->notifier);
++
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ }
+@@ -935,6 +939,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
+ {
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ struct drm_connector *connector = &hdata->connector;
++ struct cec_connector_info conn_info;
+ int ret;
+
+ connector->interlace_allowed = true;
+@@ -957,6 +962,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
+ DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
+ }
+
++ cec_fill_conn_info_from_drm(&conn_info, connector);
++
++ hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
++ &conn_info);
++ if (!hdata->notifier) {
++ ret = -ENOMEM;
++ DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
++ }
++
+ return ret;
+ }
+
+@@ -1528,8 +1542,8 @@ static void hdmi_disable(struct drm_encoder *encoder)
+ */
+ mutex_unlock(&hdata->mutex);
+ cancel_delayed_work(&hdata->hotplug_work);
+- cec_notifier_set_phys_addr(hdata->notifier,
+- CEC_PHYS_ADDR_INVALID);
++ if (hdata->notifier)
++ cec_notifier_phys_addr_invalidate(hdata->notifier);
+ return;
+ }
+
+@@ -2006,12 +2020,6 @@ static int hdmi_probe(struct platform_device *pdev)
+ }
+ }
+
+- hdata->notifier = cec_notifier_get(&pdev->dev);
+- if (hdata->notifier == NULL) {
+- ret = -ENOMEM;
+- goto err_hdmiphy;
+- }
+-
+ pm_runtime_enable(dev);
+
+ audio_infoframe = &hdata->audio.infoframe;
+@@ -2023,7 +2031,7 @@ static int hdmi_probe(struct platform_device *pdev)
+
+ ret = hdmi_register_audio_device(hdata);
+ if (ret)
+- goto err_notifier_put;
++ goto err_rpm_disable;
+
+ ret = component_add(&pdev->dev, &hdmi_component_ops);
+ if (ret)
+@@ -2034,8 +2042,7 @@ static int hdmi_probe(struct platform_device *pdev)
+ err_unregister_audio:
+ platform_device_unregister(hdata->audio.pdev);
+
+-err_notifier_put:
+- cec_notifier_put(hdata->notifier);
++err_rpm_disable:
+ pm_runtime_disable(dev);
+
+ err_hdmiphy:
+@@ -2054,12 +2061,10 @@ static int hdmi_remove(struct platform_device *pdev)
+ struct hdmi_context *hdata = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&hdata->hotplug_work);
+- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
+
+ component_del(&pdev->dev, &hdmi_component_ops);
+ platform_device_unregister(hdata->audio.pdev);
+
+- cec_notifier_put(hdata->notifier);
+ pm_runtime_disable(&pdev->dev);
+
+ if (!IS_ERR(hdata->reg_hdmi_en))
+diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
+index 167c10767dd4..900e5499249d 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
++++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
+@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+ s32 freq_error, min_error = 100000;
+
+ memset(best_clock, 0, sizeof(*best_clock));
++ memset(&clock, 0, sizeof(clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
++ memset(&clock, 0, sizeof(clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
+index ac491a781952..f690793ae2d5 100644
+--- a/drivers/gpu/drm/meson/meson_vclk.c
++++ b/drivers/gpu/drm/meson/meson_vclk.c
+@@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+ if (frac >= HDMI_FRAC_MAX_GXBB)
+ return false;
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
+- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
++ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_GXL)
+ return false;
++ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
++ /* Empiric supported min/max dividers */
++ if (m < 106 || m > 247)
++ return false;
++ if (frac >= HDMI_FRAC_MAX_G12A)
++ return false;
+ }
+
+ return true;
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index e686331fa089..691c1a277d91 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
++ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 94dfa2e5a9ab..a442a955f98c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1131,6 +1131,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ const char *name = connector->name;
+ struct nouveau_encoder *nv_encoder;
+ int ret;
++ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
++
++ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
++ NV_DEBUG(drm, "service %s\n", name);
++ drm_dp_cec_irq(&nv_connector->aux);
++ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
++ nv50_mstm_service(nv_encoder->dp.mstm);
++
++ return NVIF_NOTIFY_KEEP;
++ }
+
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 0) {
+@@ -1151,25 +1161,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ return NVIF_NOTIFY_DROP;
+ }
+
+- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+- NV_DEBUG(drm, "service %s\n", name);
+- drm_dp_cec_irq(&nv_connector->aux);
+- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+- nv50_mstm_service(nv_encoder->dp.mstm);
+- } else {
+- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+-
++ if (!plugged)
++ drm_dp_cec_unset_edid(&nv_connector->aux);
++ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
++ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+ if (!plugged)
+- drm_dp_cec_unset_edid(&nv_connector->aux);
+- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
+- if (!plugged)
+- nv50_mstm_remove(nv_encoder->dp.mstm);
+- }
+-
+- drm_helper_hpd_irq_event(connector->dev);
++ nv50_mstm_remove(nv_encoder->dp.mstm);
+ }
+
++ drm_helper_hpd_irq_event(connector->dev);
++
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
+ return NVIF_NOTIFY_KEEP;
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index b5b14aa059ea..2aa89eaecf6f 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -426,6 +426,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ return PTR_ERR(ts->dsi);
+ }
+
++ drm_panel_init(&ts->base);
+ ts->base.dev = dev;
+ ts->base.funcs = &rpi_touchscreen_funcs;
+
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+index 5e3e92ea9ea6..3b2612ae931e 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+@@ -381,6 +381,7 @@ static int st7789v_probe(struct spi_device *spi)
+ spi_set_drvdata(spi, ctx);
+ ctx->spi = spi;
+
++ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &spi->dev;
+ ctx->panel.funcs = &st7789v_drm_funcs;
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index f39b97ed4ade..2af64459b3d7 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -632,43 +632,41 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+ }
+
+ /**
+- * drm_sched_cleanup_jobs - destroy finished jobs
++ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ *
+ * @sched: scheduler instance
+ *
+- * Remove all finished jobs from the mirror list and destroy them.
++ * Returns the next finished job from the mirror list (if there is one)
++ * ready for it to be destroyed.
+ */
+-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
++static struct drm_sched_job *
++drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+ {
++ struct drm_sched_job *job;
+ unsigned long flags;
+
+ /* Don't destroy jobs while the timeout worker is running */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr))
+- return;
+-
++ return NULL;
+
+- while (!list_empty(&sched->ring_mirror_list)) {
+- struct drm_sched_job *job;
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+
+- job = list_first_entry(&sched->ring_mirror_list,
++ job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct drm_sched_job, node);
+- if (!dma_fence_is_signaled(&job->s_fence->finished))
+- break;
+
+- spin_lock_irqsave(&sched->job_list_lock, flags);
++ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
+ /* remove job from ring_mirror_list */
+ list_del_init(&job->node);
+- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+-
+- sched->ops->free_job(job);
++ } else {
++ job = NULL;
++ /* queue timeout for next job */
++ drm_sched_start_timeout(sched);
+ }
+
+- /* queue timeout for next job */
+- spin_lock_irqsave(&sched->job_list_lock, flags);
+- drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
++ return job;
+ }
+
+ /**
+@@ -708,12 +706,19 @@ static int drm_sched_main(void *param)
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
+ struct dma_fence *fence;
++ struct drm_sched_job *cleanup_job = NULL;
+
+ wait_event_interruptible(sched->wake_up_worker,
+- (drm_sched_cleanup_jobs(sched),
++ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
+ (!drm_sched_blocked(sched) &&
+ (entity = drm_sched_select_entity(sched))) ||
+- kthread_should_stop()));
++ kthread_should_stop());
++
++ if (cleanup_job) {
++ sched->ops->free_job(cleanup_job);
++ /* queue timeout for next job */
++ drm_sched_start_timeout(sched);
++ }
+
+ if (!entity)
+ continue;
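The scheduler hunks above replace "free every finished job inside the wake-up condition" with "fetch at most one finished job under the lock, free it outside". A simplified userspace model of that shape, using pthreads and a plain singly linked list rather than the DRM structures:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct job { struct job *next; bool finished; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct job *head;

    static struct job *get_cleanup_job(void)
    {
            struct job *j;

            pthread_mutex_lock(&lock);
            j = head;
            if (j && j->finished)
                    head = j->next;          /* unlink the finished head */
            else
                    j = NULL;                /* nothing ready yet */
            pthread_mutex_unlock(&lock);
            return j;
    }

    static void worker_iteration(void)
    {
            struct job *j = get_cleanup_job();

            if (j)
                    free(j);                 /* "free_job" outside the lock */
    }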
+diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+index 1636344ba9ec..f83522717488 100644
+--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+@@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
+ SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
+
+ val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
+- } else if ((mode->hsync_end - mode->hdisplay) > 20) {
++ } else if ((mode->hsync_start - mode->hdisplay) > 20) {
+ /* Maaaaaagic */
+- u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
++ u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
+
+ drq *= mipi_dsi_pixel_format_to_bpp(device->format);
+ drq /= 32;
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index e1669ada0a40..75e65d9536d5 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3200,6 +3200,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
+ * earlier
+ */
+ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
++ } else {
++ if (sor->soc->supports_edp)
++ sor->index = 0;
++ else
++ sor->index = 1;
+ }
+
+ err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 98819462f025..f07803699809 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
+ */
+ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+- struct ttm_mem_reg *mem)
++ struct ttm_mem_reg *mem,
++ bool no_wait_gpu)
+ {
+ struct dma_fence *fence;
+ int ret;
+@@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+ fence = dma_fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+- if (fence) {
+- dma_resv_add_shared_fence(bo->base.resv, fence);
++ if (!fence)
++ return 0;
+
+- ret = dma_resv_reserve_shared(bo->base.resv, 1);
+- if (unlikely(ret)) {
+- dma_fence_put(fence);
+- return ret;
+- }
++ if (no_wait_gpu)
++ return -EBUSY;
++
++ dma_resv_add_shared_fence(bo->base.resv, fence);
+
+- dma_fence_put(bo->moving);
+- bo->moving = fence;
++ ret = dma_resv_reserve_shared(bo->base.resv, 1);
++ if (unlikely(ret)) {
++ dma_fence_put(fence);
++ return ret;
+ }
+
++ dma_fence_put(bo->moving);
++ bo->moving = fence;
+ return 0;
+ }
+
+@@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ return ret;
+ } while (1);
+
+- return ttm_bo_add_move_fence(bo, man, mem);
++ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ }
+
+ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+@@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ if (unlikely(ret))
+ goto error;
+
+- if (mem->mm_node) {
+- ret = ttm_bo_add_move_fence(bo, man, mem);
+- if (unlikely(ret)) {
+- (*man->func->put_node)(man, mem);
+- goto error;
+- }
+- return 0;
++ if (!mem->mm_node)
++ continue;
++
++ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
++ if (unlikely(ret)) {
++ (*man->func->put_node)(man, mem);
++ if (ret == -EBUSY)
++ continue;
++
++ goto error;
+ }
++ return 0;
+ }
+
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index ee7d4e7b0ee3..0853b980bcb3 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1285,6 +1285,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
+
+ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
++#ifdef CONFIG_DRM_VC4_HDMI_CEC
++ struct cec_connector_info conn_info;
++#endif
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = drm->dev_private;
+@@ -1403,13 +1406,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ #ifdef CONFIG_DRM_VC4_HDMI_CEC
+ hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
+ vc4, "vc4",
+- CEC_CAP_TRANSMIT |
+- CEC_CAP_LOG_ADDRS |
+- CEC_CAP_PASSTHROUGH |
+- CEC_CAP_RC, 1);
++ CEC_CAP_DEFAULTS |
++ CEC_CAP_CONNECTOR_INFO, 1);
+ ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
+ if (ret < 0)
+ goto err_destroy_conn;
++
++ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
++ cec_s_conn_info(hdmi->cec_adap, &conn_info);
++
+ HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
+ value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
+ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 0a88ef11b9d3..a662394f6892 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -463,25 +463,29 @@ out:
+ }
+
+ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file)
++ struct drm_file *file)
+ {
+ struct drm_virtgpu_3d_wait *args = data;
+- struct drm_gem_object *gobj = NULL;
+- struct virtio_gpu_object *qobj = NULL;
++ struct drm_gem_object *obj;
++ long timeout = 15 * HZ;
+ int ret;
+- bool nowait = false;
+
+- gobj = drm_gem_object_lookup(file, args->handle);
+- if (gobj == NULL)
++ obj = drm_gem_object_lookup(file, args->handle);
++ if (obj == NULL)
+ return -ENOENT;
+
+- qobj = gem_to_virtio_gpu_obj(gobj);
+-
+- if (args->flags & VIRTGPU_WAIT_NOWAIT)
+- nowait = true;
+- ret = virtio_gpu_object_wait(qobj, nowait);
++ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
++ ret = dma_resv_test_signaled_rcu(obj->resv, true);
++ } else {
++ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
++ timeout);
++ }
++ if (ret == 0)
++ ret = -EBUSY;
++ else if (ret > 0)
++ ret = 0;
+
+- drm_gem_object_put_unlocked(gobj);
++ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
+index eaa5c3352c13..22559670faee 100644
+--- a/drivers/gpu/host1x/job.c
++++ b/drivers/gpu/host1x/job.c
+@@ -436,7 +436,8 @@ out:
+ return err;
+ }
+
+-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
++static inline int copy_gathers(struct device *host, struct host1x_job *job,
++ struct device *dev)
+ {
+ struct host1x_firewall fw;
+ size_t size = 0;
+@@ -459,12 +460,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+ * Try a non-blocking allocation from a higher priority pools first,
+ * as awaiting for the allocation here is a major performance hit.
+ */
+- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
++ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
+ GFP_NOWAIT);
+
+ /* the higher priority allocation failed, try the generic-blocking */
+ if (!job->gather_copy_mapped)
+- job->gather_copy_mapped = dma_alloc_wc(dev, size,
++ job->gather_copy_mapped = dma_alloc_wc(host, size,
+ &job->gather_copy,
+ GFP_KERNEL);
+ if (!job->gather_copy_mapped)
+@@ -512,7 +513,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
+ goto out;
+
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+- err = copy_gathers(job, dev);
++ err = copy_gathers(host->dev, job, dev);
+ if (err)
+ goto out;
+ }
+@@ -573,7 +574,7 @@ void host1x_job_unpin(struct host1x_job *job)
+ job->num_unpins = 0;
+
+ if (job->gather_copy_size)
+- dma_free_wc(job->channel->dev, job->gather_copy_size,
++ dma_free_wc(host->dev, job->gather_copy_size,
+ job->gather_copy_mapped, job->gather_copy);
+ }
+ EXPORT_SYMBOL(host1x_job_unpin);
+diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
+index 0dfd97bbde9e..ca232ec565e8 100644
+--- a/drivers/hwtracing/intel_th/core.c
++++ b/drivers/hwtracing/intel_th/core.c
+@@ -834,9 +834,6 @@ static irqreturn_t intel_th_irq(int irq, void *data)
+ ret |= d->irq(th->thdev[i]);
+ }
+
+- if (ret == IRQ_NONE)
+- pr_warn_ratelimited("nobody cared for irq\n");
+-
+ return ret;
+ }
+
+@@ -887,6 +884,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+
+ if (th->irq == -1)
+ th->irq = devres[r].start;
++ th->num_irqs++;
+ break;
+ default:
+ dev_warn(dev, "Unknown resource type %lx\n",
+@@ -940,6 +938,9 @@ void intel_th_free(struct intel_th *th)
+
+ th->num_thdevs = 0;
+
++ for (i = 0; i < th->num_irqs; i++)
++ devm_free_irq(th->dev, th->irq + i, th);
++
+ pm_runtime_get_sync(th->dev);
+ pm_runtime_forbid(th->dev);
+
+diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
+index 0df480072b6c..6f4f5486fe6d 100644
+--- a/drivers/hwtracing/intel_th/intel_th.h
++++ b/drivers/hwtracing/intel_th/intel_th.h
+@@ -261,6 +261,7 @@ enum th_mmio_idx {
+ * @num_thdevs: number of devices in the @thdev array
+ * @num_resources: number of resources in the @resource array
+ * @irq: irq number
+ * @num_irqs: number of IRQs in use
+ * @id: this Intel TH controller's device ID in the system
+ * @major: device node major for output devices
+ */
+@@ -277,6 +278,7 @@ struct intel_th {
+ unsigned int num_thdevs;
+ unsigned int num_resources;
+ int irq;
++ int num_irqs;
+
+ int id;
+ int major;
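The Intel TH hunks add a num_irqs counter so that teardown can release exactly the interrupt lines that were claimed, before the parent device goes away. A toy model of the bookkeeping, with register_line()/release_line() standing in for the real request/free calls:

    #include <stdio.h>

    #define MAX_LINES 8

    struct ctl { int first_line; int num_lines; };

    static void register_line(int line) { printf("request %d\n", line); }
    static void release_line(int line)  { printf("free %d\n", line); }

    static void ctl_setup(struct ctl *c, int first, int count)
    {
            c->first_line = first;
            c->num_lines = 0;
            for (int i = 0; i < count && i < MAX_LINES; i++) {
                    register_line(first + i);
                    c->num_lines++;          /* remember what was claimed */
            }
    }

    static void ctl_free(struct ctl *c)
    {
            for (int i = 0; i < c->num_lines; i++)
                    release_line(c->first_line + i);
            c->num_lines = 0;
    }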
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index 6d240dfae9d9..8e48c7458aa3 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -1676,10 +1676,13 @@ static int intel_th_msc_init(struct msc *msc)
+ return 0;
+ }
+
+-static void msc_win_switch(struct msc *msc)
++static int msc_win_switch(struct msc *msc)
+ {
+ struct msc_window *first;
+
++ if (list_empty(&msc->win_list))
++ return -EINVAL;
++
+ first = list_first_entry(&msc->win_list, struct msc_window, entry);
+
+ if (msc_is_last_win(msc->cur_win))
+@@ -1691,6 +1694,8 @@ static void msc_win_switch(struct msc *msc)
+ msc->base_addr = msc_win_base_dma(msc->cur_win);
+
+ intel_th_trace_switch(msc->thdev);
++
++ return 0;
+ }
+
+ /**
+@@ -2025,16 +2030,15 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
+ if (val != 1)
+ return -EINVAL;
+
++ ret = -EINVAL;
+ mutex_lock(&msc->buf_mutex);
+ /*
+ * Window switch can only happen in the "multi" mode.
+ * If a external buffer is engaged, they have the full
+ * control over window switching.
+ */
+- if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
+- ret = -ENOTSUPP;
+- else
+- msc_win_switch(msc);
++ if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
++ ret = msc_win_switch(msc);
+ mutex_unlock(&msc->buf_mutex);
+
+ return ret ? ret : size;
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index ebf3e30e989a..e9d90b53bbc4 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -204,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Comet Lake PCH-V */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ {
+ /* Ice Lake NNPI */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+@@ -229,6 +234,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Elkhart Lake */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ { 0 },
+ };
+
+diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
+index 5fa78c273a25..65c7c9329b1c 100644
+--- a/drivers/iio/adc/dln2-adc.c
++++ b/drivers/iio/adc/dln2-adc.c
+@@ -524,6 +524,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
+ u16 conflict;
+ unsigned int trigger_chan;
+
++ ret = iio_triggered_buffer_postenable(indio_dev);
++ if (ret)
++ return ret;
++
+ mutex_lock(&dln2->mutex);
+
+ /* Enable ADC */
+@@ -537,6 +541,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
+ (int)conflict);
+ ret = -EBUSY;
+ }
++ iio_triggered_buffer_predisable(indio_dev);
+ return ret;
+ }
+
+@@ -550,6 +555,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
+ mutex_unlock(&dln2->mutex);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
++ iio_triggered_buffer_predisable(indio_dev);
+ return ret;
+ }
+ } else {
+@@ -557,12 +563,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
+ mutex_unlock(&dln2->mutex);
+ }
+
+- return iio_triggered_buffer_postenable(indio_dev);
++ return 0;
+ }
+
+ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
+ {
+- int ret;
++ int ret, ret2;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+
+ mutex_lock(&dln2->mutex);
+@@ -577,12 +583,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
+ ret = dln2_adc_set_port_enabled(dln2, false, NULL);
+
+ mutex_unlock(&dln2->mutex);
+- if (ret < 0) {
++ if (ret < 0)
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+- return ret;
+- }
+
+- return iio_triggered_buffer_predisable(indio_dev);
++ ret2 = iio_triggered_buffer_predisable(indio_dev);
++ if (ret == 0)
++ ret = ret2;
++
++ return ret;
+ }
+
+ static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
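The dln2-adc hunks reorder the buffer callbacks: the generic postenable runs first and is unwound on any later failure, while predisable always runs both the device teardown and the generic predisable and reports the first error. A compact sketch of that ordering, with every helper stubbed out:

    static int generic_postenable(void) { return 0; }
    static int generic_predisable(void) { return 0; }
    static int hw_enable(void)          { return 0; }
    static int hw_disable(void)         { return 0; }

    static int postenable(void)
    {
            int ret = generic_postenable();

            if (ret)
                    return ret;

            ret = hw_enable();
            if (ret)
                    generic_predisable();    /* unwind on failure */
            return ret;
    }

    static int predisable(void)
    {
            int ret = hw_disable();
            int ret2 = generic_predisable(); /* always called */

            return ret ? ret : ret2;         /* first error wins */
    }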
+diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
+index 214883458582..e3be8eedd773 100644
+--- a/drivers/iio/adc/max1027.c
++++ b/drivers/iio/adc/max1027.c
+@@ -458,6 +458,14 @@ static int max1027_probe(struct spi_device *spi)
+ return ret;
+ }
+
++ /* Internal reset */
++ st->reg = MAX1027_RST_REG;
++ ret = spi_write(st->spi, &st->reg, 1);
++ if (ret < 0) {
++ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
++ return ret;
++ }
++
+ /* Disable averaging */
+ st->reg = MAX1027_AVG_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
+diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
+index cc42219a64f7..979070196da9 100644
+--- a/drivers/iio/dac/Kconfig
++++ b/drivers/iio/dac/Kconfig
+@@ -60,8 +60,8 @@ config AD5446
+ help
+ Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
+ AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
+- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
+- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
++ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
++ AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
+
+ To compile this driver as a module, choose M here: the
+diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
+index 7df8b4cc295d..61c670f7fc5f 100644
+--- a/drivers/iio/dac/ad5446.c
++++ b/drivers/iio/dac/ad5446.c
+@@ -327,6 +327,7 @@ enum ad5446_supported_spi_device_ids {
+ ID_AD5541A,
+ ID_AD5512A,
+ ID_AD5553,
++ ID_AD5600,
+ ID_AD5601,
+ ID_AD5611,
+ ID_AD5621,
+@@ -381,6 +382,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
+ .channel = AD5446_CHANNEL(14, 16, 0),
+ .write = ad5446_write,
+ },
++ [ID_AD5600] = {
++ .channel = AD5446_CHANNEL(16, 16, 0),
++ .write = ad5446_write,
++ },
+ [ID_AD5601] = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
+ .write = ad5446_write,
+@@ -448,6 +453,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
+ {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
+ {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
+ {"ad5553", ID_AD5553},
++ {"ad5600", ID_AD5600},
+ {"ad5601", ID_AD5601},
+ {"ad5611", ID_AD5611},
+ {"ad5621", ID_AD5621},
+diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
+index 28347df78cff..adb5ab9e3439 100644
+--- a/drivers/iio/light/bh1750.c
++++ b/drivers/iio/light/bh1750.c
+@@ -59,9 +59,9 @@ struct bh1750_chip_info {
+
+ u16 int_time_low_mask;
+ u16 int_time_high_mask;
+-}
++};
+
+-static const bh1750_chip_info_tbl[] = {
++static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
+ [BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
+ [BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
+ [BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
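The bh1750 hunk restores a compilable declaration: the struct definition had lost its closing semicolon and the lookup table had lost its element type. The intended shape, trimmed down to two fields:

    struct chip_info {
            unsigned short mtreg_min;
            unsigned short mtreg_max;
    };                                       /* the missing ';' */

    static const struct chip_info chip_info_tbl[] = {   /* the missing type */
            { 140, 1022 },
            {  31,  254 },
    };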
+diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
+index 2354302375de..52f53f3123b1 100644
+--- a/drivers/iio/pressure/cros_ec_baro.c
++++ b/drivers/iio/pressure/cros_ec_baro.c
+@@ -114,6 +114,7 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
+ static const struct iio_info cros_ec_baro_info = {
+ .read_raw = &cros_ec_baro_read,
+ .write_raw = &cros_ec_baro_write,
++ .read_avail = &cros_ec_sensors_core_read_avail,
+ };
+
+ static int cros_ec_baro_probe(struct platform_device *pdev)
+@@ -149,6 +150,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_FREQUENCY);
++ channel->info_mask_shared_by_all_available =
++ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.shift = 0;
+diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
+index f184ba5601d9..73ed550e3fc9 100644
+--- a/drivers/iio/temperature/max31856.c
++++ b/drivers/iio/temperature/max31856.c
+@@ -284,6 +284,8 @@ static int max31856_probe(struct spi_device *spi)
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->info = &max31856_info;
++ indio_dev->dev.parent = &spi->dev;
++ indio_dev->dev.of_node = spi->dev.of_node;
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max31856_channels;
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 50a92442c4f7..2b5bd7206fc6 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1199,9 +1199,21 @@ static void setup_dma_device(struct ib_device *device)
+ WARN_ON_ONCE(!parent);
+ device->dma_device = parent;
+ }
+- /* Setup default max segment size for all IB devices */
+- dma_set_max_seg_size(device->dma_device, SZ_2G);
+
++ if (!device->dev.dma_parms) {
++ if (parent) {
++ /*
++ * The caller did not provide DMA parameters, so
++ * 'parent' probably represents a PCI device. The PCI
++ * core sets the maximum segment size to 64
++ * KB. Increase this parameter to 2 GB.
++ */
++ device->dev.dma_parms = parent->dma_parms;
++ dma_set_max_seg_size(device->dma_device, SZ_2G);
++ } else {
++ WARN_ON_ONCE(true);
++ }
++ }
+ }
+
+ /*
+@@ -2397,8 +2409,12 @@ int ib_modify_port(struct ib_device *device,
+ rc = device->ops.modify_port(device, port_num,
+ port_modify_mask,
+ port_modify);
++ else if (rdma_protocol_roce(device, port_num) &&
++ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
++ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
++ rc = 0;
+ else
+- rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
++ rc = -EOPNOTSUPP;
+ return rc;
+ }
+ EXPORT_SYMBOL(ib_modify_port);
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 30a54f8aa42c..27e2df44d043 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -477,6 +477,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ req.update_period_ms = cpu_to_le32(1000);
+ req.stats_dma_addr = cpu_to_le64(dma_map);
++ req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+@@ -1270,10 +1271,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+ return;
+ }
+ rdev->qplib_ctx.hwrm_intf_ver =
+- (u64)resp.hwrm_intf_major << 48 |
+- (u64)resp.hwrm_intf_minor << 32 |
+- (u64)resp.hwrm_intf_build << 16 |
+- resp.hwrm_intf_patch;
++ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
++ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
++ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
++ le16_to_cpu(resp.hwrm_intf_patch);
+ }
+
+ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
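The bnxt_re hunk converts each little-endian 16-bit interface-version field to CPU order before shifting it into the packed 64-bit value. A standalone model of the packing, with from_le16() as a placeholder for the real byte-swap helper:

    #include <stdint.h>

    static uint16_t from_le16(uint16_t v) { return v; }  /* stand-in swap */

    static uint64_t pack_version(uint16_t major, uint16_t minor,
                                 uint16_t build, uint16_t patch)
    {
            return (uint64_t)from_le16(major) << 48 |
                   (uint64_t)from_le16(minor) << 32 |
                   (uint64_t)from_le16(build) << 16 |
                   from_le16(patch);
    }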
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index fbda11a7ab1a..aaa76d792185 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx {
+ u8 chip_metal;
+ };
+
+-#define CHIP_NUM_57500 0x1750
++#define CHIP_NUM_57508 0x1750
++#define CHIP_NUM_57504 0x1751
++#define CHIP_NUM_57502 0x1752
+
+ struct bnxt_qplib_res {
+ struct pci_dev *pdev;
+@@ -203,7 +205,9 @@ struct bnxt_qplib_res {
+
+ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+ {
+- return (cctx->chip_num == CHIP_NUM_57500);
++ return (cctx->chip_num == CHIP_NUM_57508 ||
++ cctx->chip_num == CHIP_NUM_57504 ||
++ cctx->chip_num == CHIP_NUM_57502);
+ }
+
+ static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
+diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
+index 3c412bc5b94f..0778f4f7dccd 100644
+--- a/drivers/infiniband/hw/efa/efa_com.c
++++ b/drivers/infiniband/hw/efa/efa_com.c
+@@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
+ struct efa_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+ {
++ struct efa_admin_aq_entry *aqe;
+ struct efa_comp_ctx *comp_ctx;
+ u16 queue_size_mask;
+ u16 cmd_id;
+@@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
+
+ reinit_completion(&comp_ctx->wait_event);
+
+- memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
++ aqe = &aq->sq.entries[pi];
++ memset(aqe, 0, sizeof(*aqe));
++ memcpy(aqe, cmd, cmd_size_in_bytes);
+
+ aq->sq.pc++;
+ atomic64_inc(&aq->stats.submitted_cmd);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index 0a31d0a3d657..06871731ac43 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -98,11 +98,15 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+ goto err;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+- if (!table_attr)
++ if (!table_attr) {
++ ret = -EMSGSIZE;
+ goto err;
++ }
+
+- if (hns_roce_fill_cq(msg, context))
++ if (hns_roce_fill_cq(msg, context)) {
++ ret = -EMSGSIZE;
+ goto err_cancel_table;
++ }
+
+ nla_nest_end(msg, table_attr);
+ kfree(context);
+@@ -113,7 +117,7 @@ err_cancel_table:
+ nla_nest_cancel(msg, table_attr);
+ err:
+ kfree(context);
+- return -EMSGSIZE;
++ return ret;
+ }
+
+ int hns_roce_fill_res_entry(struct sk_buff *msg,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 43ea2c13b212..108667ae6b14 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -180,8 +180,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+ struct hns_roce_ib_create_srq ucmd;
+- u32 page_shift;
+- u32 npages;
++ struct hns_roce_buf *buf;
+ int ret;
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+@@ -191,11 +190,13 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
+ if (IS_ERR(srq->umem))
+ return PTR_ERR(srq->umem);
+
+- npages = (ib_umem_page_count(srq->umem) +
+- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+- ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
++ buf = &srq->buf;
++ buf->npages = (ib_umem_page_count(srq->umem) +
++ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
++ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
++ buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
++ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
++ &srq->mtt);
+ if (ret)
+ goto err_user_buf;
+
+@@ -212,9 +213,12 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
+ goto err_user_srq_mtt;
+ }
+
+- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
+- PAGE_SHIFT, &srq->idx_que.mtt);
+-
++ buf = &srq->idx_que.idx_buf;
++ buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
++ 1 << hr_dev->caps.idx_buf_pg_sz);
++ buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
++ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
++ &srq->idx_que.mtt);
+ if (ret) {
+ dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
+ goto err_user_idx_mtt;
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index dc71b6e16a07..b462eaca1ee3 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -357,6 +357,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
++ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
+
+ if (IS_IWARP(dev)) {
+ xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 6f3ce86019b7..a7ccca3c4f89 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1577,6 +1577,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+
+ ib_umem_release(qp->urq.umem);
+ qp->urq.umem = NULL;
++
++ if (rdma_protocol_roce(&dev->ibdev, 1)) {
++ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
++ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
++ } else {
++ kfree(qp->usq.pbl_tbl);
++ kfree(qp->urq.pbl_tbl);
++ }
+ }
+
+ static int qedr_create_user_qp(struct qedr_dev *dev,
+@@ -2673,8 +2681,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
++ if (mr->type != QEDR_MR_DMA)
++ free_mr_info(dev, &mr->info);
+
+ /* it could be user registered memory. */
+ ib_umem_release(mr->umem);
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index 05a92f997f60..fb01407a310f 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -248,24 +248,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
+ return NULL;
+ }
+
+-static void siw_verbs_sq_flush(struct ib_qp *base_qp)
+-{
+- struct siw_qp *qp = to_siw_qp(base_qp);
+-
+- down_write(&qp->state_lock);
+- siw_sq_flush(qp);
+- up_write(&qp->state_lock);
+-}
+-
+-static void siw_verbs_rq_flush(struct ib_qp *base_qp)
+-{
+- struct siw_qp *qp = to_siw_qp(base_qp);
+-
+- down_write(&qp->state_lock);
+- siw_rq_flush(qp);
+- up_write(&qp->state_lock);
+-}
+-
+ static const struct ib_device_ops siw_device_ops = {
+ .owner = THIS_MODULE,
+ .uverbs_abi_ver = SIW_ABI_VERSION,
+@@ -284,8 +266,6 @@ static const struct ib_device_ops siw_device_ops = {
+ .destroy_cq = siw_destroy_cq,
+ .destroy_qp = siw_destroy_qp,
+ .destroy_srq = siw_destroy_srq,
+- .drain_rq = siw_verbs_rq_flush,
+- .drain_sq = siw_verbs_sq_flush,
+ .get_dma_mr = siw_get_dma_mr,
+ .get_port_immutable = siw_get_port_immutable,
+ .iw_accept = siw_accept,
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index b18a677832e1..1b1a40db529c 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -685,6 +685,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
+ return bytes;
+ }
+
++/* Complete SQ WR's without processing */
++static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
++ const struct ib_send_wr **bad_wr)
++{
++ struct siw_sqe sqe = {};
++ int rv = 0;
++
++ while (wr) {
++ sqe.id = wr->wr_id;
++ sqe.opcode = wr->opcode;
++ rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
++ if (rv) {
++ if (bad_wr)
++ *bad_wr = wr;
++ break;
++ }
++ wr = wr->next;
++ }
++ return rv;
++}
++
++/* Complete RQ WR's without processing */
++static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
++ const struct ib_recv_wr **bad_wr)
++{
++ struct siw_rqe rqe = {};
++ int rv = 0;
++
++ while (wr) {
++ rqe.id = wr->wr_id;
++ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
++ if (rv) {
++ if (bad_wr)
++ *bad_wr = wr;
++ break;
++ }
++ wr = wr->next;
++ }
++ return rv;
++}
++
+ /*
+ * siw_post_send()
+ *
+@@ -703,26 +744,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ unsigned long flags;
+ int rv = 0;
+
++ if (wr && !qp->kernel_verbs) {
++ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
++ *bad_wr = wr;
++ return -EINVAL;
++ }
++
+ /*
+ * Try to acquire QP state lock. Must be non-blocking
+ * to accommodate kernel clients needs.
+ */
+ if (!down_read_trylock(&qp->state_lock)) {
+- *bad_wr = wr;
+- siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
+- return -ENOTCONN;
++ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
++ /*
++ * ERROR state is final, so we can be sure
++ * this state will not change as long as the QP
++ * exists.
++ *
++ * This handles an ib_drain_sq() call with
++ * a concurrent request to set the QP state
++ * to ERROR.
++ */
++ rv = siw_sq_flush_wr(qp, wr, bad_wr);
++ } else {
++ siw_dbg_qp(qp, "QP locked, state %d\n",
++ qp->attrs.state);
++ *bad_wr = wr;
++ rv = -ENOTCONN;
++ }
++ return rv;
+ }
+ if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
++ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
++ /*
++ * Immediately flush this WR to CQ, if QP
++ * is in ERROR state. SQ is guaranteed to
++ * be empty, so WR completes in-order.
++ *
++ * Typically triggered by ib_drain_sq().
++ */
++ rv = siw_sq_flush_wr(qp, wr, bad_wr);
++ } else {
++ siw_dbg_qp(qp, "QP out of state %d\n",
++ qp->attrs.state);
++ *bad_wr = wr;
++ rv = -ENOTCONN;
++ }
+ up_read(&qp->state_lock);
+- *bad_wr = wr;
+- siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
+- return -ENOTCONN;
+- }
+- if (wr && !qp->kernel_verbs) {
+- siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+- up_read(&qp->state_lock);
+- *bad_wr = wr;
+- return -EINVAL;
++ return rv;
+ }
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+@@ -917,24 +986,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ *bad_wr = wr;
+ return -EOPNOTSUPP; /* what else from errno.h? */
+ }
++ if (!qp->kernel_verbs) {
++ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
++ *bad_wr = wr;
++ return -EINVAL;
++ }
++
+ /*
+ * Try to acquire QP state lock. Must be non-blocking
+ * to accommodate kernel clients needs.
+ */
+ if (!down_read_trylock(&qp->state_lock)) {
+- *bad_wr = wr;
+- return -ENOTCONN;
+- }
+- if (!qp->kernel_verbs) {
+- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+- up_read(&qp->state_lock);
+- *bad_wr = wr;
+- return -EINVAL;
++ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
++ /*
++ * ERROR state is final, so we can be sure
++ * this state will not change as long as the QP
++ * exists.
++ *
++ * This handles an ib_drain_rq() call with
++ * a concurrent request to set the QP state
++ * to ERROR.
++ */
++ rv = siw_rq_flush_wr(qp, wr, bad_wr);
++ } else {
++ siw_dbg_qp(qp, "QP locked, state %d\n",
++ qp->attrs.state);
++ *bad_wr = wr;
++ rv = -ENOTCONN;
++ }
++ return rv;
+ }
+ if (qp->attrs.state > SIW_QP_STATE_RTS) {
++ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
++ /*
++ * Immediately flush this WR to CQ, if QP
++ * is in ERROR state. RQ is guaranteed to
++ * be empty, so WR completes in-order.
++ *
++ * Typically triggered by ib_drain_rq().
++ */
++ rv = siw_rq_flush_wr(qp, wr, bad_wr);
++ } else {
++ siw_dbg_qp(qp, "QP out of state %d\n",
++ qp->attrs.state);
++ *bad_wr = wr;
++ rv = -ENOTCONN;
++ }
+ up_read(&qp->state_lock);
+- *bad_wr = wr;
+- return -EINVAL;
++ return rv;
+ }
+ /*
+ * Serialize potentially multiple producers.
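The siw hunks add flush helpers so that work requests posted while the QP is already in the final ERROR state are completed immediately with a flush status, which is what ib_drain_sq()/ib_drain_rq() rely on. The generic shape of such a helper, with invented types and a stubbed completion call:

    struct wr { unsigned long id; struct wr *next; };

    static int complete_flushed(unsigned long id) { (void)id; return 0; }

    static int flush_wrs(const struct wr *wr, const struct wr **bad_wr)
    {
            int rv = 0;

            while (wr) {
                    rv = complete_flushed(wr->id);  /* flush-status completion */
                    if (rv) {
                            if (bad_wr)
                                    *bad_wr = wr;   /* report where it stopped */
                            break;
                    }
                    wr = wr->next;
            }
            return rv;
    }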
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 2e72fc5af157..c4c015c60446 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -646,6 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
+ if (ib_conn->pi_support) {
+ u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+
++ shost->sg_prot_tablesize = shost->sg_tablesize;
+ scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
+ SHOST_DIX_GUARD_CRC);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 6db6d969e31c..4ce797d4259f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -5447,9 +5447,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
+ int prot = 0;
+ int ret;
+
+- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+- return -EINVAL;
+-
+ if (iommu_prot & IOMMU_READ)
+ prot |= DMA_PTE_READ;
+ if (iommu_prot & IOMMU_WRITE)
+@@ -5492,8 +5489,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ /* Cope with horrid API which requires us to unmap more than the
+ size argument if it happens to be a large-page mapping. */
+ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+- return 0;
+
+ if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
+ size = VTD_PAGE_SIZE << level_to_offset_bits(level);
+@@ -5525,9 +5520,6 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+ int level = 0;
+ u64 phys = 0;
+
+- if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+- return 0;
+-
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+ if (pte)
+ phys = dma_pte_addr(pte);
+@@ -5705,8 +5697,8 @@ static void intel_iommu_get_resv_regions(struct device *device,
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
+- reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+- IOMMU_RESV_DIRECT);
++ reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
++ IOMMU_RESV_DIRECT_RELAXABLE);
+ if (reg)
+ list_add_tail(&reg->list, head);
+ }
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index d658c7c6a2ab..24248aa8a7e5 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -312,8 +312,8 @@ int iommu_insert_resv_region(struct iommu_resv_region *new,
+ list_for_each_entry_safe(iter, tmp, regions, list) {
+ phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+- /* no merge needed on elements of different types than @nr */
+- if (iter->type != nr->type) {
++ /* no merge needed on elements of different types than @new */
++ if (iter->type != new->type) {
+ list_move_tail(&iter->list, &stack);
+ continue;
+ }
+@@ -2221,13 +2221,13 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
+ goto out;
+ }
+
+- iommu_group_create_direct_mappings(group, dev);
+-
+ /* Make the domain the default for this group */
+ if (group->default_domain)
+ iommu_domain_free(group->default_domain);
+ group->default_domain = domain;
+
++ iommu_group_create_direct_mappings(group, dev);
++
+ dev_info(dev, "Using iommu %s mapping\n",
+ type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 6f776823b9ba..a1df0d95151c 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -377,7 +377,10 @@ retry_invalidate:
+ if (!fifo_full(&ca->free_inc))
+ goto retry_invalidate;
+
+- bch_prio_write(ca);
++ if (bch_prio_write(ca, false) < 0) {
++ ca->invalidate_needs_gc = 1;
++ wake_up_gc(ca->set);
++ }
+ }
+ }
+ out:
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 013e35a9e317..deb924e1d790 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -977,7 +977,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
+ __printf(2, 3)
+ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
+
+-void bch_prio_write(struct cache *ca);
++int bch_prio_write(struct cache *ca, bool wait);
+ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+
+ extern struct workqueue_struct *bcache_wq;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 20ed838e9413..64999c7a8033 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -529,12 +529,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
+ closure_sync(cl);
+ }
+
+-void bch_prio_write(struct cache *ca)
++int bch_prio_write(struct cache *ca, bool wait)
+ {
+ int i;
+ struct bucket *b;
+ struct closure cl;
+
++ pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
++ fifo_used(&ca->free[RESERVE_PRIO]),
++ fifo_used(&ca->free[RESERVE_NONE]),
++ fifo_used(&ca->free_inc));
++
++ /*
++ * Pre-check if there are enough free buckets. In the non-blocking
++ * scenario it's better to fail early rather than starting to allocate
++ * buckets and do a cleanup later in case of failure.
++ */
++ if (!wait) {
++ size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
++ fifo_used(&ca->free[RESERVE_NONE]);
++ if (prio_buckets(ca) > avail)
++ return -ENOMEM;
++ }
++
+ closure_init_stack(&cl);
+
+ lockdep_assert_held(&ca->set->bucket_lock);
+@@ -544,9 +561,6 @@ void bch_prio_write(struct cache *ca)
+ atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
+ &ca->meta_sectors_written);
+
+- //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+- // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+-
+ for (i = prio_buckets(ca) - 1; i >= 0; --i) {
+ long bucket;
+ struct prio_set *p = ca->disk_buckets;
+@@ -564,7 +578,7 @@ void bch_prio_write(struct cache *ca)
+ p->magic = pset_magic(&ca->sb);
+ p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
+
+- bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
++ bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
+ BUG_ON(bucket == -1);
+
+ mutex_unlock(&ca->set->bucket_lock);
+@@ -593,6 +607,7 @@ void bch_prio_write(struct cache *ca)
+
+ ca->prio_last_buckets[i] = ca->prio_buckets[i];
+ }
++ return 0;
+ }
+
+ static void prio_read(struct cache *ca, uint64_t bucket)
+@@ -761,20 +776,28 @@ static inline int idx_to_first_minor(int idx)
+
+ static void bcache_device_free(struct bcache_device *d)
+ {
++ struct gendisk *disk = d->disk;
++
+ lockdep_assert_held(&bch_register_lock);
+
+- pr_info("%s stopped", d->disk->disk_name);
++ if (disk)
++ pr_info("%s stopped", disk->disk_name);
++ else
++ pr_err("bcache device (NULL gendisk) stopped");
+
+ if (d->c)
+ bcache_device_detach(d);
+- if (d->disk && d->disk->flags & GENHD_FL_UP)
+- del_gendisk(d->disk);
+- if (d->disk && d->disk->queue)
+- blk_cleanup_queue(d->disk->queue);
+- if (d->disk) {
++
++ if (disk) {
++ if (disk->flags & GENHD_FL_UP)
++ del_gendisk(disk);
++
++ if (disk->queue)
++ blk_cleanup_queue(disk->queue);
++
+ ida_simple_remove(&bcache_device_idx,
+- first_minor_to_idx(d->disk->first_minor));
+- put_disk(d->disk);
++ first_minor_to_idx(disk->first_minor));
++ put_disk(disk);
+ }
+
+ bioset_exit(&d->bio_split);
+@@ -1954,7 +1977,7 @@ static int run_cache_set(struct cache_set *c)
+
+ mutex_lock(&c->bucket_lock);
+ for_each_cache(ca, c, i)
+- bch_prio_write(ca);
++ bch_prio_write(ca, true);
+ mutex_unlock(&c->bucket_lock);
+
+ err = "cannot allocate new UUID bucket";
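The bcache_device_free() hunk snapshots the gendisk pointer once and guards every teardown step, so a device that failed half-way through construction can still be released without a NULL dereference. A small illustration of the pattern with made-up types:

    #include <stdio.h>
    #include <stdlib.h>

    struct disk { char name[16]; int live; };
    struct bdev { struct disk *disk; };

    static void bdev_free(struct bdev *d)
    {
            struct disk *disk = d->disk;     /* snapshot once */

            if (disk)
                    printf("%s stopped\n", disk->name);
            else
                    printf("device (no disk) stopped\n");

            if (disk) {
                    if (disk->live)
                            disk->live = 0;  /* "del_gendisk" stand-in */
                    free(disk);
            }
    }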
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index b092c7b5282f..3ad18246fcb3 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -2139,6 +2139,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ memcpy(page_address(store.sb_page),
+ page_address(bitmap->storage.sb_page),
+ sizeof(bitmap_super_t));
++ spin_lock_irq(&bitmap->counts.lock);
+ md_bitmap_file_unmap(&bitmap->storage);
+ bitmap->storage = store;
+
+@@ -2154,7 +2155,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ blocks = min(old_counts.chunks << old_counts.chunkshift,
+ chunks << chunkshift);
+
+- spin_lock_irq(&bitmap->counts.lock);
+ /* For cluster raid, need to pre-allocate bitmap */
+ if (mddev_is_clustered(bitmap->mddev)) {
+ unsigned long page;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b8dd56b746da..805b33e27496 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1105,6 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ mdp_super_t *sb;
+ int ret;
++ bool spare_disk = true;
+
+ /*
+ * Calculate the position of the superblock (512byte sectors),
+@@ -1155,8 +1156,18 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ else
+ rdev->desc_nr = sb->this_disk.number;
+
++ /* not spare disk, or LEVEL_MULTIPATH */
++ if (sb->level == LEVEL_MULTIPATH ||
++ (rdev->desc_nr >= 0 &&
++ sb->disks[rdev->desc_nr].state &
++ ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
++ spare_disk = false;
++
+ if (!refdev) {
+- ret = 1;
++ if (!spare_disk)
++ ret = 1;
++ else
++ ret = 0;
+ } else {
+ __u64 ev1, ev2;
+ mdp_super_t *refsb = page_address(refdev->sb_page);
+@@ -1172,7 +1183,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ }
+ ev1 = md_event(sb);
+ ev2 = md_event(refsb);
+- if (ev1 > ev2)
++
++ if (!spare_disk && ev1 > ev2)
+ ret = 1;
+ else
+ ret = 0;
+@@ -1532,6 +1544,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ sector_t sectors;
+ char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+ int bmask;
++ bool spare_disk = true;
+
+ /*
+ * Calculate the position of the superblock in 512byte sectors.
+@@ -1665,8 +1678,19 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ sb->level != 0)
+ return -EINVAL;
+
++ /* not spare disk, or LEVEL_MULTIPATH */
++ if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
++ (rdev->desc_nr >= 0 &&
++ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
++ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
++ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
++ spare_disk = false;
++
+ if (!refdev) {
+- ret = 1;
++ if (!spare_disk)
++ ret = 1;
++ else
++ ret = 0;
+ } else {
+ __u64 ev1, ev2;
+ struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
+@@ -1683,7 +1707,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ ev1 = le64_to_cpu(sb->events);
+ ev2 = le64_to_cpu(refsb->events);
+
+- if (ev1 > ev2)
++ if (!spare_disk && ev1 > ev2)
+ ret = 1;
+ else
+ ret = 0;
+@@ -3604,7 +3628,7 @@ abort_free:
+ * Check a full RAID array for plausibility
+ */
+
+-static void analyze_sbs(struct mddev *mddev)
++static int analyze_sbs(struct mddev *mddev)
+ {
+ int i;
+ struct md_rdev *rdev, *freshest, *tmp;
+@@ -3625,6 +3649,12 @@ static void analyze_sbs(struct mddev *mddev)
+ md_kick_rdev_from_array(rdev);
+ }
+
++ /* Cannot find a valid fresh disk */
++ if (!freshest) {
++ pr_warn("md: cannot find a valid disk\n");
++ return -EINVAL;
++ }
++
+ super_types[mddev->major_version].
+ validate_super(mddev, freshest);
+
+@@ -3659,6 +3689,8 @@ static void analyze_sbs(struct mddev *mddev)
+ clear_bit(In_sync, &rdev->flags);
+ }
+ }
++
++ return 0;
+ }
+
+ /* Read a fixed-point number.
+@@ -5577,7 +5609,9 @@ int md_run(struct mddev *mddev)
+ if (!mddev->raid_disks) {
+ if (!mddev->persistent)
+ return -EINVAL;
+- analyze_sbs(mddev);
++ err = analyze_sbs(mddev);
++ if (err)
++ return -EINVAL;
+ }
+
+ if (mddev->level != LEVEL_NONE)
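The md hunks stop bare spare disks from winning the "freshest superblock" comparison and make analyze_sbs() fail cleanly when no valid member remains, instead of validating against a NULL pointer. A simplified sketch of the guarded selection:

    #include <errno.h>
    #include <stddef.h>

    struct member { int events; int spare; struct member *next; };

    static int pick_freshest(struct member *list, struct member **freshest)
    {
            struct member *best = NULL;

            for (struct member *m = list; m; m = m->next) {
                    if (m->spare)
                            continue;        /* spares never win */
                    if (!best || m->events > best->events)
                            best = m;
            }
            if (!best)
                    return -EINVAL;          /* no valid member found */
            *freshest = best;
            return 0;
    }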
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 7eee1812bba3..fcffcc31d168 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -1113,6 +1113,7 @@ comment "SDR tuner chips"
+ config SDR_MAX2175
+ tristate "Maxim 2175 RF to Bits tuner"
+ depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
++ select REGMAP_I2C
+ help
+ Support for Maxim 2175 tuner. It is an advanced analog/digital
+ radio receiver with RF-to-Bits front-end designed for SDR solutions.
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index 925c171e7797..7a49651f4d1f 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -309,6 +309,7 @@ static int ad5820_probe(struct i2c_client *client,
+ v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
+ coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ coil->subdev.internal_ops = &ad5820_internal_ops;
++ coil->subdev.entity.function = MEDIA_ENT_F_LENS;
+ strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name));
+
+ ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
+index f4ded0669ff9..e1ff38009cf0 100644
+--- a/drivers/media/i2c/ov2659.c
++++ b/drivers/media/i2c/ov2659.c
+@@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = {
+ { REG_TIMING_YINC, 0x11 },
+ { REG_TIMING_VERT_FORMAT, 0x80 },
+ { REG_TIMING_HORIZ_FORMAT, 0x00 },
++ { 0x370a, 0x12 },
+ { 0x3a03, 0xe8 },
+ { 0x3a09, 0x6f },
+ { 0x3a0b, 0x5d },
+ { 0x3a15, 0x9a },
++ { REG_VFIFO_READ_START_H, 0x00 },
++ { REG_VFIFO_READ_START_L, 0x80 },
++ { REG_ISP_CTRL02, 0x00 },
+ { REG_NULL, 0x00 },
+ };
+
+@@ -1201,11 +1205,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
+ goto unlock;
+ }
+
+- ov2659_set_pixel_clock(ov2659);
+- ov2659_set_frame_size(ov2659);
+- ov2659_set_format(ov2659);
+- ov2659_set_streaming(ov2659, 1);
+- ov2659->streaming = on;
++ ret = ov2659_set_pixel_clock(ov2659);
++ if (!ret)
++ ret = ov2659_set_frame_size(ov2659);
++ if (!ret)
++ ret = ov2659_set_format(ov2659);
++ if (!ret) {
++ ov2659_set_streaming(ov2659, 1);
++ ov2659->streaming = on;
++ }
+
+ unlock:
+ mutex_unlock(&ov2659->lock);
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 500d9bbff10b..18dd2d717088 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -1611,6 +1611,11 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
+ !(mode->hact == 640 && mode->vact == 480))
+ return NULL;
+
++ /* 2592x1944 only works at 15fps max */
++ if ((mode->hact == 2592 && mode->vact == 1944) &&
++ fr > OV5640_15_FPS)
++ return NULL;
++
+ return mode;
+ }
+
+diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
+index 5b9af5e5b7f1..a5b2448c0abc 100644
+--- a/drivers/media/i2c/ov6650.c
++++ b/drivers/media/i2c/ov6650.c
+@@ -130,6 +130,7 @@
+ #define CLKRC_24MHz 0xc0
+ #define CLKRC_DIV_MASK 0x3f
+ #define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
++#define DEF_CLKRC 0x00
+
+ #define COMA_RESET BIT(7)
+ #define COMA_QCIF BIT(5)
+@@ -465,38 +466,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
+ {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov6650 *priv = to_ov6650(client);
+- struct v4l2_rect rect = sel->r;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
+- &rect.height, 2, H_CIF, 1, 0);
+- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
+- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
+- &rect.top, DEF_VSTRT << 1,
+- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
+- 0);
++ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
++ &sel->r.height, 2, H_CIF, 1, 0);
++ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
++ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
++ &sel->r.top, DEF_VSTRT << 1,
++ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
++ 1, 0);
+
+- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
++ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
+ if (!ret) {
+- priv->rect.left = rect.left;
++ priv->rect.width += priv->rect.left - sel->r.left;
++ priv->rect.left = sel->r.left;
+ ret = ov6650_reg_write(client, REG_HSTOP,
+- (rect.left + rect.width) >> 1);
++ (sel->r.left + sel->r.width) >> 1);
+ }
+ if (!ret) {
+- priv->rect.width = rect.width;
+- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
++ priv->rect.width = sel->r.width;
++ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
+ }
+ if (!ret) {
+- priv->rect.top = rect.top;
++ priv->rect.height += priv->rect.top - sel->r.top;
++ priv->rect.top = sel->r.top;
+ ret = ov6650_reg_write(client, REG_VSTOP,
+- (rect.top + rect.height) >> 1);
++ (sel->r.top + sel->r.height) >> 1);
+ }
+ if (!ret)
+- priv->rect.height = rect.height;
++ priv->rect.height = sel->r.height;
+
+ return ret;
+ }
+@@ -610,7 +612,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+ dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
+ return -EINVAL;
+ }
+- priv->code = code;
+
+ if (code == MEDIA_BUS_FMT_Y8_1X8 ||
+ code == MEDIA_BUS_FMT_SBGGR8_1X8) {
+@@ -636,7 +637,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+ dev_dbg(&client->dev, "max resolution: CIF\n");
+ coma_mask |= COMA_QCIF;
+ }
+- priv->half_scale = half_scale;
+
+ clkrc = CLKRC_12MHz;
+ mclk = 12000000;
+@@ -654,8 +654,13 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+ ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
+ if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+- if (!ret)
++ if (!ret) {
++ priv->half_scale = half_scale;
++
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
++ }
++ if (!ret)
++ priv->code = code;
+
+ if (!ret) {
+ mf->colorspace = priv->colorspace;
+@@ -754,19 +759,17 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
+ else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
+ div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
+
+- /*
+- * Keep result to be used as tpf limit
+- * for subsequent clock divider calculations
+- */
+- priv->tpf.numerator = div;
+- priv->tpf.denominator = FRAME_RATE_MAX;
++ tpf->numerator = div;
++ tpf->denominator = FRAME_RATE_MAX;
+
+- clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
++ clkrc = to_clkrc(tpf, priv->pclk_limit, priv->pclk_max);
+
+ ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ if (!ret) {
+- tpf->numerator = GET_CLKRC_DIV(clkrc);
+- tpf->denominator = FRAME_RATE_MAX;
++ priv->tpf.numerator = GET_CLKRC_DIV(clkrc);
++ priv->tpf.denominator = FRAME_RATE_MAX;
++
++ *tpf = priv->tpf;
+ }
+
+ return ret;
+@@ -989,8 +992,10 @@ static int ov6650_probe(struct i2c_client *client,
+ V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
+
+ priv->subdev.ctrl_handler = &priv->hdl;
+- if (priv->hdl.error)
+- return priv->hdl.error;
++ if (priv->hdl.error) {
++ ret = priv->hdl.error;
++ goto ectlhdlfree;
++ }
+
+ v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
+ v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
+@@ -1005,11 +1010,17 @@ static int ov6650_probe(struct i2c_client *client,
+ priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ priv->colorspace = V4L2_COLORSPACE_JPEG;
+
++ /* Hardware default frame interval */
++ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
++ priv->tpf.denominator = FRAME_RATE_MAX;
++
+ priv->subdev.internal_ops = &ov6650_internal_ops;
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+- if (ret)
+- v4l2_ctrl_handler_free(&priv->hdl);
++ if (!ret)
++ return 0;
++ectlhdlfree:
++ v4l2_ctrl_handler_free(&priv->hdl);
+
+ return ret;
+ }
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index 9adf8e034e7d..42805dfbffeb 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -3101,19 +3101,23 @@ static int smiapp_probe(struct i2c_client *client)
+ if (rval < 0)
+ goto out_media_entity_cleanup;
+
+- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
+- if (rval < 0)
+- goto out_media_entity_cleanup;
+-
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
++
++ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
++ if (rval < 0)
++ goto out_disable_runtime_pm;
++
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return 0;
+
++out_disable_runtime_pm:
++ pm_runtime_disable(&client->dev);
++
+ out_media_entity_cleanup:
+ media_entity_cleanup(&sensor->src->sd.entity);
+
+diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
+index 81285b8d5cfb..003ba22334cd 100644
+--- a/drivers/media/i2c/st-mipid02.c
++++ b/drivers/media/i2c/st-mipid02.c
+@@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client)
+ bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+
++ if (IS_ERR(bridge->reset_gpio)) {
++ dev_err(dev, "failed to get reset GPIO\n");
++ return PTR_ERR(bridge->reset_gpio);
++ }
++
+ ret = mipid02_get_regulators(bridge);
+ if (ret) {
+ dev_err(dev, "failed to get regulators %d", ret);
+diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
+index dcc0f02aeb70..b8abcd550604 100644
+--- a/drivers/media/pci/cx88/cx88-video.c
++++ b/drivers/media/pci/cx88/cx88-video.c
+@@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
+ core = cx88_core_get(dev->pci);
+ if (!core) {
+ err = -EINVAL;
+- goto fail_free;
++ goto fail_disable;
+ }
+ dev->core = core;
+
+@@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
+ cc->step, cc->default_value);
+ if (!vc) {
+ err = core->audio_hdl.error;
+- goto fail_core;
++ goto fail_irq;
+ }
+ vc->priv = (void *)cc;
+ }
+@@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
+ cc->step, cc->default_value);
+ if (!vc) {
+ err = core->video_hdl.error;
+- goto fail_core;
++ goto fail_irq;
+ }
+ vc->priv = (void *)cc;
+ if (vc->id == V4L2_CID_CHROMA_AGC)
+@@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
+
+ fail_unreg:
+ cx8800_unregister_video(dev);
+- free_irq(pci_dev->irq, dev);
+ mutex_unlock(&core->lock);
++fail_irq:
++ free_irq(pci_dev->irq, dev);
+ fail_core:
+ core->v4ldev = NULL;
+ cx88_core_put(core, dev->pci);
++fail_disable:
++ pci_disable_device(pci_dev);
+ fail_free:
+ kfree(dev);
+ return err;
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index 2b42ba1f5949..e13dbf27a9c2 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1830,6 +1830,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
++ /* if trying to set the same std then nothing to do */
++ if (vpfe_standards[vpfe->std_index].std_id == std_id)
++ return 0;
++
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index eb12f3793062..096a7c9a8963 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
+ aspeed_video_start_frame(video);
+ }
+
++ /*
++ * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
++ * are disabled in the VE_INTERRUPT_CTRL register so clear them to
++ * prevent unnecessary interrupt calls.
++ */
++ if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
++ sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
++ if (sts & VE_INTERRUPT_FRAME_COMPLETE)
++ sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
++
+ return sts ? IRQ_NONE : IRQ_HANDLED;
+ }
+
+@@ -741,6 +751,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
+ }
+
+ set_bit(VIDEO_RES_DETECT, &video->flags);
++ aspeed_video_update(video, VE_CTRL,
++ VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
+ aspeed_video_enable_mode_detect(video);
+
+ rc = wait_event_interruptible_timeout(video->wait,
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index a838189d4490..9aaf3b8060d5 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev)
+ ret = v4l2_device_register(dev, &fmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
+- return ret;
++ goto err_md;
+ }
+
+ ret = fimc_md_get_clocks(fmd);
+ if (ret)
+- goto err_md;
++ goto err_v4l2dev;
+
+ ret = fimc_md_get_pinctrl(fmd);
+ if (ret < 0) {
+@@ -1519,9 +1519,10 @@ err_m_ent:
+ fimc_md_unregister_entities(fmd);
+ err_clk:
+ fimc_md_put_clocks(fmd);
++err_v4l2dev:
++ v4l2_device_unregister(&fmd->v4l2_dev);
+ err_md:
+ media_device_cleanup(&fmd->media_dev);
+- v4l2_device_unregister(&fmd->v4l2_dev);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
+index 3b39e875292e..3d8fe854feb0 100644
+--- a/drivers/media/platform/meson/ao-cec-g12a.c
++++ b/drivers/media/platform/meson/ao-cec-g12a.c
+@@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
+ if (IS_ERR(ao_cec->adap))
+ return PTR_ERR(ao_cec->adap);
+
+- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+- ao_cec->adap);
+- if (!ao_cec->notify) {
+- ret = -ENOMEM;
+- goto out_probe_adapter;
+- }
+-
+ ao_cec->adap->owner = THIS_MODULE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &meson_ao_cec_g12a_regmap_conf);
+ if (IS_ERR(ao_cec->regmap)) {
+ ret = PTR_ERR(ao_cec->regmap);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
+ &meson_ao_cec_g12a_cec_regmap_conf);
+ if (IS_ERR(ao_cec->regmap_cec)) {
+ ret = PTR_ERR(ao_cec->regmap_cec);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+@@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
+ 0, NULL, ao_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
+ if (IS_ERR(ao_cec->oscin)) {
+ dev_err(&pdev->dev, "oscin clock request failed\n");
+ ret = PTR_ERR(ao_cec->oscin);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ret = meson_ao_cec_g12a_setup_clk(ao_cec);
+ if (ret)
+- goto out_probe_notify;
++ goto out_probe_adapter;
+
+ ret = clk_prepare_enable(ao_cec->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ device_reset_optional(&pdev->dev);
+
+ platform_set_drvdata(pdev, ao_cec);
+
++ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
++ ao_cec->adap);
++ if (!ao_cec->notify) {
++ ret = -ENOMEM;
++ goto out_probe_core_clk;
++ }
++
+ ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
+ if (ret < 0)
+- goto out_probe_core_clk;
++ goto out_probe_notify;
+
+ /* Setup Hardware */
+ regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
+
+ return 0;
+
+-out_probe_core_clk:
+- clk_disable_unprepare(ao_cec->core);
+-
+ out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
+
++out_probe_core_clk:
++ clk_disable_unprepare(ao_cec->core);
++
+ out_probe_adapter:
+ cec_delete_adapter(ao_cec->adap);
+
+diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
+index 64ed549bf012..03600e8b3ef0 100644
+--- a/drivers/media/platform/meson/ao-cec.c
++++ b/drivers/media/platform/meson/ao-cec.c
+@@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
+ if (IS_ERR(ao_cec->adap))
+ return PTR_ERR(ao_cec->adap);
+
+- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+- ao_cec->adap);
+- if (!ao_cec->notify) {
+- ret = -ENOMEM;
+- goto out_probe_adapter;
+- }
+-
+ ao_cec->adap->owner = THIS_MODULE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ao_cec->base)) {
+ ret = PTR_ERR(ao_cec->base);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+@@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
+ 0, NULL, ao_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ao_cec->core = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(ao_cec->core)) {
+ dev_err(&pdev->dev, "core clock request failed\n");
+ ret = PTR_ERR(ao_cec->core);
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ret = clk_prepare_enable(ao_cec->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+- goto out_probe_notify;
++ goto out_probe_adapter;
+ }
+
+ ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
+@@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
+ ao_cec->pdev = pdev;
+ platform_set_drvdata(pdev, ao_cec);
+
++ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
++ ao_cec->adap);
++ if (!ao_cec->notify) {
++ ret = -ENOMEM;
++ goto out_probe_clk;
++ }
++
+ ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
+ if (ret < 0)
+- goto out_probe_clk;
++ goto out_probe_notify;
+
+ /* Setup Hardware */
+ writel_relaxed(CEC_GEN_CNTL_RESET,
+@@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
+
+ return 0;
+
+-out_probe_clk:
+- clk_disable_unprepare(ao_cec->core);
+-
+ out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify);
+
++out_probe_clk:
++ clk_disable_unprepare(ao_cec->core);
++
+ out_probe_adapter:
+ cec_delete_adapter(ao_cec->adap);
+
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index e6eff512a8a1..84e982f259a0 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -427,10 +427,11 @@ static const struct venus_resources msm8916_res = {
+ };
+
+ static const struct freq_tbl msm8996_freq_table[] = {
+- { 1944000, 490000000 }, /* 4k UHD @ 60 */
+- { 972000, 320000000 }, /* 4k UHD @ 30 */
+- { 489600, 150000000 }, /* 1080p @ 60 */
+- { 244800, 75000000 }, /* 1080p @ 30 */
++ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
++ { 972000, 520000000 }, /* 4k UHD @ 30 */
++ { 489600, 346666667 }, /* 1080p @ 60 */
++ { 244800, 150000000 }, /* 1080p @ 30 */
++ { 108000, 75000000 }, /* 720p @ 30 */
+ };
+
+ static const struct reg_val msm8996_reg_preset[] = {
+diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
+index 7129a2aea09a..0d8855014ab3 100644
+--- a/drivers/media/platform/qcom/venus/hfi_venus.c
++++ b/drivers/media/platform/qcom/venus/hfi_venus.c
+@@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core)
+ {
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
++ u32 ctrl_status;
+ bool val;
+ int ret;
+
+@@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core)
+ return -EINVAL;
+ }
+
++ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
++ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
++ goto power_off;
++
+ /*
+ * Power collapse sequence for Venus 3xx and 4xx versions:
+ * 1. Check for ARM9 and video core to be idle by checking WFI bit
+@@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core)
+ if (ret)
+ return ret;
+
++power_off:
+ mutex_lock(&hdev->lock);
+
+ ret = venus_power_off(hdev);
+diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
+index 608e5217ccd5..0f267a237b42 100644
+--- a/drivers/media/platform/rcar_drif.c
++++ b/drivers/media/platform/rcar_drif.c
+@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
+ {
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
++ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = sdr->fmt->buffersize;
+
+diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
+index 9cd60fe1867c..a86b6e8f9196 100644
+--- a/drivers/media/platform/seco-cec/seco-cec.c
++++ b/drivers/media/platform/seco-cec/seco-cec.c
+@@ -675,6 +675,7 @@ err_notifier:
+ err_delete_adapter:
+ cec_delete_adapter(secocec->cec_adap);
+ err:
++ release_region(BRA_SMB_BASE_ADDR, 7);
+ dev_err(dev, "%s device probe failed\n", dev_name(dev));
+
+ return ret;
+diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
+index 28bc94129348..9bacfd603250 100644
+--- a/drivers/media/platform/ti-vpe/vpdma.h
++++ b/drivers/media/platform/ti-vpe/vpdma.h
+@@ -57,6 +57,7 @@ struct vpdma_data_format {
+ * line stride of source and dest
+ * buffers should be 16 byte aligned
+ */
++#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
+ #define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
+ #define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
+
+diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
+index 60b575bb44c4..8b14ba4a3d9e 100644
+--- a/drivers/media/platform/ti-vpe/vpe.c
++++ b/drivers/media/platform/ti-vpe/vpe.c
+@@ -338,20 +338,25 @@ enum {
+ };
+
+ /* find our format description corresponding to the passed v4l2_format */
+-static struct vpe_fmt *find_format(struct v4l2_format *f)
++static struct vpe_fmt *__find_format(u32 fourcc)
+ {
+ struct vpe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
+ fmt = &vpe_formats[k];
+- if (fmt->fourcc == f->fmt.pix.pixelformat)
++ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+ }
+
++static struct vpe_fmt *find_format(struct v4l2_format *f)
++{
++ return __find_format(f->fmt.pix.pixelformat);
++}
++
+ /*
+ * there is one vpe_dev structure in the driver, it is shared by
+ * all instances.
+@@ -1013,11 +1018,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+ u32 offset = 0;
++ u32 stride;
+
+ if (port == VPE_PORT_MV_OUT) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ q_data = &ctx->q_data[Q_DATA_SRC];
++ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
++ VPDMA_STRIDE_ALIGN);
+ } else {
+ /* to incorporate interleaved formats */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+@@ -1044,6 +1052,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
+ }
+ /* Apply the offset */
+ dma_addr += offset;
++ stride = q_data->bytesperline[VPE_LUMA];
+ }
+
+ if (q_data->flags & Q_DATA_FRAME_1D)
+@@ -1055,7 +1064,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
+ MAX_W, MAX_H);
+
+ vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
+- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
++ stride, &q_data->c_rect,
+ vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
+ MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
+ }
+@@ -1074,10 +1083,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
+ dma_addr_t dma_addr;
+ u32 flags = 0;
+ u32 offset = 0;
++ u32 stride;
+
+ if (port == VPE_PORT_MV_IN) {
+ vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
+ dma_addr = ctx->mv_buf_dma[mv_buf_selector];
++ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
++ VPDMA_STRIDE_ALIGN);
+ } else {
+ /* to incorporate interleaved formats */
+ int plane = fmt->coplanar ? p_data->vb_part : 0;
+@@ -1104,6 +1116,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
+ }
+ /* Apply the offset */
+ dma_addr += offset;
++ stride = q_data->bytesperline[VPE_LUMA];
+
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
+ /*
+@@ -1139,10 +1152,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
+ if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ frame_height /= 2;
+
+- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
+- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
+- frame_height, 0, 0);
++ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
++ &q_data->c_rect, vpdma_fmt, dma_addr,
++ p_data->channel, field, flags, frame_width,
++ frame_height, 0, 0);
+ }
+
+ /*
+@@ -1391,9 +1404,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
+ /* the previous dst mv buffer becomes the next src mv buffer */
+ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
+
+- if (ctx->aborting)
+- goto finished;
+-
+ s_vb = ctx->src_vbs[0];
+ d_vb = ctx->dst_vb;
+
+@@ -1404,6 +1414,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
+ d_vb->timecode = s_vb->timecode;
+
+ d_vb->sequence = ctx->sequence;
++ s_vb->sequence = ctx->sequence;
+
+ d_q_data = &ctx->q_data[Q_DATA_DST];
+ if (d_q_data->flags & Q_IS_INTERLACED) {
+@@ -1457,6 +1468,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
+ ctx->src_vbs[0] = NULL;
+ ctx->dst_vb = NULL;
+
++ if (ctx->aborting)
++ goto finished;
++
+ ctx->bufs_completed++;
+ if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
+ device_run(ctx);
+@@ -1566,9 +1580,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ unsigned int stride = 0;
+
+ if (!fmt || !(fmt->types & type)) {
+- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
++ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ pix->pixelformat);
+- return -EINVAL;
++ fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ }
+
+ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
+@@ -1615,7 +1629,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ &pix->height, MIN_H, MAX_H, H_ALIGN,
+ S_ALIGN);
+
+- if (!pix->num_planes)
++ if (!pix->num_planes || pix->num_planes > 2)
+ pix->num_planes = fmt->coplanar ? 2 : 1;
+ else if (pix->num_planes > 1 && !fmt->coplanar)
+ pix->num_planes = 1;
+@@ -1654,6 +1668,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
+ if (stride > plane_fmt->bytesperline)
+ plane_fmt->bytesperline = stride;
+
++ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
++ stride,
++ VPDMA_MAX_STRIDE);
++
+ plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
+ VPDMA_STRIDE_ALIGN);
+
+@@ -2274,7 +2292,7 @@ static int vpe_open(struct file *file)
+ v4l2_ctrl_handler_setup(hdl);
+
+ s_q_data = &ctx->q_data[Q_DATA_SRC];
+- s_q_data->fmt = &vpe_formats[2];
++ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ s_q_data->width = 1920;
+ s_q_data->height = 1080;
+ s_q_data->nplanes = 1;
+@@ -2352,6 +2370,12 @@ static int vpe_release(struct file *file)
+
+ mutex_lock(&dev->dev_mutex);
+ free_mv_buffers(ctx);
++
++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
++ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
++
+ vpdma_free_desc_list(&ctx->desc_list);
+ vpdma_free_desc_buf(&ctx->mmr_adb);
+
+diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
+index 0ee143ae0f6b..82350097503e 100644
+--- a/drivers/media/platform/vicodec/vicodec-core.c
++++ b/drivers/media/platform/vicodec/vicodec-core.c
+@@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+ v4l2_m2m_release(dev->stateful_enc.m2m_dev);
+ v4l2_m2m_release(dev->stateful_dec.m2m_dev);
+ v4l2_m2m_release(dev->stateless_dec.m2m_dev);
++#ifdef CONFIG_MEDIA_CONTROLLER
++ media_device_cleanup(&dev->mdev);
++#endif
+ kfree(dev);
+ }
+
+@@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev)
+ v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
+ v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
+ v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
+- media_device_cleanup(&dev->mdev);
+ #endif
+
+ video_unregister_device(&dev->stateful_enc.vfd);
+diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
+index acd3bd48c7e2..8d6b09623d88 100644
+--- a/drivers/media/platform/vim2m.c
++++ b/drivers/media/platform/vim2m.c
+@@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
+ if (!q_data)
+ return -EINVAL;
+
++ if (V4L2_TYPE_IS_OUTPUT(q->type))
++ ctx->aborting = 0;
++
+ q_data->sequence = 0;
+ return 0;
+ }
+@@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev)
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_m2m_release(dev->m2m_dev);
++#ifdef CONFIG_MEDIA_CONTROLLER
++ media_device_cleanup(&dev->mdev);
++#endif
+ kfree(dev);
+ }
+
+@@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev)
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
++ dev->m2m_dev = NULL;
+ goto error_dev;
+ }
+
+@@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev)
+ #ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+- media_device_cleanup(&dev->mdev);
+ #endif
+ video_unregister_device(&dev->vfd);
+
+diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
+index 7e1ae0b12f1e..a3120f4f7a90 100644
+--- a/drivers/media/platform/vimc/vimc-common.c
++++ b/drivers/media/platform/vimc/vimc-common.c
+@@ -375,7 +375,7 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
+ {
+ int ret;
+
+- /* Allocate the pads */
++ /* Allocate the pads. Should be released from the sd_int_op release */
+ ved->pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(ved->pads))
+ return PTR_ERR(ved->pads);
+@@ -424,7 +424,6 @@ EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
+ void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
+ {
+ media_entity_cleanup(ved->ent);
+- vimc_pads_cleanup(ved->pads);
+ v4l2_device_unregister_subdev(sd);
+ }
+ EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
+diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
+index b72b8385067b..baafd9d7fb2c 100644
+--- a/drivers/media/platform/vimc/vimc-debayer.c
++++ b/drivers/media/platform/vimc/vimc-debayer.c
+@@ -484,6 +484,7 @@ static void vimc_deb_release(struct v4l2_subdev *sd)
+ struct vimc_deb_device *vdeb =
+ container_of(sd, struct vimc_deb_device, sd);
+
++ vimc_pads_cleanup(vdeb->ved.pads);
+ kfree(vdeb);
+ }
+
+diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
+index 49ab8d9dd9c9..c0d9f43d5777 100644
+--- a/drivers/media/platform/vimc/vimc-scaler.c
++++ b/drivers/media/platform/vimc/vimc-scaler.c
+@@ -343,6 +343,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd)
+ struct vimc_sca_device *vsca =
+ container_of(sd, struct vimc_sca_device, sd);
+
++ vimc_pads_cleanup(vsca->ved.pads);
+ kfree(vsca);
+ }
+
+diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
+index 4a6a7e8e66c2..420573e5f6d6 100644
+--- a/drivers/media/platform/vimc/vimc-sensor.c
++++ b/drivers/media/platform/vimc/vimc-sensor.c
+@@ -292,6 +292,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
+
+ v4l2_ctrl_handler_free(&vsen->hdl);
+ tpg_free(&vsen->tpg);
++ vimc_pads_cleanup(vsen->ved.pads);
+ kfree(vsen);
+ }
+
+diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
+index 53315c8dd2bb..f6a5cdbd74e7 100644
+--- a/drivers/media/platform/vivid/vivid-core.c
++++ b/drivers/media/platform/vivid/vivid-core.c
+@@ -616,6 +616,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
+
+ vivid_free_controls(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
++#ifdef CONFIG_MEDIA_CONTROLLER
++ media_device_cleanup(&dev->mdev);
++#endif
+ vfree(dev->scaled_line);
+ vfree(dev->blended_line);
+ vfree(dev->edid);
+@@ -1580,7 +1583,6 @@ static int vivid_remove(struct platform_device *pdev)
+
+ #ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+- media_device_cleanup(&dev->mdev);
+ #endif
+
+ if (dev->has_vid_cap) {
+diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
+index 7541698a0be1..f491420d7b53 100644
+--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
++++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
+@@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
+ if (radio->gpio_reset)
+ gpiod_set_value(radio->gpio_reset, 0);
+
++ v4l2_ctrl_handler_free(&radio->hdl);
++ v4l2_device_unregister(&radio->v4l2_dev);
+ return 0;
+ }
+
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 1a801dc286f8..d1331f828108 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -504,7 +504,13 @@ urb_error:
+ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
+ {
+ 	/* use the alternate setting with the largest buffer */
+- usb_set_interface(fc_usb->udev,0,1);
++ int ret = usb_set_interface(fc_usb->udev, 0, 1);
++
++ if (ret) {
++ err("set interface failed.");
++ return ret;
++ }
++
+ switch (fc_usb->udev->speed) {
+ case USB_SPEED_LOW:
+ err("cannot handle USB speed because it is too slow.");
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+index a34717eba409..eaa08c7999d4 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+@@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
+ pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
+ pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
+ if (!list_empty(&vp->dev_video->devbase.fh_list) ||
+- !list_empty(&vp->dev_radio->devbase.fh_list))
++ (vp->dev_radio &&
++ !list_empty(&vp->dev_radio->devbase.fh_list))) {
++ pvr2_trace(PVR2_TRACE_STRUCT,
++ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
+ return;
++ }
+ pvr2_v4l2_destroy_no_lock(vp);
+ }
+
+@@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file)
+ kfree(fhp);
+ if (vp->channel.mc_head->disconnect_flag &&
+ list_empty(&vp->dev_video->devbase.fh_list) &&
+- list_empty(&vp->dev_radio->devbase.fh_list)) {
++ (!vp->dev_radio ||
++ list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_v4l2_destroy_no_lock(vp);
+ }
+ return 0;
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 1d8f38824631..cd84dbbf6a89 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -3144,6 +3144,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+ struct v4l2_ctrl_handler *prev_hdl = NULL;
+ struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+
++ mutex_lock(main_hdl->lock);
+ if (list_empty(&main_hdl->requests_queued))
+ goto queue;
+
+@@ -3175,18 +3176,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+ queue:
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
++ mutex_unlock(main_hdl->lock);
+ }
+
+ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+ {
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
++ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ list_del_init(&hdl->requests);
++ mutex_lock(main_hdl->lock);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
++ mutex_unlock(main_hdl->lock);
+ }
+
+ static void v4l2_ctrl_request_release(struct media_request_object *obj)
+@@ -4128,9 +4133,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
+ v4l2_ctrl_unlock(ctrl);
+ }
+
++ mutex_lock(main_hdl->lock);
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
++ mutex_unlock(main_hdl->lock);
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 51b912743f0f..21bb96ce4cd6 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1466,10 +1466,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
+ return ret;
+ }
+
++static void v4l_pix_format_touch(struct v4l2_pix_format *p)
++{
++ /*
++ * The v4l2_pix_format structure contains fields that make no sense for
++ * touch. Set them to default values in this case.
++ */
++
++ p->field = V4L2_FIELD_NONE;
++ p->colorspace = V4L2_COLORSPACE_RAW;
++ p->flags = 0;
++ p->ycbcr_enc = 0;
++ p->quantization = 0;
++ p->xfer_func = 0;
++}
++
+ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+ {
+ struct v4l2_format *p = arg;
++ struct video_device *vfd = video_devdata(file);
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+@@ -1507,6 +1523,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
++ if (vfd->vfl_type == VFL_TYPE_TOUCH)
++ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+@@ -1544,21 +1562,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ return -EINVAL;
+ }
+
+-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+-{
+- /*
+- * The v4l2_pix_format structure contains fields that make no sense for
+- * touch. Set them to default values in this case.
+- */
+-
+- p->field = V4L2_FIELD_NONE;
+- p->colorspace = V4L2_COLORSPACE_RAW;
+- p->flags = 0;
+- p->ycbcr_enc = 0;
+- p->quantization = 0;
+- p->xfer_func = 0;
+-}
+-
+ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+ {
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 1b1a794d639d..842f2210dc7e 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -1430,8 +1430,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+ return -ENOMEM;
+
+ data->miscdev.minor = MISC_DYNAMIC_MINOR;
+- data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
+- domains[domain_id]);
++ data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
++ domains[domain_id]);
+ data->miscdev.fops = &fastrpc_fops;
+ err = misc_register(&data->miscdev);
+ if (err)
+diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
+index 2870c25da166..4d1b44de1492 100644
+--- a/drivers/misc/ocxl/file.c
++++ b/drivers/misc/ocxl/file.c
+@@ -18,18 +18,15 @@ static struct class *ocxl_class;
+ static struct mutex minors_idr_lock;
+ static struct idr minors_idr;
+
+-static struct ocxl_file_info *find_file_info(dev_t devno)
++static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
+ {
+ struct ocxl_file_info *info;
+
+- /*
+- * We don't declare an RCU critical section here, as our AFU
+- * is protected by a reference counter on the device. By the time the
+- * info reference is removed from the idr, the ref count of
+- * the device is already at 0, so no user API will access that AFU and
+- * this function can't return it.
+- */
++ mutex_lock(&minors_idr_lock);
+ info = idr_find(&minors_idr, MINOR(devno));
++ if (info)
++ get_device(&info->dev);
++ mutex_unlock(&minors_idr_lock);
+ return info;
+ }
+
+@@ -58,14 +55,16 @@ static int afu_open(struct inode *inode, struct file *file)
+
+ pr_debug("%s for device %x\n", __func__, inode->i_rdev);
+
+- info = find_file_info(inode->i_rdev);
++ info = find_and_get_file_info(inode->i_rdev);
+ if (!info)
+ return -ENODEV;
+
+ rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
+- if (rc)
++ if (rc) {
++ put_device(&info->dev);
+ return rc;
+-
++ }
++ put_device(&info->dev);
+ file->private_data = ctx;
+ return 0;
+ }
+@@ -487,7 +486,6 @@ static void info_release(struct device *dev)
+ {
+ struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
+
+- free_minor(info);
+ ocxl_afu_put(info->afu);
+ kfree(info);
+ }
+@@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocxl_afu *afu)
+
+ ocxl_file_make_invisible(info);
+ ocxl_sysfs_unregister_afu(info);
++ free_minor(info);
+ device_unregister(&info->dev);
+ }
+
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 189e42674d85..010fe29a4888 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -228,6 +228,7 @@
+ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
+ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+
++#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
+ #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+ #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+@@ -1881,6 +1882,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
+
+ /* select EMMC50 PAD CMD tune */
+ sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
++ sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);
+
+ if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+ mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index b75c82d8d6c1..3d0bb5e2e09b 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -99,7 +99,7 @@
+
+ #define CORE_PWRSAVE_DLL BIT(3)
+
+-#define DDR_CONFIG_POR_VAL 0x80040853
++#define DDR_CONFIG_POR_VAL 0x80040873
+
+
+ #define INVALID_TUNING_PHASE -1
+@@ -148,8 +148,9 @@ struct sdhci_msm_offset {
+ u32 core_ddr_200_cfg;
+ u32 core_vendor_spec3;
+ u32 core_dll_config_2;
++ u32 core_dll_config_3;
++ u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
+ u32 core_ddr_config;
+- u32 core_ddr_config_2;
+ };
+
+ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
+@@ -177,8 +178,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
+ .core_ddr_200_cfg = 0x224,
+ .core_vendor_spec3 = 0x250,
+ .core_dll_config_2 = 0x254,
+- .core_ddr_config = 0x258,
+- .core_ddr_config_2 = 0x25c,
++ .core_dll_config_3 = 0x258,
++ .core_ddr_config = 0x25c,
+ };
+
+ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
+@@ -207,8 +208,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
+ .core_ddr_200_cfg = 0x184,
+ .core_vendor_spec3 = 0x1b0,
+ .core_dll_config_2 = 0x1b4,
+- .core_ddr_config = 0x1b8,
+- .core_ddr_config_2 = 0x1bc,
++ .core_ddr_config_old = 0x1b8,
++ .core_ddr_config = 0x1bc,
+ };
+
+ struct sdhci_msm_variant_ops {
+@@ -253,6 +254,7 @@ struct sdhci_msm_host {
+ const struct sdhci_msm_offset *offset;
+ bool use_cdr;
+ u32 transfer_mode;
++ bool updated_ddr_cfg;
+ };
+
+ static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
+@@ -924,8 +926,10 @@ out:
+ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+ {
+ struct mmc_host *mmc = host->mmc;
+- u32 dll_status, config;
++ u32 dll_status, config, ddr_cfg_offset;
+ int ret;
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_msm_offset *msm_offset =
+ sdhci_priv_msm_offset(host);
+
+@@ -938,8 +942,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+ * bootloaders. In the future, if this changes, then the desired
+ * values will need to be programmed appropriately.
+ */
+- writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+- msm_offset->core_ddr_config);
++ if (msm_host->updated_ddr_cfg)
++ ddr_cfg_offset = msm_offset->core_ddr_config;
++ else
++ ddr_cfg_offset = msm_offset->core_ddr_config_old;
++ writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
+
+ if (mmc->ios.enhanced_strobe) {
+ config = readl_relaxed(host->ioaddr +
+@@ -1899,6 +1906,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+ msm_offset->core_vendor_spec_capabilities0);
+ }
+
++ if (core_major == 1 && core_minor >= 0x49)
++ msm_host->updated_ddr_cfg = true;
++
+ /*
+ * Power on reset state may trigger power irq if previous status of
+ * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 1d1953dfc54b..889ed98ec0e7 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -710,9 +710,6 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+- if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
+- mdelay(5);
+-
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+@@ -1126,8 +1123,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+
+ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+- host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+- host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
++ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ }
+
+ if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index eaffa85bc728..642a9667db4d 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -26,6 +26,7 @@
+ #include <linux/mmc/slot-gpio.h>
+ #include <linux/mmc/sdhci-pci-data.h>
+ #include <linux/acpi.h>
++#include <linux/dmi.h>
+
+ #ifdef CONFIG_X86
+ #include <asm/iosf_mbi.h>
+@@ -782,11 +783,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ return 0;
+ }
+
++static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
++{
++ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
++ dmi_match(DMI_BIOS_VENDOR, "LENOVO");
++}
++
+ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ int ret = byt_emmc_probe_slot(slot);
+
+- slot->host->mmc->caps2 |= MMC_CAP2_CQE;
++ if (!glk_broken_cqhci(slot))
++ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
+ slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index b056400e34b1..5f9df2dbde06 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1871,9 +1871,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+- else if (timing == MMC_TIMING_SD_HS ||
+- timing == MMC_TIMING_MMC_HS ||
+- timing == MMC_TIMING_UHS_SDR25)
++ else if (timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+@@ -2408,8 +2406,8 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ sdhci_send_tuning(host, opcode);
+
+ if (!host->tuning_done) {
+- pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+- mmc_hostname(host->mmc));
++ pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
++ mmc_hostname(host->mmc));
+ sdhci_abort_tuning(host, opcode);
+ return -ETIMEDOUT;
+ }
+@@ -3758,6 +3756,9 @@ int sdhci_setup_host(struct sdhci_host *host)
+ mmc_hostname(mmc), host->version);
+ }
+
++ if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
++ mmc->caps2 &= ~MMC_CAP2_CQE;
++
+ if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
+ host->flags |= SDHCI_USE_SDMA;
+ else if (!(host->caps & SDHCI_CAN_DO_SDMA))
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 0ed3e0eaef5f..fe83ece6965b 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -409,6 +409,8 @@ struct sdhci_host {
+ #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
+ /* Controller reports inverted write-protect state */
+ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
++/* Controller has unusable command queue engine */
++#define SDHCI_QUIRK_BROKEN_CQE (1<<17)
+ /* Controller does not like fast PIO transfers */
+ #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
+ /* Controller does not have a LED */
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 9b6e1001e77c..dec5a99f52cf 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -1184,7 +1184,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
++ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps2 |= pdata->capabilities2;
+ mmc->max_segs = pdata->max_segs ? : 32;
+ mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 62f65573eb04..face00c622ed 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3612,32 +3612,35 @@ static int bond_neigh_init(struct neighbour *n)
+ const struct net_device_ops *slave_ops;
+ struct neigh_parms parms;
+ struct slave *slave;
+- int ret;
++ int ret = 0;
+
+- slave = bond_first_slave(bond);
++ rcu_read_lock();
++ slave = bond_first_slave_rcu(bond);
+ if (!slave)
+- return 0;
++ goto out;
+ slave_ops = slave->dev->netdev_ops;
+ if (!slave_ops->ndo_neigh_setup)
+- return 0;
+-
+- parms.neigh_setup = NULL;
+- parms.neigh_cleanup = NULL;
+- ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+- if (ret)
+- return ret;
++ goto out;
+
+- /* Assign slave's neigh_cleanup to neighbour in case cleanup is called
+- * after the last slave has been detached. Assumes that all slaves
+- * utilize the same neigh_cleanup (true at this writing as only user
+- * is ipoib).
++ /* TODO: find another way [1] to implement this.
++ * Passing a zeroed structure is fragile,
++ * but at least we do not pass garbage.
++ *
++ * [1] One way would be that ndo_neigh_setup() never touch
++ * struct neigh_parms, but propagate the new neigh_setup()
++ * back to ___neigh_create() / neigh_parms_alloc()
+ */
+- n->parms->neigh_cleanup = parms.neigh_cleanup;
++ memset(&parms, 0, sizeof(parms));
++ ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
+
+- if (!parms.neigh_setup)
+- return 0;
++ if (ret)
++ goto out;
+
+- return parms.neigh_setup(n);
++ if (parms.neigh_setup)
++ ret = parms.neigh_setup(n);
++out:
++ rcu_read_unlock();
++ return ret;
+ }
+
+ /* The bonding ndo_neigh_setup is called at init time before any
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 57f9a2f51085..e5c207ad3c77 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -389,6 +389,34 @@ static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv
+ (&priv->regs->mb[bank][priv->mb_size * mb_index]);
+ }
+
++static int flexcan_low_power_enter_ack(struct flexcan_priv *priv)
++{
++ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
++
++ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ udelay(10);
++
++ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++static int flexcan_low_power_exit_ack(struct flexcan_priv *priv)
++{
++ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
++
++ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
++ udelay(10);
++
++ if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
+ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
+@@ -407,7 +435,6 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
+- unsigned int ackval;
+ u32 reg_mcr;
+
+ reg_mcr = priv->read(&regs->mcr);
+@@ -418,36 +445,24 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+
+- /* get stop acknowledgment */
+- if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+- ackval, ackval & (1 << priv->stm.ack_bit),
+- 0, FLEXCAN_TIMEOUT_US))
+- return -ETIMEDOUT;
+-
+- return 0;
++ return flexcan_low_power_enter_ack(priv);
+ }
+
+ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
+- unsigned int ackval;
+ u32 reg_mcr;
+
+ /* remove stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+
+- /* get stop acknowledgment */
+- if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+- ackval, !(ackval & (1 << priv->stm.ack_bit)),
+- 0, FLEXCAN_TIMEOUT_US))
+- return -ETIMEDOUT;
+
+ reg_mcr = priv->read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+ priv->write(reg_mcr, &regs->mcr);
+
+- return 0;
++ return flexcan_low_power_exit_ack(priv);
+ }
+
+ static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+@@ -506,39 +521,25 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+ static int flexcan_chip_enable(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
+- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = priv->read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_MDIS;
+ priv->write(reg, &regs->mcr);
+
+- while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+- udelay(10);
+-
+- if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+- return -ETIMEDOUT;
+-
+- return 0;
++ return flexcan_low_power_exit_ack(priv);
+ }
+
+ static int flexcan_chip_disable(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
+- unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = priv->read(&regs->mcr);
+ reg |= FLEXCAN_MCR_MDIS;
+ priv->write(reg, &regs->mcr);
+
+- while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+- udelay(10);
+-
+- if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+- return -ETIMEDOUT;
+-
+- return 0;
++ return flexcan_low_power_enter_ack(priv);
+ }
+
+ static int flexcan_chip_freeze(struct flexcan_priv *priv)
+@@ -1703,6 +1704,9 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ netif_start_queue(dev);
+ if (device_may_wakeup(device)) {
+ disable_irq_wake(dev->irq);
++ err = flexcan_exit_stop_mode(priv);
++ if (err)
++ return err;
+ } else {
+ err = pm_runtime_force_resume(device);
+ if (err)
+@@ -1748,14 +1752,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+- int err;
+
+- if (netif_running(dev) && device_may_wakeup(device)) {
++ if (netif_running(dev) && device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, false);
+- err = flexcan_exit_stop_mode(priv);
+- if (err)
+- return err;
+- }
+
+ return 0;
+ }
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 3db619209fe1..d5d4bfa9c8fd 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -354,6 +354,8 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
+ if (IS_ERR(tcan4x5x->reset_gpio))
+ tcan4x5x->reset_gpio = NULL;
+
++ usleep_range(700, 1000);
++
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 07d2f3aa2c02..ae4c37e1bb75 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ struct kvaser_cmd *cmd;
+ int err;
+
+- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
++ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
+ struct kvaser_cmd *cmd;
+ int rc;
+
+- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
+ struct kvaser_cmd *cmd;
+ int rc;
+
+- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 7c482b2d78d2..2be846ee627d 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -60,6 +60,8 @@ enum xcan_reg {
+ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
+ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
+ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
++ XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */
++ XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */
+ };
+
+ #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
+@@ -1803,6 +1805,11 @@ static int xcan_probe(struct platform_device *pdev)
+
+ pm_runtime_put(&pdev->dev);
+
++ if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
++ priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
++ priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
++ }
++
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ hw_tx_max, priv->tx_max);
+diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
+index f6232ce8481f..685e12b05a7c 100644
+--- a/drivers/net/dsa/Kconfig
++++ b/drivers/net/dsa/Kconfig
+@@ -77,6 +77,7 @@ config NET_DSA_REALTEK_SMI
+ config NET_DSA_SMSC_LAN9303
+ tristate
+ select NET_DSA_TAG_LAN9303
++ select REGMAP
+ ---help---
+ This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ switch chips.
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index cc3536315eff..a7132c1593c3 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -347,7 +347,7 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN;
++ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
+
+@@ -526,6 +526,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+
+ cpu_port = ds->ports[port].cpu_dp->index;
+
++ b53_br_egress_floods(ds, port, true, true);
++
+ if (dev->ops->irq_enable)
+ ret = dev->ops->irq_enable(dev, port);
+ if (ret)
+@@ -641,6 +643,8 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
+
+ b53_brcm_hdr_setup(dev->ds, port);
++
++ b53_br_egress_floods(dev->ds, port, true, true);
+ }
+
+ static void b53_enable_mib(struct b53_device *dev)
+@@ -1766,19 +1770,26 @@ int b53_br_egress_floods(struct dsa_switch *ds, int port,
+ struct b53_device *dev = ds->priv;
+ u16 uc, mc;
+
+- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, &uc);
++ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FWD_EN, uc);
++ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
++
++ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
++ if (multicast)
++ mc |= BIT(port);
++ else
++ mc &= ~BIT(port);
++ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, &mc);
++ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FWD_EN, mc);
++ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+
+ return 0;
+
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index aa140662c7c2..4e5a428ab1a4 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -1389,6 +1389,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
+ int speed_mbps[SJA1105_NUM_PORTS];
+ int rc, i;
+
++ mutex_lock(&priv->mgmt_lock);
++
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+@@ -1420,6 +1422,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv)
+ goto out;
+ }
+ out:
++ mutex_unlock(&priv->mgmt_lock);
++
+ return rc;
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
+index 7c941eba0bc9..0ce37d54ed10 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
+@@ -72,7 +72,7 @@
+ /*****************************************************************************/
+ /* ENA adaptive interrupt moderation settings */
+
+-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
++#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
+ #define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+ #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 16553d92fad2..8c1c73b0ced7 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -315,10 +315,9 @@ static int ena_get_coalesce(struct net_device *net_dev,
+ ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
+ ena_dev->intr_delay_resolution;
+
+- if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
+- coalesce->rx_coalesce_usecs =
+- ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+- * ena_dev->intr_delay_resolution;
++ coalesce->rx_coalesce_usecs =
++ ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
++ * ena_dev->intr_delay_resolution;
+
+ coalesce->use_adaptive_rx_coalesce =
+ ena_com_get_adaptive_moderation_enabled(ena_dev);
+@@ -367,12 +366,6 @@ static int ena_set_coalesce(struct net_device *net_dev,
+
+ ena_update_tx_rings_intr_moderation(adapter);
+
+- if (coalesce->use_adaptive_rx_coalesce) {
+- if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
+- ena_com_enable_adaptive_moderation(ena_dev);
+- return 0;
+- }
+-
+ rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
+ coalesce->rx_coalesce_usecs);
+ if (rc)
+@@ -380,10 +373,13 @@ static int ena_set_coalesce(struct net_device *net_dev,
+
+ ena_update_rx_rings_intr_moderation(adapter);
+
+- if (!coalesce->use_adaptive_rx_coalesce) {
+- if (ena_com_get_adaptive_moderation_enabled(ena_dev))
+- ena_com_disable_adaptive_moderation(ena_dev);
+- }
++ if (coalesce->use_adaptive_rx_coalesce &&
++ !ena_com_get_adaptive_moderation_enabled(ena_dev))
++ ena_com_enable_adaptive_moderation(ena_dev);
++
++ if (!coalesce->use_adaptive_rx_coalesce &&
++ ena_com_get_adaptive_moderation_enabled(ena_dev))
++ ena_com_disable_adaptive_moderation(ena_dev);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+index 0edbb0a76847..5097a44686b3 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
++ int tx_idx;
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+- /* Update the Queue state */
+- rc = bnx2x_queue_state_change(bp, &q_params);
+- if (rc) {
+- BNX2X_ERR("Failed to configure Tx switching\n");
+- return rc;
++ for (tx_idx = FIRST_TX_COS_INDEX;
++ tx_idx < fp->max_cos; tx_idx++) {
++ q_params.params.update.cid_index = tx_idx;
++
++ /* Update the Queue state */
++ rc = bnx2x_queue_state_change(bp, &q_params);
++ if (rc) {
++ BNX2X_ERR("Failed to configure Tx switching\n");
++ return rc;
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 04ec909e06df..527e1bf93116 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1767,8 +1767,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+
+ rc = -EIO;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+- netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+- bnxt_sched_reset(bp, rxr);
++ bnapi->cp_ring.rx_buf_errors++;
++ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
++ netdev_warn(bp->dev, "RX buffer error %x\n",
++ rx_err);
++ bnxt_sched_reset(bp, rxr);
++ }
+ }
+ goto next_rx_no_len;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index d333589811a5..5163bb848618 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -927,6 +927,7 @@ struct bnxt_cp_ring_info {
+ dma_addr_t hw_stats_map;
+ u32 hw_stats_ctx_id;
+ u64 rx_l4_csum_errors;
++ u64 rx_buf_errors;
+ u64 missed_irqs;
+
+ struct bnxt_ring_struct cp_ring_struct;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 7151244f8c7d..7d2cfea05737 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -311,10 +311,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ } else {
+ rc = hwrm_send_message_silent(bp, msg, msg_len,
+ HWRM_CMD_TIMEOUT);
+- if (!rc)
++ if (!rc) {
+ bnxt_copy_from_nvm_data(val, data,
+ nvm_param.nvm_num_bits,
+ nvm_param.dl_num_bytes);
++ } else {
++ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
++
++ if (resp->cmd_err ==
++ NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
++ rc = -EOPNOTSUPP;
++ }
+ }
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
+ if (rc == -EACCES)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 51c140476717..89f95428556e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
+
+ static const char * const bnxt_ring_sw_stats_str[] = {
+ "rx_l4_csum_errors",
++ "rx_buf_errors",
+ "missed_irqs",
+ };
+
+@@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
++ buf[j++] = cpr->rx_buf_errors;
+ buf[j++] = cpr->missed_irqs;
+
+ bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index a8f4c69252ff..2814b96751b4 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -576,6 +576,8 @@ static int gmac_setup_txqs(struct net_device *netdev)
+
+ if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "TX queue base is not aligned\n");
++ dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
++ desc_ring, port->txq_dma_base);
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+index a9503aea527f..6437fe6b9abf 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+@@ -160,10 +160,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+ irq = mc_dev->irqs[0];
+ ptp_qoriq->irq = irq->msi_desc->irq;
+
+- err = devm_request_threaded_irq(dev, ptp_qoriq->irq, NULL,
+- dpaa2_ptp_irq_handler_thread,
+- IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- dev_name(dev), ptp_qoriq);
++ err = request_threaded_irq(ptp_qoriq->irq, NULL,
++ dpaa2_ptp_irq_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), ptp_qoriq);
+ if (err < 0) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto err_free_mc_irq;
+@@ -173,18 +173,20 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
+ DPRTC_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(dev, "dprtc_set_irq_enable(): %d\n", err);
+- goto err_free_mc_irq;
++ goto err_free_threaded_irq;
+ }
+
+ err = ptp_qoriq_init(ptp_qoriq, base, &dpaa2_ptp_caps);
+ if (err)
+- goto err_free_mc_irq;
++ goto err_free_threaded_irq;
+
+ dpaa2_phc_index = ptp_qoriq->phc_index;
+ dev_set_drvdata(dev, ptp_qoriq);
+
+ return 0;
+
++err_free_threaded_irq:
++ free_irq(ptp_qoriq->irq, ptp_qoriq);
+ err_free_mc_irq:
+ fsl_mc_free_irqs(mc_dev);
+ err_unmap:
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index 4606a7e4a6d1..2ffe035e96d6 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -543,9 +543,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ skb_tx_timestamp(skb);
+
+ hip04_set_xmit_desc(priv, phys);
+- priv->tx_head = TX_NEXT(tx_head);
+ count++;
+ netdev_sent_queue(ndev, skb->len);
++ priv->tx_head = TX_NEXT(tx_head);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 616cad0faa21..84d8816c8681 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1692,6 +1692,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+ time_after(jiffies,
+ (trans_start + ndev->watchdog_timeo))) {
+ timeout_queue = i;
++ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
++ q->state,
++ jiffies_to_msecs(jiffies - trans_start));
+ break;
+ }
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index c052bb33b3d3..162881005a6d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -9443,6 +9443,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+ return ret;
+ }
+
++ /* Log and clear the hw errors those already occurred */
++ hclge_handle_all_hns_hw_errors(ae_dev);
++
+ /* Re-enable the hw error interrupts because
+ * the interrupts get disabled on global reset.
+ */
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 0686ded7ad3a..e1ab2feeae53 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -176,7 +176,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
+ ltb->map_id = adapter->map_id;
+ adapter->map_id++;
+
+- init_completion(&adapter->fw_done);
++ reinit_completion(&adapter->fw_done);
+ rc = send_request_map(adapter, ltb->addr,
+ ltb->size, ltb->map_id);
+ if (rc) {
+@@ -215,7 +215,7 @@ static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+
+ memset(ltb->buff, 0, ltb->size);
+
+- init_completion(&adapter->fw_done);
++ reinit_completion(&adapter->fw_done);
+ rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ if (rc)
+ return rc;
+@@ -943,7 +943,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
+ if (adapter->vpd->buff)
+ len = adapter->vpd->len;
+
+- init_completion(&adapter->fw_done);
++ reinit_completion(&adapter->fw_done);
+ crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
+ crq.get_vpd_size.cmd = GET_VPD_SIZE;
+ rc = ibmvnic_send_crq(adapter, &crq);
+@@ -1689,7 +1689,7 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
+ crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
+ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
+
+- init_completion(&adapter->fw_done);
++ reinit_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ rc = -EIO;
+@@ -2316,7 +2316,7 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
+ adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
+ adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
+
+- init_completion(&adapter->reset_done);
++ reinit_completion(&adapter->reset_done);
+ adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+@@ -2332,7 +2332,7 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
+ adapter->desired.rx_entries = adapter->fallback.rx_entries;
+ adapter->desired.tx_entries = adapter->fallback.tx_entries;
+
+- init_completion(&adapter->reset_done);
++ reinit_completion(&adapter->reset_done);
+ adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+@@ -2603,7 +2603,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
+ cpu_to_be32(sizeof(struct ibmvnic_statistics));
+
+ /* Wait for data to be written */
+- init_completion(&adapter->stats_done);
++ reinit_completion(&adapter->stats_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return;
+@@ -4408,7 +4408,7 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
+ memset(&crq, 0, sizeof(crq));
+ crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
+ crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
+- init_completion(&adapter->fw_done);
++ reinit_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
+@@ -4960,6 +4960,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ INIT_LIST_HEAD(&adapter->rwi_list);
+ spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
++ init_completion(&adapter->fw_done);
++ init_completion(&adapter->reset_done);
++ init_completion(&adapter->stats_done);
+ clear_bit(0, &adapter->resetting);
+
+ do {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 7560f06768e0..3160b5bbe672 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -2571,9 +2571,16 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+ if (status)
+ return status;
+
+- hw->phy.link_info.req_fec_info =
+- abilities.fec_cfg_curr_mod_ext_info &
+- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
++ if (abilities.fec_cfg_curr_mod_ext_info &
++ I40E_AQ_ENABLE_FEC_AUTO)
++ hw->phy.link_info.req_fec_info =
++ (I40E_AQ_REQUEST_FEC_KR |
++ I40E_AQ_REQUEST_FEC_RS);
++ else
++ hw->phy.link_info.req_fec_info =
++ abilities.fec_cfg_curr_mod_ext_info &
++ (I40E_AQ_REQUEST_FEC_KR |
++ I40E_AQ_REQUEST_FEC_RS);
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 41e1240acaea..b577e6adf3bf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -722,7 +722,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+- if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
++ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
++ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
++ ethtool_link_ksettings_add_link_mode(ks, advertising,
++ FEC_NONE);
++ ethtool_link_ksettings_add_link_mode(ks, advertising,
++ FEC_BASER);
++ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
++ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+@@ -730,12 +737,6 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+- if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
+- ethtool_link_ksettings_add_link_mode(ks, advertising,
+- FEC_RS);
+- ethtool_link_ksettings_add_link_mode(ks, advertising,
+- FEC_BASER);
+- }
+ }
+ }
+
+@@ -1437,6 +1438,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status status = 0;
+ int err = 0;
++ u8 fec_cfg;
+
+ /* Get the current phy config */
+ memset(&abilities, 0, sizeof(abilities));
+@@ -1448,18 +1450,16 @@ static int i40e_get_fec_param(struct net_device *netdev,
+ }
+
+ fecparam->fec = 0;
+- if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
++ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
++ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+- if ((abilities.fec_cfg_curr_mod_ext_info &
+- I40E_AQ_SET_FEC_REQUEST_RS) ||
+- (abilities.fec_cfg_curr_mod_ext_info &
+- I40E_AQ_SET_FEC_ABILITY_RS))
++ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
++ I40E_AQ_SET_FEC_ABILITY_RS))
+ fecparam->fec |= ETHTOOL_FEC_RS;
+- if ((abilities.fec_cfg_curr_mod_ext_info &
+- I40E_AQ_SET_FEC_REQUEST_KR) ||
+- (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
++ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
++ I40E_AQ_SET_FEC_ABILITY_KR))
+ fecparam->fec |= ETHTOOL_FEC_BASER;
+- if (abilities.fec_cfg_curr_mod_ext_info == 0)
++ if (fec_cfg == 0)
+ fecparam->fec |= ETHTOOL_FEC_OFF;
+
+ if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6031223eafab..339925af0206 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3534,14 +3534,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+ q_vector->rx.target_itr =
+ ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+- q_vector->rx.target_itr);
++ q_vector->rx.target_itr >> 1);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr =
+ ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+- q_vector->tx.target_itr);
++ q_vector->tx.target_itr >> 1);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
+ wr32(hw, I40E_PFINT_RATEN(vector - 1),
+@@ -3646,11 +3646,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+ /* set the ITR configuration */
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
+- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
++ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
+- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
++ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
+ i40e_enable_misc_int_causes(pf);
+@@ -11396,7 +11396,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
+
+ /* associate no queues to the misc vector */
+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
++ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
+
+ i40e_flush(hw);
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
+index 2353166c654e..c68709c7ef81 100644
+--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
++++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
+@@ -948,7 +948,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ if (ice_sq_done(hw, cq))
+ break;
+
+- mdelay(1);
++ udelay(ICE_CTL_Q_SQ_CMD_USEC);
+ total_delay++;
+ } while (total_delay < cq->sq_cmd_timeout);
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
+index 44945c2165d8..4df9da359135 100644
+--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
++++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
+@@ -31,8 +31,9 @@ enum ice_ctl_q {
+ ICE_CTL_Q_MAILBOX,
+ };
+
+-/* Control Queue default settings */
+-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
++/* Control Queue timeout settings - max delay 250ms */
++#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
++#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+
+ struct ice_ctl_q_ring {
+ void *dma_head; /* Virtual address to DMA head */
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 7e23034df955..1fe9f6050635 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3368,10 +3368,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
+ struct ice_vsi *vsi = np->vsi;
+
+ if (q_num < 0) {
+- int i;
++ int v_idx;
++
++ ice_for_each_q_vector(vsi, v_idx) {
++ /* In some cases if DCB is configured the num_[rx|tx]q
++ * can be less than vsi->num_q_vectors. This check
++ * accounts for that so we don't report a false failure
++ */
++ if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
++ goto set_complete;
+
+- ice_for_each_q_vector(vsi, i) {
+- if (ice_set_q_coalesce(vsi, ec, i))
++ if (ice_set_q_coalesce(vsi, ec, v_idx))
+ return -EINVAL;
+ }
+ goto set_complete;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 214cd6eca405..2408f0de95fc 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3970,8 +3970,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
+ }
+
+ ice_for_each_txq(vsi, i) {
+- vsi->tx_rings[i]->netdev = vsi->netdev;
+- err = ice_setup_tx_ring(vsi->tx_rings[i]);
++ struct ice_ring *ring = vsi->tx_rings[i];
++
++ if (!ring)
++ return -EINVAL;
++
++ ring->netdev = vsi->netdev;
++ err = ice_setup_tx_ring(ring);
+ if (err)
+ break;
+ }
+@@ -3996,8 +4001,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
+ }
+
+ ice_for_each_rxq(vsi, i) {
+- vsi->rx_rings[i]->netdev = vsi->netdev;
+- err = ice_setup_rx_ring(vsi->rx_rings[i]);
++ struct ice_ring *ring = vsi->rx_rings[i];
++
++ if (!ring)
++ return -EINVAL;
++
++ ring->netdev = vsi->netdev;
++ err = ice_setup_rx_ring(ring);
+ if (err)
+ break;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index b45797f39b2f..c0637a0cbfe8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -317,8 +317,9 @@ void ice_free_vfs(struct ice_pf *pf)
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+- /* disable VF qp mappings */
++ /* disable VF qp mappings and set VF disable state */
+ ice_dis_vf_mappings(&pf->vf[i]);
++ set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
+ ice_free_vf_res(&pf->vf[i]);
+ }
+ }
+@@ -1287,9 +1288,12 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+- /* verify if the VF is in either init or active before proceeding */
+- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
++ /* Bail out if VF is in disabled state, neither initialized, nor active
++ * state - otherwise proceed with notifications
++ */
++ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
++ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
++ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 91b3780ddb04..1a7203fede12 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8639,7 +8639,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ adapter->ptp_clock) {
+- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
++ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
++ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index c2c7f214a56a..814a4ba4e7fa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -3443,6 +3443,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
++ if (!(attr->action &
++ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
++ NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
++ return -EOPNOTSUPP;
++ }
++
+ if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 39d600c8b92d..210ebc91d3d6 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -5637,8 +5637,13 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ return;
+
++ /* Multipath routes are first added to the FIB trie and only then
++ * notified. If we vetoed the addition, we will get a delete
++ * notification for a route we do not have. Therefore, do not warn if
++ * route was not found.
++ */
+ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
+- if (WARN_ON(!fib6_entry))
++ if (!fib6_entry)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+index 7c4a15e967df..5defd31d481c 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+@@ -65,17 +65,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
+ freed_stats_id = priv->stats_ring_size;
+ /* Check for unallocated entries first. */
+ if (priv->stats_ids.init_unalloc > 0) {
+- if (priv->active_mem_unit == priv->total_mem_units) {
+- priv->stats_ids.init_unalloc--;
+- priv->active_mem_unit = 0;
+- }
+-
+ *stats_context_id =
+ FIELD_PREP(NFP_FL_STAT_ID_STAT,
+ priv->stats_ids.init_unalloc - 1) |
+ FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
+ priv->active_mem_unit);
+- priv->active_mem_unit++;
++
++ if (++priv->active_mem_unit == priv->total_mem_units) {
++ priv->stats_ids.init_unalloc--;
++ priv->active_mem_unit = 0;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+index 9a6a9a008714..c8bdbf057d5a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
+@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+- if (mc_count < 64) {
++ if (mc_count <= 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index a220cc7c947a..ba53612ae0df 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -1406,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+ rxq->rx_buf_seg_size = roundup_pow_of_two(size);
+ } else {
+ rxq->rx_buf_seg_size = PAGE_SIZE;
++ edev->ndev->features &= ~NETIF_F_GRO_HW;
+ }
+
+ /* Allocate the parallel driver ring for Rx buffers */
+@@ -1450,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+ }
+ }
+
++ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
+ if (!edev->gro_disable)
+ qede_set_tpa_param(rxq);
+ err:
+@@ -1702,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev)
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, queue_id);
+ }
+-
+- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
+ }
+
+ static int qede_set_real_num_queues(struct qede_dev *edev)
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index b4b8ba00ee01..986f26578d34 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+ int err;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
++ lrg_buf_cb = &qdev->lrg_buf[i];
++ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
++
+ skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!skb)) {
+@@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ } else {
+-
+- lrg_buf_cb = &qdev->lrg_buf[i];
+- memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ lrg_buf_cb->index = i;
+- lrg_buf_cb->skb = skb;
+ /*
+ * We save some space to copy the ethhdr from first
+ * buffer
+@@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+ return -ENOMEM;
+ }
+
++ lrg_buf_cb->skb = skb;
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 4fe0977d01fa..5ae0b5663d54 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -680,6 +680,7 @@ struct rtl8169_private {
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
+ u32 saved_wolopts;
++ int eee_adv;
+
+ const char *fw_name;
+ struct rtl_fw *rtl_fw;
+@@ -2075,6 +2076,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+ }
+
+ ret = phy_ethtool_set_eee(tp->phydev, data);
++
++ if (!ret)
++ tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
++ MDIO_AN_EEE_ADV);
+ out:
+ pm_runtime_put_noidle(d);
+ return ret;
+@@ -2105,10 +2110,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
+ static void rtl_enable_eee(struct rtl8169_private *tp)
+ {
+ struct phy_device *phydev = tp->phydev;
+- int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
++ int adv;
++
++ /* respect EEE advertisement the user may have set */
++ if (tp->eee_adv >= 0)
++ adv = tp->eee_adv;
++ else
++ adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+
+- if (supported > 0)
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
++ if (adv >= 0)
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
+ }
+
+ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
+@@ -7064,6 +7075,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+ tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
++ tp->eee_adv = -1;
+
+ /* Get the *optional* external "ether_clk" used on some boards */
+ rc = rtl_get_ether_clk(tp);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 170c3a052b14..1f230bd854c4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -320,7 +320,7 @@ out:
+ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+ {
+- bool mdio = true;
++ bool mdio = false;
+ static const struct of_device_id need_mdio_ids[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10" },
+ {},
+diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
+index 834afca3a019..137632b09c72 100644
+--- a/drivers/net/ethernet/ti/Kconfig
++++ b/drivers/net/ethernet/ti/Kconfig
+@@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
+ select TI_DAVINCI_MDIO
+ select PHYLIB
++ select GENERIC_ALLOCATOR
+ ---help---
+ This driver supports TI's DaVinci Ethernet .
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 84025dcc78d5..e7c24396933e 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -779,6 +779,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
+ void cpsw_ale_stop(struct cpsw_ale *ale)
+ {
+ del_timer_sync(&ale->timer);
++ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+ }
+
+@@ -862,6 +863,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+ ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
+ }
+
++ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+ return ale;
+ }
+
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
+index 37ba708ac781..6614fa3089b2 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.c
++++ b/drivers/net/ethernet/ti/davinci_cpdma.c
+@@ -1018,7 +1018,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
+ struct cpdma_chan *chan = si->chan;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ int len = si->len;
+- int swlen = len;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ u32 mode;
+@@ -1046,7 +1045,6 @@ static int cpdma_chan_submit_si(struct submit_info *si)
+ if (si->data_dma) {
+ buffer = si->data_dma;
+ dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+- swlen |= CPDMA_DMA_EXT_MAP;
+ } else {
+ buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
+ ret = dma_mapping_error(ctlr->dev, buffer);
+@@ -1065,7 +1063,8 @@ static int cpdma_chan_submit_si(struct submit_info *si)
+ writel_relaxed(mode | len, &desc->hw_mode);
+ writel_relaxed((uintptr_t)si->token, &desc->sw_token);
+ writel_relaxed(buffer, &desc->sw_buffer);
+- writel_relaxed(swlen, &desc->sw_len);
++ writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
++ &desc->sw_len);
+ desc_read(desc, sw_len);
+
+ __cpdma_chan_submit(chan, desc);
+diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
+index b517c1af9de0..91a1059517f5 100644
+--- a/drivers/net/fjes/fjes_main.c
++++ b/drivers/net/fjes/fjes_main.c
+@@ -166,6 +166,9 @@ static int fjes_acpi_add(struct acpi_device *device)
+ /* create platform_device */
+ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+ ARRAY_SIZE(fjes_resource));
++ if (IS_ERR(plat_dev))
++ return PTR_ERR(plat_dev);
++
+ device->driver_data = plat_dev;
+
+ return 0;
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 37fceaf9fa10..cf4455bbf888 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -95,6 +95,10 @@
+ #define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
+ #define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+
++/* CFG3 bits */
++#define DP83867_CFG3_INT_OE BIT(7)
++#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
++
+ /* CFG4 bits */
+ #define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
+
+@@ -410,12 +414,13 @@ static int dp83867_config_init(struct phy_device *phydev)
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+ }
+
++ val = phy_read(phydev, DP83867_CFG3);
+ /* Enable Interrupt output INT_OE in CFG3 register */
+- if (phy_interrupt_is_valid(phydev)) {
+- val = phy_read(phydev, DP83867_CFG3);
+- val |= BIT(7);
+- phy_write(phydev, DP83867_CFG3, val);
+- }
++ if (phy_interrupt_is_valid(phydev))
++ val |= DP83867_CFG3_INT_OE;
++
++ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
++ phy_write(phydev, DP83867_CFG3, val);
+
+ if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
+ dp83867_config_port_mirroring(phydev);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index adb66a2fae18..2bf0fda209a8 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -488,7 +488,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv)
+
+ if (phydev->is_c45) {
+ for (i = 1; i < num_ids; i++) {
+- if (!(phydev->c45_ids.devices_in_package & (1 << i)))
++ if (phydev->c45_ids.device_ids[i] == 0xffffffff)
+ continue;
+
+ if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+@@ -552,7 +552,7 @@ static const struct device_type mdio_bus_phy_type = {
+ .pm = MDIO_BUS_PHY_PM_OPS,
+ };
+
+-static int phy_request_driver_module(struct phy_device *dev, int phy_id)
++static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
+ {
+ int ret;
+
+@@ -564,15 +564,15 @@ static int phy_request_driver_module(struct phy_device *dev, int phy_id)
+ * then modprobe isn't available.
+ */
+ if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) {
+- phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n",
+- ret, phy_id);
++ phydev_err(dev, "error %d loading PHY driver module for ID 0x%08lx\n",
++ ret, (unsigned long)phy_id);
+ return ret;
+ }
+
+ return 0;
+ }
+
+-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
++struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids)
+ {
+@@ -596,8 +596,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ mdiodev->device_free = phy_mdio_device_free;
+ mdiodev->device_remove = phy_mdio_device_remove;
+
+- dev->speed = 0;
+- dev->duplex = -1;
++ dev->speed = SPEED_UNKNOWN;
++ dev->duplex = DUPLEX_UNKNOWN;
+ dev->pause = 0;
+ dev->asym_pause = 0;
+ dev->link = 0;
+@@ -632,7 +632,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ int i;
+
+ for (i = 1; i < num_ids; i++) {
+- if (!(c45_ids->devices_in_package & (1 << i)))
++ if (c45_ids->device_ids[i] == 0xffffffff)
+ continue;
+
+ ret = phy_request_driver_module(dev,
+@@ -812,10 +812,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+ */
+ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
+ {
+- struct phy_c45_device_ids c45_ids = {0};
++ struct phy_c45_device_ids c45_ids;
+ u32 phy_id = 0;
+ int r;
+
++ c45_ids.devices_in_package = 0;
++ memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
++
+ r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
+ if (r)
+ return ERR_PTR(r);
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 8156b33ee3e7..ca70a1d840eb 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2074,7 +2074,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.port = PORT_OTHER;
+
+- list_for_each_entry(port, &team->port_list, list) {
++ rcu_read_lock();
++ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (team_port_txable(port)) {
+ if (port->state.speed != SPEED_UNKNOWN)
+ speed += port->state.speed;
+@@ -2083,6 +2084,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
+ cmd->base.duplex = port->state.duplex;
+ }
+ }
++ rcu_read_unlock();
++
+ cmd->base.speed = speed ? : SPEED_UNKNOWN;
+
+ return 0;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index a8d3141582a5..16564ebcde50 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -313,8 +313,8 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
+ tfile->napi_enabled = napi_en;
+ tfile->napi_frags_enabled = napi_en && napi_frags;
+ if (napi_en) {
+- netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+- NAPI_POLL_WEIGHT);
++ netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
++ NAPI_POLL_WEIGHT);
+ napi_enable(&tfile->napi);
+ }
+ }
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index f24a1b0b801f..0becc79fd431 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1808,6 +1808,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
+ dev->mdiobus->read = lan78xx_mdiobus_read;
+ dev->mdiobus->write = lan78xx_mdiobus_write;
+ dev->mdiobus->name = "lan78xx-mdiobus";
++ dev->mdiobus->parent = &dev->udev->dev;
+
+ snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
+index b6d2932383cf..1cfe75a2d0c3 100644
+--- a/drivers/net/wireless/ath/ath10k/coredump.c
++++ b/drivers/net/wireless/ath/ath10k/coredump.c
+@@ -1208,9 +1208,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+- memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+- crash_data->ramdump_buf_len);
+- sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
++ if (crash_data->ramdump_buf_len) {
++ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
++ crash_data->ramdump_buf_len);
++ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
++ }
+ }
+
+ mutex_unlock(&ar->dump_mutex);
+@@ -1257,6 +1259,9 @@ int ath10k_coredump_register(struct ath10k *ar)
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
++ if (!crash_data->ramdump_buf_len)
++ return 0;
++
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 53f1095de8ff..9f0e7b4943ec 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -2726,7 +2726,7 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+- if (!peer) {
++ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+ continue;
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index a6d21856b7e7..36d24ea126a2 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3708,7 +3708,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+- struct sk_buff *skb)
++ struct sk_buff *skb, bool noque_offchan)
+ {
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+@@ -3738,10 +3738,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
+ }
+ }
+
+- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
++ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
+- skb);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
++ skb, skb->len);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+@@ -3803,8 +3803,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
+
+ mutex_lock(&ar->conf_mutex);
+
+- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
+- skb);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
++ skb, skb->len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ peer_addr = ieee80211_get_DA(hdr);
+@@ -3850,7 +3850,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+@@ -3860,8 +3860,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
+- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
+- skb);
++ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
++ skb, skb->len);
+
+ if (!peer && tmp_peer_created) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+@@ -3903,8 +3903,10 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+ ar->running_fw->fw_file.fw_features)) {
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+- if (!paddr)
++ if (dma_mapping_error(ar->dev, paddr)) {
++ ieee80211_free_txskb(ar->hw, skb);
+ continue;
++ }
+ ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
+@@ -4097,7 +4099,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+@@ -4378,7 +4380,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
++ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
+index 4102df016931..39abf8b12903 100644
+--- a/drivers/net/wireless/ath/ath10k/txrx.c
++++ b/drivers/net/wireless/ath/ath10k/txrx.c
+@@ -95,6 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
++ info->status.rates[0].idx = -1;
++
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 406b367c284c..85cf96461dde 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -1350,6 +1350,11 @@ void brcmf_detach(struct device *dev)
+ brcmf_fweh_detach(drvr);
+ brcmf_proto_detach(drvr);
+
++ if (drvr->mon_if) {
++ brcmf_net_detach(drvr->mon_if->ndev, false);
++ drvr->mon_if = NULL;
++ }
++
+ /* make sure primary interface removed last */
+ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
+ if (drvr->iflist[i])
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+index dd387aba3317..e8a4d604b910 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+@@ -171,6 +171,9 @@ void iwl_leds_init(struct iwl_priv *priv)
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
++ if (!priv->led.name)
++ return;
++
+ priv->led.brightness_set = iwl_led_brightness_set;
+ priv->led.blink_set = iwl_led_blink_set;
+ priv->led.max_brightness = 1;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+index d104da9170ca..72c4b2b8399d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+@@ -129,6 +129,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+
+ mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mvm->hw->wiphy));
++ if (!mvm->led.name)
++ return -ENOMEM;
++
+ mvm->led.brightness_set = iwl_led_brightness_set;
+ mvm->led.max_brightness = 1;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index 0ad8ed23a455..5ee33c8ae9d2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -60,6 +60,7 @@
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
++#include <asm/unaligned.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
+ #include "iwl-trans.h"
+@@ -357,7 +358,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+ len = le16_to_cpu(rx_res->byte_count);
+- rx_pkt_status = le32_to_cpup((__le32 *)
++ rx_pkt_status = get_unaligned_le32((__le32 *)
+ (pkt->data + sizeof(*rx_res) + len));
+
+ /* Dont use dev_alloc_skb(), we'll have enough headroom once
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 19dd075f2f63..041dd75ac72b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1429,6 +1429,7 @@ out_err:
+ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++ struct napi_struct *napi;
+ struct iwl_rxq *rxq;
+ u32 r, i, count = 0;
+ bool emergency = false;
+@@ -1534,8 +1535,16 @@ out:
+ if (unlikely(emergency && count))
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+
+- if (rxq->napi.poll)
+- napi_gro_flush(&rxq->napi, false);
++ napi = &rxq->napi;
++ if (napi->poll) {
++ if (napi->rx_count) {
++ netif_receive_skb_list(&napi->rx_list);
++ INIT_LIST_HEAD(&napi->rx_list);
++ napi->rx_count = 0;
++ }
++
++ napi_gro_flush(napi, false);
++ }
+
+ iwl_pcie_rxq_restock(trans, rxq);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index ca3bb4d65b00..df8455f14e4d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -57,24 +57,6 @@
+ #include "internal.h"
+ #include "fw/dbg.h"
+
+-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+-{
+- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+- udelay(20);
+- iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+- HPM_HIPM_GEN_CFG_CR_PG_EN |
+- HPM_HIPM_GEN_CFG_CR_SLP_EN);
+- udelay(20);
+- iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+- HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+-
+- iwl_trans_sw_reset(trans);
+- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+- return 0;
+-}
+-
+ /*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+@@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+
+ iwl_pcie_apm_config(trans);
+
+- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+- trans->cfg->integrated) {
+- ret = iwl_pcie_gen2_force_power_gating(trans);
+- if (ret)
+- return ret;
+- }
+-
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 6961f00ff812..d3db38c3095b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1783,6 +1783,29 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
+ return 0;
+ }
+
++static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
++{
++ int ret;
++
++ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
++ if (ret < 0)
++ return ret;
++
++ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
++ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
++ udelay(20);
++ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
++ HPM_HIPM_GEN_CFG_CR_PG_EN |
++ HPM_HIPM_GEN_CFG_CR_SLP_EN);
++ udelay(20);
++ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
++ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
++
++ iwl_trans_pcie_sw_reset(trans);
++
++ return 0;
++}
++
+ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -1802,6 +1825,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+
+ iwl_trans_pcie_sw_reset(trans);
+
++ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
++ trans->cfg->integrated) {
++ err = iwl_pcie_gen2_force_power_gating(trans);
++ if (err)
++ return err;
++ }
++
+ err = iwl_pcie_apm_init(trans);
+ if (err)
+ return err;
+diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
+index 242d8845da3f..30f1025ecb9b 100644
+--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
+@@ -1179,6 +1179,10 @@ static int if_sdio_probe(struct sdio_func *func,
+
+ spin_lock_init(&card->lock);
+ card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
++ if (unlikely(!card->workqueue)) {
++ ret = -ENOMEM;
++ goto err_queue;
++ }
+ INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
+ init_waitqueue_head(&card->pwron_waitq);
+
+@@ -1230,6 +1234,7 @@ err_activate_card:
+ lbs_remove_card(priv);
+ free:
+ destroy_workqueue(card->workqueue);
++err_queue:
+ while (card->packets) {
+ packet = card->packets;
+ card->packets = card->packets->next;
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index eff06d59e9df..096334e941a1 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -687,8 +687,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
+ skb_put(skb, MAX_EVENT_SIZE);
+
+ if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
+- PCI_DMA_FROMDEVICE))
++ PCI_DMA_FROMDEVICE)) {
++ kfree_skb(skb);
++ kfree(card->evtbd_ring_vbase);
+ return -1;
++ }
+
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index c328192307c4..ff3f3d98b625 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1032,8 +1032,10 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
+ if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+ cur_rate->flags != info->status.rates[i].flags)) {
+ i++;
+- if (i == ARRAY_SIZE(info->status.rates))
++ if (i == ARRAY_SIZE(info->status.rates)) {
++ i--;
+ break;
++ }
+
+ info->status.rates[i] = *cur_rate;
+ info->status.rates[i].count = 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index e07ce2c10013..111e38ff954a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -914,8 +914,10 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ if (idx && (cur_rate->idx != info->status.rates[i].idx ||
+ cur_rate->flags != info->status.rates[i].flags)) {
+ i++;
+- if (i == ARRAY_SIZE(info->status.rates))
++ if (i == ARRAY_SIZE(info->status.rates)) {
++ i--;
+ break;
++ }
+
+ info->status.rates[i] = *cur_rate;
+ info->status.rates[i].count = 0;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+index dc0c7244b60e..c0c32805fb8d 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+@@ -83,6 +83,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
+ struct qlink_cmd *cmd;
+ struct qlink_resp *resp = NULL;
+ struct sk_buff *resp_skb = NULL;
++ int resp_res = 0;
+ u16 cmd_id;
+ u8 mac_id;
+ u8 vif_id;
+@@ -113,6 +114,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
+ }
+
+ resp = (struct qlink_resp *)resp_skb->data;
++ resp_res = le16_to_cpu(resp->result);
+ ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
+ const_resp_size);
+ if (ret)
+@@ -128,8 +130,8 @@ out:
+ else
+ consume_skb(resp_skb);
+
+- if (!ret && resp)
+- return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
++ if (!ret)
++ return qtnf_cmd_resp_result_decode(resp_res);
+
+ pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
+ mac_id, vif_id, cmd_id, ret);
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
+index b57c8c18a8d0..7846383c8828 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
+@@ -171,8 +171,9 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
+ return -EPROTO;
+ }
+
+- pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
+- vif->mac->macid, vif->vifid, join_info->bssid, status);
++ pr_debug("VIF%u.%u: BSSID:%pM chan:%u status:%u\n",
++ vif->mac->macid, vif->vifid, join_info->bssid,
++ le16_to_cpu(join_info->chan.chan.center_freq), status);
+
+ if (status != WLAN_STATUS_SUCCESS)
+ goto done;
+@@ -181,7 +182,7 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ vif->mac->macid, vif->vifid,
+- chandef.chan->center_freq,
++ chandef.chan ? chandef.chan->center_freq : 0,
+ chandef.center_freq1,
+ chandef.center_freq2,
+ chandef.width);
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+index 8ae318b5fe54..4824be0c6231 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+@@ -130,6 +130,8 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
+
+ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
+ {
++ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
++ char card_id[64];
+ int ret;
+
+ bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
+@@ -137,7 +139,9 @@ int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
+ if (ret) {
+ pr_err("failed to attach core\n");
+ } else {
+- qtnf_debugfs_init(bus, DRV_NAME);
++ snprintf(card_id, sizeof(card_id), "%s:%s",
++ DRV_NAME, pci_name(priv->pdev));
++ qtnf_debugfs_init(bus, card_id);
+ qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
+ qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
+ qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index ade057d868f7..5e9ce03067de 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -1341,6 +1341,7 @@ struct rtl8xxxu_fileops {
+ u8 has_s0s1:1;
+ u8 has_tx_report:1;
+ u8 gen2_thermal_meter:1;
++ u8 needs_full_init:1;
+ u32 adda_1t_init;
+ u32 adda_1t_path_on;
+ u32 adda_2t_path_on_a;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+index ceffe05bd65b..f3cd314d1a9c 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+@@ -1670,6 +1670,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
+ .has_s0s1 = 1,
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
++ .needs_full_init = 1,
+ .adda_1t_init = 0x01c00014,
+ .adda_1t_path_on = 0x01c00014,
+ .adda_2t_path_on_a = 0x01c00014,
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index c6c41fb962ff..361248e97568 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -3902,6 +3902,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+ else
+ macpower = true;
+
++ if (fops->needs_full_init)
++ macpower = false;
++
+ ret = fops->power_on(priv);
+ if (ret < 0) {
+ dev_warn(dev, "%s: Failed power on\n", __func__);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+index 56cc3bc30860..f070f25bb735 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+@@ -1540,6 +1540,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+ * This is maybe necessary:
+ * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
+ */
++ dev_kfree_skb(skb);
++
+ return true;
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
+index 4b59f3b46b28..348b0072cdd6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
+@@ -1021,8 +1021,10 @@ int rtl_usb_probe(struct usb_interface *intf,
+ rtlpriv->hw = hw;
+ rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32),
+ GFP_KERNEL);
+- if (!rtlpriv->usb_data)
++ if (!rtlpriv->usb_data) {
++ ieee80211_free_hw(hw);
+ return -ENOMEM;
++ }
+
+ /* this spin lock must be initialized early */
+ spin_lock_init(&rtlpriv->locks.usb_lock);
+@@ -1083,6 +1085,7 @@ error_out2:
+ _rtl_usb_io_handler_release(hw);
+ usb_put_dev(udev);
+ complete(&rtlpriv->firmware_loading_complete);
++ kfree(rtlpriv->usb_data);
+ return -ENODEV;
+ }
+ EXPORT_SYMBOL(rtl_usb_probe);
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index 793b40bdbf7c..3e95ad198912 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -1308,6 +1308,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
+ struct rtw_chip_info *chip = rtwdev->chip;
+ bool wl_hi_pri = false;
+ u8 table_case, tdma_case;
++ u32 slot_type = 0;
+
+ if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
+ coex_stat->wl_hi_pri_task2)
+@@ -1318,14 +1319,16 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
+ if (wl_hi_pri) {
+ table_case = 15;
+ if (coex_stat->bt_a2dp_exist &&
+- !coex_stat->bt_pan_exist)
++ !coex_stat->bt_pan_exist) {
++ slot_type = TDMA_4SLOT;
+ tdma_case = 11;
+- else if (coex_stat->wl_hi_pri_task1)
++ } else if (coex_stat->wl_hi_pri_task1) {
+ tdma_case = 6;
+- else if (!coex_stat->bt_page)
++ } else if (!coex_stat->bt_page) {
+ tdma_case = 8;
+- else
++ } else {
+ tdma_case = 9;
++ }
+ } else if (coex_stat->wl_connected) {
+ table_case = 10;
+ tdma_case = 10;
+@@ -1361,7 +1364,7 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+- rtw_coex_tdma(rtwdev, false, tdma_case);
++ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+ }
+
+ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
+@@ -1475,13 +1478,13 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
++ slot_type = TDMA_4SLOT;
++
+ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
+ table_case = 10;
+ else
+ table_case = 9;
+
+- slot_type = TDMA_4SLOT;
+-
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 13;
+ else
+@@ -1585,13 +1588,14 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
++ slot_type = TDMA_4SLOT;
++
+ if (coex_stat->bt_ble_exist)
+ table_case = 26;
+ else
+ table_case = 9;
+
+ if (coex_stat->wl_gl_busy) {
+- slot_type = TDMA_4SLOT;
+ tdma_case = 13;
+ } else {
+ tdma_case = 14;
+@@ -1794,10 +1798,12 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
++ u32 slot_type = 0;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_a2dp_exist) {
++ slot_type = TDMA_4SLOT;
+ table_case = 9;
+ tdma_case = 11;
+ } else {
+@@ -1818,7 +1824,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+- rtw_coex_tdma(rtwdev, false, tdma_case);
++ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+ }
+
+ static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 6dd457741b15..7a3a4911bde2 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1020,7 +1020,8 @@ static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
+
+ rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
+
+- if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
++ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE ||
++ efuse->hw_cap.nss > rtwdev->hal.rf_path_num)
+ efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
+index 4d1909aecd6c..9f60e4dc5a90 100644
+--- a/drivers/nfc/nxp-nci/i2c.c
++++ b/drivers/nfc/nxp-nci/i2c.c
+@@ -278,7 +278,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
+
+ r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios);
+ if (r)
+- return r;
++ dev_dbg(dev, "Unable to add GPIO mapping table\n");
+
+ phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(phy->gpiod_en)) {
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index af3212aec871..a6b7b242d516 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -313,7 +313,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+ if (blk_mq_request_completed(req))
+ return true;
+
+- nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
++ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(req);
+ return true;
+ }
+@@ -611,8 +611,14 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ struct nvme_dsm_range *range;
+ struct bio *bio;
+
+- range = kmalloc_array(segments, sizeof(*range),
+- GFP_ATOMIC | __GFP_NOWARN);
++ /*
++ * Some devices do not consider the DSM 'Number of Ranges' field when
++ * determining how much data to DMA. Always allocate memory for maximum
++ * number of segments to prevent device reading beyond end of buffer.
++ */
++ static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
++
++ range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!range) {
+ /*
+ * If we fail allocation our range, fallback to the controller
+@@ -652,7 +658,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+- req->special_vec.bv_len = sizeof(*range) * segments;
++ req->special_vec.bv_len = alloc_size;
+ req->rq_flags |= RQF_SPECIAL_PAYLOAD;
+
+ return BLK_STS_OK;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index e0f064dcbd02..132ade51ee87 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -95,6 +95,7 @@ void nvme_failover_req(struct request *req)
+ }
+ break;
+ case NVME_SC_HOST_PATH_ERROR:
++ case NVME_SC_HOST_ABORTED_CMD:
+ /*
+ * Temporary transport disruption in talking to the controller.
+ * Try to send on a new path.
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index dff2f3c357f5..fc40555ca4cd 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
++ clk_prepare_enable(priv->clk);
++ imx_ocotp_clr_err_if_set(priv->base);
++ clk_disable_unprepare(priv->clk);
++
+ priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
+ imx_ocotp_nvmem_config.dev = dev;
+diff --git a/drivers/parport/share.c b/drivers/parport/share.c
+index 7b4ee33c1935..15c81cffd2de 100644
+--- a/drivers/parport/share.c
++++ b/drivers/parport/share.c
+@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
+ return 0;
+ }
+
++/*
++ * Iterates through all the devices connected to the bus and return 1
++ * if the device is a parallel port.
++ */
++
++static int port_detect(struct device *dev, void *dev_drv)
++{
++ if (is_parport(dev))
++ return 1;
++ return 0;
++}
++
+ /**
+ * parport_register_driver - register a parallel port device driver
+ * @drv: structure describing the driver
+@@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
+ if (ret)
+ return ret;
+
++ /*
++ * check if bus has any parallel port registered, if
++ * none is found then load the lowlevel driver.
++ */
++ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
++ port_detect);
++ if (!ret)
++ get_lowlevel_driver();
++
+ mutex_lock(&registration_lock);
+ if (drv->match_port)
+ bus_for_each_dev(&parport_bus_type, NULL, drv,
+diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+index b163b3a1558d..61054272a7c8 100644
+--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
++++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+@@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
+ /* setup initial state */
+ qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
+ uphy->vbus_edev);
+- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
+- EXTCON_USB, &uphy->vbus_notify);
++ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
++ &uphy->vbus_notify);
+ if (ret)
+ goto err_ulpi;
+ }
+@@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
+ {
+ struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
+
++ if (uphy->vbus_edev)
++ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
++ &uphy->vbus_notify);
+ regulator_disable(uphy->v3p3);
+ regulator_disable(uphy->v1p8);
+ clk_disable_unprepare(uphy->sleep_clk);
+diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
+index 2926e4937301..2e279ac0fa4d 100644
+--- a/drivers/phy/renesas/phy-rcar-gen2.c
++++ b/drivers/phy/renesas/phy-rcar-gen2.c
+@@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver {
+ struct rcar_gen2_phy_data {
+ const struct phy_ops *gen2_phy_ops;
+ const u32 (*select_value)[PHYS_PER_CHANNEL];
++ const u32 num_channels;
+ };
+
+ static int rcar_gen2_phy_init(struct phy *p)
+@@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
+ static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
+ .gen2_phy_ops = &rcar_gen2_phy_ops,
+ .select_value = pci_select_value,
++ .num_channels = ARRAY_SIZE(pci_select_value),
+ };
+
+ static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
+ .gen2_phy_ops = &rz_g1c_phy_ops,
+ .select_value = usb20_select_value,
++ .num_channels = ARRAY_SIZE(usb20_select_value),
+ };
+
+ static const struct of_device_id rcar_gen2_phy_match_table[] = {
+@@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
+ channel->selected_phy = -1;
+
+ error = of_property_read_u32(np, "reg", &channel_num);
+- if (error || channel_num > 2) {
++ if (error || channel_num >= data->num_channels) {
+ dev_err(dev, "Invalid \"reg\" property\n");
+ of_node_put(np);
+ return error;
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 5d6d8b1e9062..dbaacde1b36a 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -29,6 +29,13 @@ struct pinctrl_dt_map {
+ static void dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+ {
++ int i;
++
++ for (i = 0; i < num_maps; ++i) {
++ kfree_const(map[i].dev_name);
++ map[i].dev_name = NULL;
++ }
++
+ if (pctldev) {
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ if (ops->dt_free_map)
+@@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+
+ /* Initialize common mapping table entry fields */
+ for (i = 0; i < num_maps; i++) {
+- map[i].dev_name = dev_name(p->dev);
++ const char *devname;
++
++ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
++ if (!devname)
++ goto err_free_map;
++
++ map[i].dev_name = devname;
+ map[i].name = statename;
+ if (pctldev)
+ map[i].ctrl_dev_name = dev_name(pctldev->dev);
+@@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+
+ /* Remember the converted mapping table entries */
+ dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
+- if (!dt_map) {
+- dt_free_map(pctldev, map, num_maps);
+- return -ENOMEM;
+- }
++ if (!dt_map)
++ goto err_free_map;
+
+ dt_map->pctldev = pctldev;
+ dt_map->map = map;
+@@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+ list_add_tail(&dt_map->node, &p->dt_maps);
+
+ return pinctrl_register_map(map, num_maps, false);
++
++err_free_map:
++ dt_free_map(pctldev, map, num_maps);
++ return -ENOMEM;
+ }
+
+ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 9ffb22211d2b..7d658e6627e7 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -110,7 +110,6 @@ struct byt_gpio {
+ struct platform_device *pdev;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+- raw_spinlock_t lock;
+ const struct intel_pinctrl_soc_data *soc_data;
+ struct intel_community *communities_copy;
+ struct byt_gpio_pin_context *saved_context;
+@@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_data *byt_soc_data[] = {
+ NULL
+ };
+
++static DEFINE_RAW_SPINLOCK(byt_lock);
++
+ static struct intel_community *byt_get_community(struct byt_gpio *vg,
+ unsigned int pin)
+ {
+@@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
+ unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ for (i = 0; i < group.npins; i++) {
+ void __iomem *padcfg0;
+@@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
+ writel(value, padcfg0);
+ }
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+
+ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+@@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+ unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ for (i = 0; i < group.npins; i++) {
+ void __iomem *padcfg0;
+@@ -708,7 +709,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+ writel(value, padcfg0);
+ }
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+
+ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+@@ -749,11 +750,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+ unsigned long flags;
+ u32 value;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ value = readl(reg);
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+ writel(value, reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+
+ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+@@ -765,7 +766,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+ u32 value, gpio_mux;
+ unsigned long flags;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ /*
+ * In most cases, func pin mux 000 means GPIO function.
+@@ -787,7 +788,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+ "pin %u forcibly re-configured as GPIO\n", offset);
+ }
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ pm_runtime_get(&vg->pdev->dev);
+
+@@ -815,7 +816,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
+ unsigned long flags;
+ u32 value;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ value = readl(val_reg);
+ value &= ~BYT_DIR_MASK;
+@@ -832,7 +833,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
+ "Potential Error: Setting GPIO with direct_irq_en to output");
+ writel(value, val_reg);
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ return 0;
+ }
+@@ -901,11 +902,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
+ u32 conf, pull, val, debounce;
+ u16 arg = 0;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ conf = readl(conf_reg);
+ pull = conf & BYT_PULL_ASSIGN_MASK;
+ val = readl(val_reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+@@ -932,9 +933,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
+ if (!(conf & BYT_DEBOUNCE_EN))
+ return -EINVAL;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ debounce = readl(db_reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
+ case BYT_DEBOUNCE_PULSE_375US:
+@@ -986,7 +987,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ u32 conf, val, debounce;
+ int i, ret = 0;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ conf = readl(conf_reg);
+ val = readl(val_reg);
+@@ -1094,7 +1095,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ if (!ret)
+ writel(conf, conf_reg);
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ return ret;
+ }
+@@ -1119,9 +1120,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
+ unsigned long flags;
+ u32 val;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ val = readl(reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ return !!(val & BYT_LEVEL);
+ }
+@@ -1136,13 +1137,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+ if (!reg)
+ return;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ old_val = readl(reg);
+ if (value)
+ writel(old_val | BYT_LEVEL, reg);
+ else
+ writel(old_val & ~BYT_LEVEL, reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+
+ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+@@ -1155,9 +1156,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+ if (!reg)
+ return -EINVAL;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ value = readl(reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ if (!(value & BYT_OUTPUT_EN))
+ return 0;
+@@ -1200,14 +1201,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+ const char *label;
+ unsigned int pin;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ pin = vg->soc_data->pins[i].number;
+ reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+ if (!reg) {
+ seq_printf(s,
+ "Could not retrieve pin %i conf0 reg\n",
+ pin);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ continue;
+ }
+ conf0 = readl(reg);
+@@ -1216,11 +1217,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+ if (!reg) {
+ seq_printf(s,
+ "Could not retrieve pin %i val reg\n", pin);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ continue;
+ }
+ val = readl(reg);
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ comm = byt_get_community(vg, pin);
+ if (!comm) {
+@@ -1304,9 +1305,9 @@ static void byt_irq_ack(struct irq_data *d)
+ if (!reg)
+ return;
+
+- raw_spin_lock(&vg->lock);
++ raw_spin_lock(&byt_lock);
+ writel(BIT(offset % 32), reg);
+- raw_spin_unlock(&vg->lock);
++ raw_spin_unlock(&byt_lock);
+ }
+
+ static void byt_irq_mask(struct irq_data *d)
+@@ -1330,7 +1331,7 @@ static void byt_irq_unmask(struct irq_data *d)
+ if (!reg)
+ return;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ value = readl(reg);
+
+ switch (irqd_get_trigger_type(d)) {
+@@ -1353,7 +1354,7 @@ static void byt_irq_unmask(struct irq_data *d)
+
+ writel(value, reg);
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+
+ static int byt_irq_type(struct irq_data *d, unsigned int type)
+@@ -1367,7 +1368,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
+ if (!reg || offset >= vg->chip.ngpio)
+ return -EINVAL;
+
+- raw_spin_lock_irqsave(&vg->lock, flags);
++ raw_spin_lock_irqsave(&byt_lock, flags);
+ value = readl(reg);
+
+ WARN(value & BYT_DIRECT_IRQ_EN,
+@@ -1389,7 +1390,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+- raw_spin_unlock_irqrestore(&vg->lock, flags);
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+
+ return 0;
+ }
+@@ -1425,9 +1426,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
+ continue;
+ }
+
+- raw_spin_lock(&vg->lock);
++ raw_spin_lock(&byt_lock);
+ pending = readl(reg);
+- raw_spin_unlock(&vg->lock);
++ raw_spin_unlock(&byt_lock);
+ for_each_set_bit(pin, &pending, 32) {
+ virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
+ generic_handle_irq(virq);
+@@ -1638,8 +1639,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
+ return PTR_ERR(vg->pctl_dev);
+ }
+
+- raw_spin_lock_init(&vg->lock);
+-
+ ret = byt_gpio_probe(vg);
+ if (ret)
+ return ret;
+@@ -1654,8 +1653,11 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
+ static int byt_gpio_suspend(struct device *dev)
+ {
+ struct byt_gpio *vg = dev_get_drvdata(dev);
++ unsigned long flags;
+ int i;
+
++ raw_spin_lock_irqsave(&byt_lock, flags);
++
+ for (i = 0; i < vg->soc_data->npins; i++) {
+ void __iomem *reg;
+ u32 value;
+@@ -1676,14 +1678,18 @@ static int byt_gpio_suspend(struct device *dev)
+ vg->saved_context[i].val = value;
+ }
+
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ return 0;
+ }
+
+ static int byt_gpio_resume(struct device *dev)
+ {
+ struct byt_gpio *vg = dev_get_drvdata(dev);
++ unsigned long flags;
+ int i;
+
++ raw_spin_lock_irqsave(&byt_lock, flags);
++
+ for (i = 0; i < vg->soc_data->npins; i++) {
+ void __iomem *reg;
+ u32 value;
+@@ -1721,6 +1727,7 @@ static int byt_gpio_resume(struct device *dev)
+ }
+ }
+
++ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ return 0;
+ }
+ #endif
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 2c61141519f8..eab078244a4c 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i, irqnr;
+ unsigned long flags;
+- u32 *regs, regval;
++ u32 __iomem *regs;
++ u32 regval;
+ u64 status, mask;
+
+ /* Read the wake status */
+diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
+index 6399c8a2bc22..d6cfad7417b1 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
++++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
+@@ -77,6 +77,7 @@ enum {
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
++ .tile = SOUTH, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+@@ -102,6 +103,7 @@ enum {
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
++ .tile = SOUTH, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+@@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = {
+ [116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
+- [119] = UFS_RESET(ufs_reset, 0x97f000),
+- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0),
+- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6),
+- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3),
+- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0),
+- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6),
+- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3),
+- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0),
++ [119] = UFS_RESET(ufs_reset, 0x7f000),
++ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0),
++ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6),
++ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3),
++ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0),
++ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6),
++ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3),
++ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
+ };
+
+ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+index 2dfb8d9cfda1..5200dadd6b3e 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+@@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
+ #define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
+
+ /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
++#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
++#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
+ #define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
+ #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
+ #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
+@@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
+
+ #define PINMUX_MOD_SELS \
+ \
+-MOD_SEL0_30_29 \
++ MOD_SEL1_31 \
++MOD_SEL0_30_29 MOD_SEL1_30 \
+ MOD_SEL1_29 \
+ MOD_SEL0_28 MOD_SEL1_28 \
+ MOD_SEL0_27_26 \
+@@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
+- PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
++ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
+
+ PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
+@@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
+ PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
+- PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
++ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
+ PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
+
+ /* IPSR11 */
+@@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = {
+
+ PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
+- PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
++ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
+ PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
+
+ PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
+- PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
++ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
+ PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
+ PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
+
+@@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
+ PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
+- PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
++ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
+
+ PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
+ PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
+@@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = {
+ PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
+ PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
+ PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
+- PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
++ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
+
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
+ PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
+@@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ MOD_SEL0_1_0 ))
+ },
+ { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
+- GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
+- 2, 2, 2, 1, 1, 2, 1, 4),
++ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
++ 1, 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(
+- /* RESERVED 31, 30 */
+- 0, 0, 0, 0,
++ MOD_SEL1_31
++ MOD_SEL1_30
+ MOD_SEL1_29
+ MOD_SEL1_28
+ /* RESERVED 27 */
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+index 5dfd991ffdaa..dbc36079c381 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+@@ -1450,7 +1450,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
+ GPIO_FN(ET0_ETXD2_A),
+ GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
+ GPIO_FN(ET0_ETXD3_A),
+- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
++ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
+ GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
+ GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
+ GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
+@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ /* IP3_20 [1] */
+ FN_EX_WAIT0, FN_TCLK1_B,
+ /* IP3_19_18 [2] */
+- FN_RD_WR, FN_TCLK1_B, 0, 0,
++ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
+ /* IP3_17_15 [3] */
+ FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
+ FN_ET0_ETXD3_A, 0, 0, 0,
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index 9579a706fc08..a881b709af25 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -300,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void)
+
+ static int __init hp_wmi_bios_2009_later(void)
+ {
+- int state = 0;
++ u8 state[128];
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
+ sizeof(state), sizeof(state));
+ if (!ret)
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index 61d6447d1966..00a96e4a1cdc 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -562,12 +562,14 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
+ switch (d->action) {
+ case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
+ if (latest->current_ua >= 0)
+- dev_warn(ddata->dev, "Battery low at 3.3V!\n");
++ dev_warn(ddata->dev, "Battery low at %imV!\n",
++ latest->voltage / 1000);
+ break;
+ case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
+- if (latest->current_ua >= 0) {
++ if (latest->current_ua >= 0 && latest->voltage <= 3200000) {
+ dev_emerg(ddata->dev,
+- "Battery empty at 3.1V, powering off\n");
++ "Battery empty at %imV, powering off\n",
++ latest->voltage / 1000);
+ orderly_poweroff(true);
+ }
+ break;
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index a46be221dbdc..87bc06b386a0 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1403,7 +1403,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
+ rdev_err(rdev, "failed to enable\n");
+ return ret;
+ }
+- rdev->use_count++;
++
++ if (rdev->constraints->always_on)
++ rdev->use_count++;
+ }
+
+ print_constraints(rdev);
+@@ -5198,6 +5200,7 @@ unset_supplies:
+ regulator_remove_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
+ wash:
++ kfree(rdev->coupling_desc.coupled_rdevs);
+ kfree(rdev->constraints);
+ mutex_lock(&regulator_list_mutex);
+ regulator_ena_gpio_free(rdev);
+diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
+index 76152aaa330b..96dc0eea7659 100644
+--- a/drivers/regulator/max8907-regulator.c
++++ b/drivers/regulator/max8907-regulator.c
+@@ -296,7 +296,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
+ memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
+
+ /* Backwards compatibility with MAX8907B; SD1 uses different voltages */
+- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
++ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
++ if (ret)
++ return ret;
++
+ if ((val & MAX8907_II2RR_VERSION_MASK) ==
+ MAX8907_II2RR_VERSION_REV_B) {
+ pmic->desc[MAX8907_SD1].min_uV = 637500;
+@@ -333,14 +336,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
+ }
+
+ if (pmic->desc[i].ops == &max8907_ldo_ops) {
+- regmap_read(config.regmap, pmic->desc[i].enable_reg,
++ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ &val);
++ if (ret)
++ return ret;
++
+ if ((val & MAX8907_MASK_LDO_SEQ) !=
+ MAX8907_MASK_LDO_SEQ)
+ pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
+ } else if (pmic->desc[i].ops == &max8907_out5v_ops) {
+- regmap_read(config.regmap, pmic->desc[i].enable_reg,
++ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ &val);
++ if (ret)
++ return ret;
++
+ if ((val & (MAX8907_MASK_OUT5V_VINEN |
+ MAX8907_MASK_OUT5V_ENSRC)) !=
+ MAX8907_MASK_OUT5V_ENSRC)
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index 13c54eac0cc3..d1839707128a 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -479,7 +479,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
+ unsigned int link_id = sdw->instance;
+ int pdi_conf = 0;
+
+- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
++ /* the Bulk and PCM streams are not contiguous */
++ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
++ if (pdi->num >= 2)
++ pdi->intel_alh_id += 2;
+
+ /*
+ * Program stream parameters to stream SHIM register
+@@ -508,7 +511,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
+ unsigned int link_id = sdw->instance;
+ unsigned int conf;
+
+- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
++ /* the Bulk and PCM streams are not contiguous */
++ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
++ if (pdi->num >= 2)
++ pdi->intel_alh_id += 2;
+
+ /* Program Stream config ALH register */
+ conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index c36587b42e95..82a0ee09cbe1 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -168,16 +168,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
+ /**
+ * cdns_spi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+- * @enable: Select (1) or deselect (0) the chip select line
++ * @is_high: Select(0) or deselect (1) the chip select line
+ */
+-static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
++static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+ {
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+
+- if (!enable) {
++ if (is_high) {
+ /* Deselect the slave */
+ ctrl_reg |= CDNS_SPI_CR_SSCTRL;
+ } else {
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 9a49e073e8b7..45972056ed8c 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -129,10 +129,11 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
+ struct chip_data *chip = spi_get_ctldata(spi);
+
++ /* Chip select logic is inverted from spi_set_cs() */
+ if (chip && chip->cs_control)
+- chip->cs_control(enable);
++ chip->cs_control(!enable);
+
+- if (enable)
++ if (!enable)
+ dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+ else if (dws->cs_override)
+ dw_writel(dws, DW_SPI_SER, 0);
+@@ -308,7 +309,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
+ cr0 = (transfer->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
+- (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
++ (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
++ (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
+ | (chip->tmode << SPI_TMOD_OFFSET);
+
+ /*
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 4b80ace1d137..2d563874b4ac 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -736,9 +736,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ if (ret)
+ goto err;
+
+- irq = irq_of_parse_and_map(np, 0);
+- if (!irq) {
+- ret = -EINVAL;
++ irq = platform_get_irq(ofdev, 0);
++ if (irq < 0) {
++ ret = irq;
+ goto err;
+ }
+
+@@ -751,7 +751,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ return 0;
+
+ err:
+- irq_dispose_mapping(irq);
+ return ret;
+ }
+
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
+index 1d3e23ec20a6..f9c5bbb74714 100644
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -371,8 +371,10 @@ static int spi_gpio_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
+- if (status)
++ if (status) {
++ spi_master_put(master);
+ return status;
++ }
+
+ if (of_id)
+ status = spi_gpio_probe_dt(pdev, master);
+diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
+index 439b01e4a2c8..f4a8f470aecc 100644
+--- a/drivers/spi/spi-img-spfi.c
++++ b/drivers/spi/spi-img-spfi.c
+@@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
++ spfi->tx_ch = NULL;
++ spfi->rx_ch = NULL;
+ dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
+ } else {
+ master->dma_tx = spfi->tx_ch;
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index bb6a14d1ab0f..2e73d75a6ac5 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1565,7 +1565,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
+ #endif
+
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(ssp->clk))
++ return NULL;
++
+ ssp->irq = platform_get_irq(pdev, 0);
++ if (ssp->irq < 0)
++ return NULL;
++
+ ssp->type = type;
+ ssp->pdev = pdev;
+ ssp->port_id = pxa2xx_spi_get_port_id(adev);
+diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
+index 35254bdc42c4..f7c1e20432e0 100644
+--- a/drivers/spi/spi-sifive.c
++++ b/drivers/spi/spi-sifive.c
+@@ -357,14 +357,14 @@ static int sifive_spi_probe(struct platform_device *pdev)
+ if (!cs_bits) {
+ dev_err(&pdev->dev, "Could not auto probe CS lines\n");
+ ret = -EINVAL;
+- goto put_master;
++ goto disable_clk;
+ }
+
+ num_cs = ilog2(cs_bits) + 1;
+ if (num_cs > SIFIVE_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ ret = -EINVAL;
+- goto put_master;
++ goto disable_clk;
+ }
+
+ /* Define our master */
+@@ -393,7 +393,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
+ dev_name(&pdev->dev), spi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to bind to interrupt\n");
+- goto put_master;
++ goto disable_clk;
+ }
+
+ dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
+@@ -402,11 +402,13 @@ static int sifive_spi_probe(struct platform_device *pdev)
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+- goto put_master;
++ goto disable_clk;
+ }
+
+ return 0;
+
++disable_clk:
++ clk_disable_unprepare(spi->clk);
+ put_master:
+ spi_master_put(master);
+
+@@ -420,6 +422,7 @@ static int sifive_spi_remove(struct platform_device *pdev)
+
+ /* Disable all the interrupts just in case */
+ sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
++ clk_disable_unprepare(spi->clk);
+
+ return 0;
+ }
+diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
+index 9a051286f120..9613cfe3c0a2 100644
+--- a/drivers/spi/spi-sprd-adi.c
++++ b/drivers/spi/spi-sprd-adi.c
+@@ -393,6 +393,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
+ val |= BIT_WDG_RUN | BIT_WDG_RST;
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
+
++ /* Lock the watchdog */
++ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, ~WDG_UNLOCK_KEY);
++
+ mdelay(1000);
+
+ dev_emerg(sadi->dev, "Unable to restart system\n");
+diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
+index 0c24c494f386..77d26d64541a 100644
+--- a/drivers/spi/spi-st-ssc4.c
++++ b/drivers/spi/spi-st-ssc4.c
+@@ -381,6 +381,7 @@ static int spi_st_probe(struct platform_device *pdev)
+ return 0;
+
+ clk_disable:
++ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(spi_st->clk);
+ put_master:
+ spi_master_put(master);
+@@ -392,6 +393,8 @@ static int spi_st_remove(struct platform_device *pdev)
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
++ pm_runtime_disable(&pdev->dev);
++
+ clk_disable_unprepare(spi_st->clk);
+
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
+index 111fffc91435..374a2a32edcd 100644
+--- a/drivers/spi/spi-tegra20-slink.c
++++ b/drivers/spi/spi-tegra20-slink.c
+@@ -1073,7 +1073,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+- goto exit_free_master;
++ goto exit_clk_unprepare;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+@@ -1146,6 +1146,8 @@ exit_free_irq:
+ free_irq(spi_irq, tspi);
+ exit_clk_disable:
+ clk_disable(tspi->clk);
++exit_clk_unprepare:
++ clk_unprepare(tspi->clk);
+ exit_free_master:
+ spi_master_put(master);
+ return ret;
+@@ -1159,6 +1161,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
+ free_irq(tspi->irq, tspi);
+
+ clk_disable(tspi->clk);
++ clk_unprepare(tspi->clk);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 255786f2e844..3ea9d8a3e6e8 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -627,6 +627,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
+ if (dofree)
+ kfree(spidev);
+ }
++#ifdef CONFIG_SPI_SLAVE
++ spi_slave_abort(spidev->spi);
++#endif
+ mutex_unlock(&device_list_lock);
+
+ return 0;
+diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
+index 4bdf44d82879..dc62db1ee1dd 100644
+--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
++++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
+@@ -623,6 +623,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
+ dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
+ &devpriv->dio_buffer_phys_addr[i],
+ GFP_KERNEL);
++ if (!devpriv->dio_buffer[i]) {
++ dev_warn(dev->class_dev,
++ "failed to allocate DMA buffer\n");
++ return -ENOMEM;
++ }
+ }
+ /* allocate dma descriptors */
+ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
+@@ -630,6 +635,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
+ NUM_DMA_DESCRIPTORS,
+ &devpriv->dma_desc_phys_addr,
+ GFP_KERNEL);
++ if (!devpriv->dma_desc) {
++ dev_warn(dev->class_dev,
++ "failed to allocate DMA descriptors\n");
++ return -ENOMEM;
++ }
+ if (devpriv->dma_desc_phys_addr & 0xf) {
+ dev_warn(dev->class_dev,
+ " dma descriptors not quad-word aligned (bug)\n");
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index a0a67aa517f0..61f0286fb157 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -666,7 +666,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+ fbdefio->deferred_io = fbtft_deferred_io;
+ fb_deferred_io_init(info);
+
+- strncpy(info->fix.id, dev->driver->name, 16);
++ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.xpanstep = 0;
+diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
+index 038d6732c3fd..23026978a5a5 100644
+--- a/drivers/staging/iio/frequency/ad9834.c
++++ b/drivers/staging/iio/frequency/ad9834.c
+@@ -417,6 +417,10 @@ static int ad9834_probe(struct spi_device *spi)
+ st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+ st->mclk = devm_clk_get(&spi->dev, NULL);
++ if (IS_ERR(st->mclk)) {
++ ret = PTR_ERR(st->mclk);
++ goto error_disable_reg;
++ }
+
+ ret = clk_prepare_enable(st->mclk);
+ if (ret) {
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index b33a07bc9105..46576e32581f 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -26,6 +26,8 @@
+ #include <media/imx.h>
+ #include "imx-media.h"
+
++#define IMX_CAPTURE_NAME "imx-capture"
++
+ struct capture_priv {
+ struct imx_media_video_dev vdev;
+
+@@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh,
+ {
+ struct capture_priv *priv = video_drvdata(file);
+
+- strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver));
+- strscpy(cap->card, "imx-media-capture", sizeof(cap->card));
++ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
++ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", priv->src_sd->name);
+
+diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
+index 73d8354e618c..e50b1f88e25b 100644
+--- a/drivers/staging/media/imx/imx7-mipi-csis.c
++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
+@@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state)
+ static int mipi_csis_phy_init(struct csi_state *state)
+ {
+ state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
++ if (IS_ERR(state->mipi_phy_regulator))
++ return PTR_ERR(state->mipi_phy_regulator);
+
+ return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
+ 1000000);
+@@ -966,7 +968,10 @@ static int mipi_csis_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- mipi_csis_phy_init(state);
++ ret = mipi_csis_phy_init(state);
++ if (ret < 0)
++ return ret;
++
+ mipi_csis_phy_reset(state);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
+index 2d3ea8b74dfd..3439f6ad6338 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -357,6 +357,8 @@ static int cedrus_probe(struct platform_device *pdev)
+
+ dev->mdev.dev = &pdev->dev;
+ strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
++ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
++ sizeof(dev->mdev.bus_info));
+
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &cedrus_m2m_media_ops;
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
+index 2f017a651848..3758a1c4e2d0 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
+@@ -179,12 +179,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
+ static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
+ int index, unsigned int plane)
+ {
+- struct vb2_buffer *buf;
++ struct vb2_buffer *buf = NULL;
++ struct vb2_queue *vq;
+
+ if (index < 0)
+ return 0;
+
+- buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
++ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
++ if (vq)
++ buf = vb2_get_buffer(vq, index);
++
+ return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+ }
+
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+index d6a782703c9b..08c6c9c410cc 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+@@ -96,7 +96,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+ const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
+ const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
+- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
++ struct vb2_queue *cap_q;
+ struct cedrus_buffer *output_buf;
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned long used_dpbs = 0;
+@@ -104,6 +104,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
+ unsigned int output = 0;
+ unsigned int i;
+
++ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
++
+ memset(pic_list, 0, sizeof(pic_list));
+
+ for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
+@@ -167,12 +169,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
+ enum cedrus_h264_sram_off sram)
+ {
+ const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
+- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
++ struct vb2_queue *cap_q;
+ struct cedrus_dev *dev = ctx->dev;
+ u8 sram_array[CEDRUS_MAX_REF_IDX];
+ unsigned int i;
+ size_t size;
+
++ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
++
+ memset(sram_array, 0, sizeof(sram_array));
+
+ for (i = 0; i < num_ref; i++) {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+index ddd29788d685..f9dd8cbf3458 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+@@ -10,6 +10,9 @@
+ #ifndef _CEDRUS_REGS_H_
+ #define _CEDRUS_REGS_H_
+
++#define SHIFT_AND_MASK_BITS(v, h, l) \
++ (((unsigned long)(v) << (l)) & GENMASK(h, l))
++
+ /*
+ * Common acronyms and contractions used in register descriptions:
+ * * VLD : Variable-Length Decoder
+@@ -37,8 +40,8 @@
+ #define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
+ #define VE_PRIMARY_FB_LINE_STRIDE 0xc8
+
+-#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
+-#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
++#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
++#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
+
+ #define VE_CHROMA_BUF_LEN 0xe8
+
+@@ -46,7 +49,7 @@
+ #define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
+ #define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
+ #define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
+-#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
++#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
+
+ #define VE_PRIMARY_OUT_FMT 0xec
+
+@@ -69,15 +72,15 @@
+
+ #define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
+
+-#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
++#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
+ #define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
+ #define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
+- (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
++ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+
+ #define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
+- (((p) << 10) & GENMASK(11, 10))
++ SHIFT_AND_MASK_BITS(p, 11, 10)
+ #define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
+- (((s) << 8) & GENMASK(9, 8))
++ SHIFT_AND_MASK_BITS(s, 9, 8)
+ #define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
+ ((v) ? BIT(7) : 0)
+ #define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
+@@ -98,19 +101,19 @@
+ #define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
+
+ #define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
+- ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
++ SHIFT_AND_MASK_BITS(DIV_ROUND_UP((w), 16), 15, 8)
+ #define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
+- ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
++ SHIFT_AND_MASK_BITS(DIV_ROUND_UP((h), 16), 7, 0)
+
+ #define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
+
+-#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
+-#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
++#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
++#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
+
+ #define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
+
+-#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
+-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
++#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
++#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
+
+ #define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
+
+@@ -225,7 +228,7 @@
+ #define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
+ #define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
+ #define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
+- (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
++ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
+
+ #define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
+ #define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
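The cedrus_regs.h hunk above replaces the open-coded (((v) << l) & GENMASK(h, l)) expressions with a single SHIFT_AND_MASK_BITS() helper that casts the value to unsigned long before shifting, so a field that reaches bit 31 (such as the chroma line stride) cannot overflow a signed 32-bit shift. A minimal standalone sketch of the same pattern follows; GENMASK() is redefined here only so the example builds outside the kernel tree and is not the kernel's definition.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define GENMASK(h, l) \
        ((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define SHIFT_AND_MASK_BITS(v, h, l) \
        (((unsigned long)(v) << (l)) & GENMASK(h, l))

int main(void)
{
        unsigned int stride = 0xffff;

        /*
         * Shifting a plain int left by 16 can push a set bit into the
         * sign bit; the unsigned long cast keeps the arithmetic defined.
         */
        printf("chroma stride field: 0x%lx\n",
               SHIFT_AND_MASK_BITS(stride, 31, 16));
        printf("luma stride field:   0x%lx\n",
               SHIFT_AND_MASK_BITS(stride, 15, 0));
        return 0;
}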
+diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
+index af928b75a940..ce58042f2f21 100644
+--- a/drivers/staging/mt7621-pci/Kconfig
++++ b/drivers/staging/mt7621-pci/Kconfig
+@@ -2,7 +2,6 @@
+ config PCI_MT7621
+ tristate "MediaTek MT7621 PCI Controller"
+ depends on RALINK
+- depends on PCI
+ select PCI_DRIVERS_GENERIC
+ help
+ This selects a driver for the MediaTek MT7621 PCI Controller.
+diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
+index 952f2ab51347..c37591657bac 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
++++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
+@@ -776,7 +776,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+
+- if (psta->qos_option)
++ if (psta && psta->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+@@ -784,7 +784,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+
+- if (psta->qos_option)
++ if (psta && psta->qos_option)
+ qos_option = true;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 2821411878ce..511136dce3a4 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -1422,7 +1422,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+ (struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
+ struct usb_device *udev = priv->udev;
+ int pend;
+- int status;
++ int status, rt = -1;
+ struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
+ unsigned int idx_pipe;
+
+@@ -1566,8 +1566,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+ }
+ if (bSend0Byte) {
+ tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
+- if (!tx_urb_zero)
+- return -ENOMEM;
++ if (!tx_urb_zero) {
++ rt = -ENOMEM;
++ goto error;
++ }
+ usb_fill_bulk_urb(tx_urb_zero, udev,
+ usb_sndbulkpipe(udev, idx_pipe),
+ &zero, 0, tx_zero_isr, dev);
+@@ -1577,7 +1579,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+ "Error TX URB for zero byte %d, error %d",
+ atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
+ status);
+- return -1;
++ goto error;
+ }
+ }
+ netif_trans_update(dev);
+@@ -1588,7 +1590,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+ RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
+ atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
+ status);
+- return -1;
++
++error:
++ dev_kfree_skb_any(skb);
++ usb_free_urb(tx_urb);
++ usb_free_urb(tx_urb_zero);
++ return rt;
+ }
+
+ static short rtl8192_usb_initendpoints(struct net_device *dev)
+diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c
+index d3d9ea284816..77d0732f451b 100644
+--- a/drivers/staging/wilc1000/wilc_hif.c
++++ b/drivers/staging/wilc1000/wilc_hif.c
+@@ -473,6 +473,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ if (rates_ie) {
+ rates_len = rates_ie[1];
++ if (rates_len > WILC_MAX_RATES_SUPPORTED)
++ rates_len = WILC_MAX_RATES_SUPPORTED;
+ param->supp_rates[0] = rates_len;
+ memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
+ }
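The wilc_hif.c change above clamps the length byte of the supported-rates IE to WILC_MAX_RATES_SUPPORTED before the memcpy(), since that byte comes from received frame data while the destination array is fixed-size. A standalone sketch of the clamp-before-copy idiom; the MAX_RATES value and IE layout below are made up for the example and do not come from the driver.

#include <stdio.h>
#include <string.h>

#define MAX_RATES 12    /* hypothetical stand-in for WILC_MAX_RATES_SUPPORTED */

struct join_params {
        unsigned char supp_rates[MAX_RATES + 1];        /* [0] holds the count */
};

static void copy_rates(struct join_params *p, const unsigned char *ie)
{
        size_t len = ie[1];             /* untrusted length byte from the IE */

        if (len > MAX_RATES)
                len = MAX_RATES;        /* clamp before touching the buffer */

        p->supp_rates[0] = (unsigned char)len;
        memcpy(&p->supp_rates[1], ie + 2, len);
}

int main(void)
{
        /* element id 1, bogus length 200, then some payload bytes */
        unsigned char ie[64] = { 1, 200, 0x82, 0x84, 0x8b, 0x96 };
        struct join_params p = { { 0 } };

        copy_rates(&p, ie);
        printf("copied %d rates\n", p.supp_rates[0]);
        return 0;
}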
+diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+index 22f21831649b..c3cd6f389a98 100644
+--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+@@ -1419,8 +1419,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
+ if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)
+ wilc_wfi_deinit_mon_interface(wl, true);
+ vif->iftype = WILC_STATION_MODE;
+- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+- WILC_STATION_MODE, vif->idx);
++
++ if (wl->initialized)
++ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
++ WILC_STATION_MODE, vif->idx);
+
+ memset(priv->assoc_stainfo.sta_associated_bss, 0,
+ WILC_MAX_NUM_STA * ETH_ALEN);
+@@ -1432,8 +1434,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
+ priv->wdev.iftype = type;
+ vif->monitor_flag = 0;
+ vif->iftype = WILC_CLIENT_MODE;
+- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+- WILC_STATION_MODE, vif->idx);
++
++ if (wl->initialized)
++ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
++ WILC_STATION_MODE, vif->idx);
+ break;
+
+ case NL80211_IFTYPE_AP:
+@@ -1450,8 +1454,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
+ dev->ieee80211_ptr->iftype = type;
+ priv->wdev.iftype = type;
+ vif->iftype = WILC_GO_MODE;
+- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+- WILC_AP_MODE, vif->idx);
++
++ if (wl->initialized)
++ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
++ WILC_AP_MODE, vif->idx);
+ break;
+
+ default:
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index a8dc8af83f39..1ba9bc667e13 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2270,27 +2270,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ mode |= ATMEL_US_USMODE_NORMAL;
+ }
+
+- /* set the mode, clock divisor, parity, stop bits and data size */
+- atmel_uart_writel(port, ATMEL_US_MR, mode);
+-
+- /*
+- * when switching the mode, set the RTS line state according to the
+- * new mode, otherwise keep the former state
+- */
+- if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+- unsigned int rts_state;
+-
+- if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+- /* let the hardware control the RTS line */
+- rts_state = ATMEL_US_RTSDIS;
+- } else {
+- /* force RTS line to low level */
+- rts_state = ATMEL_US_RTSEN;
+- }
+-
+- atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+- }
+-
+ /*
+ * Set the baud rate:
+ * Fractional baudrate allows to setup output frequency more
+@@ -2317,6 +2296,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+
+ if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
+ atmel_uart_writel(port, ATMEL_US_BRGR, quot);
++
++ /* set the mode, clock divisor, parity, stop bits and data size */
++ atmel_uart_writel(port, ATMEL_US_MR, mode);
++
++ /*
++ * when switching the mode, set the RTS line state according to the
++ * new mode, otherwise keep the former state
++ */
++ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
++ unsigned int rts_state;
++
++ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
++ /* let the hardware control the RTS line */
++ rts_state = ATMEL_US_RTSDIS;
++ } else {
++ /* force RTS line to low level */
++ rts_state = ATMEL_US_RTSEN;
++ }
++
++ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
++ }
++
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
+diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
+index 771d11196523..494e2672ebd7 100644
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
+ if (ims & SPRD_IMSR_TIMEOUT)
+ serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
+
++ if (ims & SPRD_IMSR_BREAK_DETECT)
++ serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT);
++
+ if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT |
+ SPRD_IMSR_TIMEOUT))
+ sprd_rx(port);
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 3f899552f6e3..6ca40d135430 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
+ intf = usb_ifnum_to_if(dev, ifnum);
+ if (!intf)
+ err = -ENOENT;
+- else
++ else {
++ unsigned int old_suppress;
++
++ /* suppress uevents while claiming interface */
++ old_suppress = dev_get_uevent_suppress(&intf->dev);
++ dev_set_uevent_suppress(&intf->dev, 1);
+ err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
++ dev_set_uevent_suppress(&intf->dev, old_suppress);
++ }
+ if (err == 0)
+ set_bit(ifnum, &ps->ifclaimed);
+ return err;
+@@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
+ if (!intf)
+ err = -ENOENT;
+ else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
++ unsigned int old_suppress;
++
++ /* suppress uevents while releasing interface */
++ old_suppress = dev_get_uevent_suppress(&intf->dev);
++ dev_set_uevent_suppress(&intf->dev, 1);
+ usb_driver_release_interface(&usbfs_driver, intf);
++ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ err = 0;
+ }
+ return err;
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index aa2f77f1506d..8a5c9b3ebe1e 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -27,6 +27,10 @@
+
+ /*-------------------------------------------------------------------------*/
+
++/* PID Codes that are used here, from EHCI specification, Table 3-16. */
++#define PID_CODE_IN 1
++#define PID_CODE_SETUP 2
++
+ /* fill a qtd, returning how much of the buffer we were able to queue up */
+
+ static int
+@@ -190,7 +194,7 @@ static int qtd_copy_status (
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+- if (likely (QTD_PID (token) != 2))
++ if (likely(QTD_PID(token) != PID_CODE_SETUP))
+ urb->actual_length += length - QTD_LENGTH (token);
+
+ /* don't modify error codes */
+@@ -206,6 +210,13 @@ static int qtd_copy_status (
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
++ /*
++ * When MMF is active and PID Code is IN, queue is halted.
++ * EHCI Specification, Table 4-13.
++ */
++ } else if ((token & QTD_STS_MMF) &&
++ (QTD_PID(token) == PID_CODE_IN)) {
++ status = -EPROTO;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 1904ef56f61c..4917c5b033fa 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -48,6 +48,7 @@
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
++#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
+
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+@@ -212,7 +213,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
++ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+@@ -517,7 +519,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ retval = xhci_resume(xhci, hibernated);
+ return retval;
+ }
+-#endif /* CONFIG_PM */
+
+ static void xhci_pci_shutdown(struct usb_hcd *hcd)
+ {
+@@ -530,6 +531,7 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
++#endif /* CONFIG_PM */
+
+ /*-------------------------------------------------------------------------*/
+
+diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
+index 0824099b905e..ef1735d014da 100644
+--- a/drivers/usb/renesas_usbhs/common.h
++++ b/drivers/usb/renesas_usbhs/common.h
+@@ -161,11 +161,12 @@ struct usbhs_priv;
+ #define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
+ #define VALID (1 << 3) /* USB Request Receive */
+
+-#define DVSQ_MASK (0x3 << 4) /* Device State */
++#define DVSQ_MASK (0x7 << 4) /* Device State */
+ #define POWER_STATE (0 << 4)
+ #define DEFAULT_STATE (1 << 4)
+ #define ADDRESS_STATE (2 << 4)
+ #define CONFIGURATION_STATE (3 << 4)
++#define SUSPENDED_STATE (4 << 4)
+
+ #define CTSQ_MASK (0x7) /* Control Transfer Stage */
+ #define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index cd38d74b3223..53489cafecc1 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
+ {
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
++ int state = usbhs_status_get_device_state(irq_state);
+
+ gpriv->gadget.speed = usbhs_bus_get_speed(priv);
+
+- dev_dbg(dev, "state = %x : speed : %d\n",
+- usbhs_status_get_device_state(irq_state),
+- gpriv->gadget.speed);
++ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
++
++ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
++ (state & SUSPENDED_STATE)) {
++ if (gpriv->driver && gpriv->driver->suspend)
++ gpriv->driver->suspend(&gpriv->gadget);
++ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index 6532d68e8808..e4b96674c405 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -727,6 +727,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+
+ copy -= recv;
+ ret += recv;
++
++ if (!copy)
++ break;
+ }
+
+ if (ret != size)
+diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
+index 33f8972ba842..00fc98741c5d 100644
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -77,16 +77,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
+
+ /* recv transfer buffer */
+- if (usbip_recv_xbuff(ud, urb) < 0)
+- return;
++ if (usbip_recv_xbuff(ud, urb) < 0) {
++ urb->status = -EPROTO;
++ goto error;
++ }
+
+ /* recv iso_packet_descriptor */
+- if (usbip_recv_iso(ud, urb) < 0)
+- return;
++ if (usbip_recv_iso(ud, urb) < 0) {
++ urb->status = -EPROTO;
++ goto error;
++ }
+
+ /* restore the padding in iso packets */
+ usbip_pad_iso(ud, urb);
+
++error:
+ if (usbip_dbg_flag_vhci_rx)
+ usbip_dump_urb(urb);
+
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index 79cc75096f42..a50dadd01093 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -141,7 +141,8 @@ config XEN_GNTDEV
+
+ config XEN_GNTDEV_DMABUF
+ bool "Add support for dma-buf grant access device driver extension"
+- depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC && DMA_SHARED_BUFFER
++ depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
++ select DMA_SHARED_BUFFER
+ help
+ Allows userspace processes and kernel modules to use Xen backed
+ dma-buf implementation. With this extension grant references to
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 2e9e13ffbd08..10a04b99798a 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -252,16 +252,17 @@ out:
+ }
+ }
+
+-static void run_ordered_work(struct __btrfs_workqueue *wq)
++static void run_ordered_work(struct __btrfs_workqueue *wq,
++ struct btrfs_work *self)
+ {
+ struct list_head *list = &wq->ordered_list;
+ struct btrfs_work *work;
+ spinlock_t *lock = &wq->list_lock;
+ unsigned long flags;
++ void *wtag;
++ bool free_self = false;
+
+ while (1) {
+- void *wtag;
+-
+ spin_lock_irqsave(lock, flags);
+ if (list_empty(list))
+ break;
+@@ -287,16 +288,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
+ list_del(&work->ordered_list);
+ spin_unlock_irqrestore(lock, flags);
+
+- /*
+- * We don't want to call the ordered free functions with the
+- * lock held though. Save the work as tag for the trace event,
+- * because the callback could free the structure.
+- */
+- wtag = work;
+- work->ordered_free(work);
+- trace_btrfs_all_work_done(wq->fs_info, wtag);
++ if (work == self) {
++ /*
++ * This is the work item that the worker is currently
++ * executing.
++ *
++ * The kernel workqueue code guarantees non-reentrancy
++ * of work items. I.e., if a work item with the same
++ * address and work function is queued twice, the second
++ * execution is blocked until the first one finishes. A
++ * work item may be freed and recycled with the same
++ * work function; the workqueue code assumes that the
++ * original work item cannot depend on the recycled work
++ * item in that case (see find_worker_executing_work()).
++ *
++ * Note that the work of one Btrfs filesystem may depend
++ * on the work of another Btrfs filesystem via, e.g., a
++ * loop device. Therefore, we must not allow the current
++ * work item to be recycled until we are really done,
++ * otherwise we break the above assumption and can
++ * deadlock.
++ */
++ free_self = true;
++ } else {
++ /*
++ * We don't want to call the ordered free functions with
++ * the lock held though. Save the work as tag for the
++ * trace event, because the callback could free the
++ * structure.
++ */
++ wtag = work;
++ work->ordered_free(work);
++ trace_btrfs_all_work_done(wq->fs_info, wtag);
++ }
+ }
+ spin_unlock_irqrestore(lock, flags);
++
++ if (free_self) {
++ wtag = self;
++ self->ordered_free(self);
++ trace_btrfs_all_work_done(wq->fs_info, wtag);
++ }
+ }
+
+ static void normal_work_helper(struct btrfs_work *work)
+@@ -324,7 +356,7 @@ static void normal_work_helper(struct btrfs_work *work)
+ work->func(work);
+ if (need_order) {
+ set_bit(WORK_DONE_BIT, &work->flags);
+- run_ordered_work(wq);
++ run_ordered_work(wq, work);
+ }
+ if (!need_order)
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
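The async-thread.c fix above defers freeing the work item that the worker is currently executing until run_ordered_work() has finished draining the ordered list, because freeing it earlier would let the item be recycled and collide with the workqueue's non-reentrancy assumption described in the comment. A toy standalone sketch of the same defer-self-free shape; a plain linked list and malloc/free stand in for btrfs_work and the workqueue machinery.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
        struct work *next;
        int id;
};

/* Drain the ordered list; free "self" only once the loop is done. */
static void drain_ordered(struct work **list, struct work *self)
{
        bool free_self = false;

        while (*list) {
                struct work *w = *list;

                *list = w->next;
                printf("ordered completion for work %d\n", w->id);

                if (w == self) {
                        /* freeing now would let the item be recycled too early */
                        free_self = true;
                        continue;
                }
                free(w);
        }

        if (free_self)
                free(self);
}

int main(void)
{
        struct work *a = malloc(sizeof(*a));
        struct work *b = malloc(sizeof(*b));

        if (!a || !b) {
                free(a);
                free(b);
                return 1;
        }
        a->id = 1;
        a->next = b;
        b->id = 2;
        b->next = NULL;

        /* pretend the worker is currently executing item 1 */
        drain_ordered(&a, a);
        return 0;
}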
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index e59cde204b2f..da9b0f060a9d 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -383,7 +383,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ for (node = rb_first(tm_root); node; node = next) {
+ next = rb_next(node);
+ tm = rb_entry(node, struct tree_mod_elem, node);
+- if (tm->seq > min_seq)
++ if (tm->seq >= min_seq)
+ continue;
+ rb_erase(node, tm_root);
+ kfree(tm);
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index fe2b8765d9e6..5e9f80b28fcf 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2785,7 +2785,7 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
+ /* file-item.c */
+ struct btrfs_dio_private;
+ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+- struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
++ struct btrfs_root *root, u64 bytenr, u64 len);
+ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
+ u8 *dst);
+ blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 402b61bf345c..3895c21853cc 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1657,8 +1657,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
+ bio->bi_status = end_io_wq->status;
+ bio->bi_private = end_io_wq->private;
+ bio->bi_end_io = end_io_wq->end_io;
+- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
+ bio_endio(bio);
++ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
+ }
+
+ static int cleaner_kthread(void *arg)
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 49cb26fa7c63..eb95ed78a18e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1848,8 +1848,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ btrfs_pin_extent(fs_info, head->bytenr,
+ head->num_bytes, 1);
+ if (head->is_data) {
+- ret = btrfs_del_csums(trans, fs_info, head->bytenr,
+- head->num_bytes);
++ ret = btrfs_del_csums(trans, fs_info->csum_root,
++ head->bytenr, head->num_bytes);
+ }
+ }
+
+@@ -3155,7 +3155,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ btrfs_release_path(path);
+
+ if (is_data) {
+- ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
++ ret = btrfs_del_csums(trans, info->csum_root, bytenr,
++ num_bytes);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 4905f48587df..be9dc78aa727 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -5066,12 +5066,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
+ return eb;
+ eb = alloc_dummy_extent_buffer(fs_info, start);
+ if (!eb)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+ eb->fs_info = fs_info;
+ again:
+ ret = radix_tree_preload(GFP_NOFS);
+- if (ret)
++ if (ret) {
++ exists = ERR_PTR(ret);
+ goto free_eb;
++ }
+ spin_lock(&fs_info->buffer_lock);
+ ret = radix_tree_insert(&fs_info->buffer_radix,
+ start >> PAGE_SHIFT, eb);
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 1a599f50837b..c878bc25d046 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -590,9 +590,9 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+ * range of bytes.
+ */
+ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+- struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
++ struct btrfs_root *root, u64 bytenr, u64 len)
+ {
+- struct btrfs_root *root = fs_info->csum_root;
++ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ u64 end_byte = bytenr + len;
+@@ -602,6 +602,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
+ u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ int blocksize_bits = fs_info->sb->s_blocksize_bits;
+
++ ASSERT(root == fs_info->csum_root ||
++ root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
++
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 10a01dd0c4e6..e5758f62e8d8 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5697,7 +5697,6 @@ static void inode_tree_add(struct inode *inode)
+
+ static void inode_tree_del(struct inode *inode)
+ {
+- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int empty = 0;
+
+@@ -5710,7 +5709,6 @@ static void inode_tree_del(struct inode *inode)
+ spin_unlock(&root->inode_lock);
+
+ if (empty && btrfs_root_refs(&root->root_item) == 0) {
+- synchronize_srcu(&fs_info->subvol_srcu);
+ spin_lock(&root->inode_lock);
+ empty = RB_EMPTY_ROOT(&root->inode_tree);
+ spin_unlock(&root->inode_lock);
+@@ -9535,9 +9533,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ btrfs_init_log_ctx(&ctx_dest, new_inode);
+
+ /* close the race window with snapshot create/destroy ioctl */
+- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+- down_read(&fs_info->subvol_sem);
+- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
++ if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
++ new_ino == BTRFS_FIRST_FREE_OBJECTID)
+ down_read(&fs_info->subvol_sem);
+
+ /*
+@@ -9771,9 +9768,8 @@ out_fail:
+ ret = ret ? ret : ret2;
+ }
+ out_notrans:
+- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+- up_read(&fs_info->subvol_sem);
+- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
++ if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
++ old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ up_read(&fs_info->subvol_sem);
+
+ ASSERT(list_empty(&ctx_root.list));
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 23272d9154f3..a56dcc0c9c2a 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -705,11 +705,17 @@ static noinline int create_subvol(struct inode *dir,
+
+ btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
+ ret = btrfs_update_inode(trans, root, dir);
+- BUG_ON(ret);
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ goto fail;
++ }
+
+ ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
+ btrfs_ino(BTRFS_I(dir)), index, name, namelen);
+- BUG_ON(ret);
++ if (ret) {
++ btrfs_abort_transaction(trans, ret);
++ goto fail;
++ }
+
+ ret = btrfs_uuid_tree_add(trans, root_item->uuid,
+ BTRFS_UUID_KEY_SUBVOL, objectid);
+diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
+index ee6f60547a8d..dd4f9c2b7107 100644
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -752,21 +752,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
+ static void reada_start_machine_worker(struct btrfs_work *work)
+ {
+ struct reada_machine_work *rmw;
+- struct btrfs_fs_info *fs_info;
+ int old_ioprio;
+
+ rmw = container_of(work, struct reada_machine_work, work);
+- fs_info = rmw->fs_info;
+-
+- kfree(rmw);
+
+ old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
+ task_nice_ioprio(current));
+ set_task_ioprio(current, BTRFS_IOPRIO_READA);
+- __reada_start_machine(fs_info);
++ __reada_start_machine(rmw->fs_info);
+ set_task_ioprio(current, old_ioprio);
+
+- atomic_dec(&fs_info->reada_works_cnt);
++ atomic_dec(&rmw->fs_info->reada_works_cnt);
++
++ kfree(rmw);
+ }
+
+ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 5cd42b66818c..fd0f4c1696c8 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -4555,6 +4555,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
+ fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
+ if (IS_ERR(fs_root)) {
+ err = PTR_ERR(fs_root);
++ list_add_tail(&reloc_root->root_list, &reloc_roots);
+ goto out_free;
+ }
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index f7d4e03f4c5d..a0770a6aee00 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2149,14 +2149,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
+ scrub_write_block_to_dev_replace(sblock);
+ }
+
+- scrub_block_put(sblock);
+-
+ if (sctx->is_dev_replace && sctx->flush_all_writes) {
+ mutex_lock(&sctx->wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_lock);
+ }
+
++ scrub_block_put(sblock);
+ scrub_pending_bio_dec(sctx);
+ }
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 518ec1265a0c..3eb0fec2488a 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7075,12 +7075,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
+ send_root->send_in_progress++;
+ spin_unlock(&send_root->root_item_lock);
+
+- /*
+- * This is done when we lookup the root, it should already be complete
+- * by the time we get here.
+- */
+- WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
+-
+ /*
+ * Userspace tools do the checks and warn the user if it's
+ * not RO.
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index bc92df977630..6e774d055402 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -463,9 +463,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ root->fs_info->tree_root = root;
+
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+- if (!root->node) {
++ if (IS_ERR(root->node)) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+- ret = -ENOMEM;
++ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
+index 09aaca1efd62..ac035a6fa003 100644
+--- a/fs/btrfs/tests/qgroup-tests.c
++++ b/fs/btrfs/tests/qgroup-tests.c
+@@ -484,9 +484,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
+ * *cough*backref walking code*cough*
+ */
+ root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
+- if (!root->node) {
++ if (IS_ERR(root->node)) {
+ test_err("couldn't allocate dummy buffer");
+- ret = -ENOMEM;
++ ret = PTR_ERR(root->node);
+ goto out;
+ }
+ btrfs_set_header_level(root->node, 0);
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 076d5b8014fb..0e44db066641 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -243,7 +243,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
+ }
+
+ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
+- int slot)
++ int slot, struct btrfs_key *prev_key)
+ {
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
+ u32 sectorsize = fs_info->sectorsize;
+@@ -267,6 +267,20 @@ static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ btrfs_item_size_nr(leaf, slot), csumsize);
+ return -EUCLEAN;
+ }
++ if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
++ u64 prev_csum_end;
++ u32 prev_item_size;
++
++ prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
++ prev_csum_end = (prev_item_size / csumsize) * sectorsize;
++ prev_csum_end += prev_key->offset;
++ if (prev_csum_end > key->offset) {
++ generic_err(leaf, slot - 1,
++"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
++ prev_csum_end, key->offset);
++ return -EUCLEAN;
++ }
++ }
+ return 0;
+ }
+
+@@ -1239,7 +1253,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
+ ret = check_extent_data_item(leaf, key, slot, prev_key);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+- ret = check_csum_item(leaf, key, slot);
++ ret = check_csum_item(leaf, key, slot, prev_key);
+ break;
+ case BTRFS_DIR_ITEM_KEY:
+ case BTRFS_DIR_INDEX_KEY:
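The tree-checker hunk above adds an overlap check between adjacent EXTENT_CSUM items: the previous item carries prev_item_size / csumsize checksums, each covering one sector starting at the previous key's offset, so its logical end must not run past the next item's start offset. As a worked example with hypothetical values: with a 4096-byte sector, 4-byte checksums and a previous item of 64 bytes starting at offset 1048576, prev_csum_end = (64 / 4) * 4096 + 1048576 = 1114112, so a following csum item whose key offset is below 1114112 is rejected with -EUCLEAN.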
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 8a6cc600bf18..ab27e6cd9b3e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -808,7 +808,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_sum,
+ list);
+ if (!ret)
+- ret = btrfs_del_csums(trans, fs_info,
++ ret = btrfs_del_csums(trans,
++ fs_info->csum_root,
+ sums->bytenr,
+ sums->len);
+ if (!ret)
+@@ -3927,6 +3928,28 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++static int log_csums(struct btrfs_trans_handle *trans,
++ struct btrfs_root *log_root,
++ struct btrfs_ordered_sum *sums)
++{
++ int ret;
++
++ /*
++ * Due to extent cloning, we might have logged a csum item that covers a
++ * subrange of a cloned extent, and later we can end up logging a csum
++ * item for a larger subrange of the same extent or the entire range.
++ * This would leave csum items in the log tree that cover the same range
++ * and break the searches for checksums in the log tree, resulting in
++ * some checksums missing in the fs/subvolume tree. So just delete (or
++ * trim and adjust) any existing csum items in the log for this range.
++ */
++ ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
++ if (ret)
++ return ret;
++
++ return btrfs_csum_file_blocks(trans, log_root, sums);
++}
++
+ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_path *dst_path,
+@@ -4072,7 +4095,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_sum,
+ list);
+ if (!ret)
+- ret = btrfs_csum_file_blocks(trans, log, sums);
++ ret = log_csums(trans, log, sums);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+@@ -4292,7 +4315,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_sum,
+ list);
+ if (!ret)
+- ret = btrfs_csum_file_blocks(trans, log_root, sums);
++ ret = log_csums(trans, log_root, sums);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+@@ -6314,9 +6337,28 @@ again:
+ wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
+ if (IS_ERR(wc.replay_dest)) {
+ ret = PTR_ERR(wc.replay_dest);
++
++ /*
++ * We didn't find the subvol, likely because it was
++ * deleted. This is ok, simply skip this log and go to
++ * the next one.
++ *
++ * We need to exclude the root because we can't have
++ * other log replays overwriting this log as we'll read
++ * it back in a few more times. This will keep our
++ * block from being modified, and we'll just bail for
++ * each subsequent pass.
++ */
++ if (ret == -ENOENT)
++ ret = btrfs_pin_extent_for_log_replay(fs_info,
++ log->node->start,
++ log->node->len);
+ free_extent_buffer(log->node);
+ free_extent_buffer(log->commit_root);
+ kfree(log);
++
++ if (!ret)
++ goto next;
+ btrfs_handle_fs_error(fs_info, ret,
+ "Couldn't read target root for tree log recovery.");
+ goto error;
+@@ -6348,7 +6390,6 @@ again:
+ &root->highest_objectid);
+ }
+
+- key.offset = found_key.offset - 1;
+ wc.replay_dest->log_root = NULL;
+ free_extent_buffer(log->node);
+ free_extent_buffer(log->commit_root);
+@@ -6356,9 +6397,10 @@ again:
+
+ if (ret)
+ goto error;
+-
++next:
+ if (found_key.offset == 0)
+ break;
++ key.offset = found_key.offset - 1;
+ }
+ btrfs_release_path(path);
+
+diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
+index 91caab63bdf5..76b84f2397b1 100644
+--- a/fs/btrfs/uuid-tree.c
++++ b/fs/btrfs/uuid-tree.c
+@@ -324,6 +324,8 @@ again_search_slot:
+ }
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
++ key.offset++;
++ goto again_search_slot;
+ }
+ item_size -= sizeof(subid_le);
+ offset += sizeof(subid_le);
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 9fdd2b269d61..6305d5ec25af 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -81,6 +81,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+ error_msg = "rec_len is too small for name_len";
+ else if (unlikely(((char *) de - buf) + rlen > size))
+ error_msg = "directory entry overrun";
++ else if (unlikely(((char *) de - buf) + rlen >
++ size - EXT4_DIR_REC_LEN(1) &&
++ ((char *) de - buf) + rlen != size)) {
++ error_msg = "directory entry too close to block end";
++ }
+ else if (unlikely(le32_to_cpu(de->inode) >
+ le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
+ error_msg = "inode out of bounds";
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 91da21890360..53134e4509b8 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -6035,7 +6035,7 @@ int ext4_expand_extra_isize(struct inode *inode,
+ error = ext4_journal_get_write_access(handle, iloc->bh);
+ if (error) {
+ brelse(iloc->bh);
+- goto out_stop;
++ goto out_unlock;
+ }
+
+ error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
+@@ -6045,8 +6045,8 @@ int ext4_expand_extra_isize(struct inode *inode,
+ if (!error)
+ error = rc;
+
++out_unlock:
+ ext4_write_unlock_xattr(inode, &no_expand);
+-out_stop:
+ ext4_journal_stop(handle);
+ return error;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 923476e3aefb..f56402e9c11c 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2808,7 +2808,7 @@ bool ext4_empty_dir(struct inode *inode)
+ {
+ unsigned int offset;
+ struct buffer_head *bh;
+- struct ext4_dir_entry_2 *de, *de1;
++ struct ext4_dir_entry_2 *de;
+ struct super_block *sb;
+
+ if (ext4_has_inline_data(inode)) {
+@@ -2833,19 +2833,25 @@ bool ext4_empty_dir(struct inode *inode)
+ return true;
+
+ de = (struct ext4_dir_entry_2 *) bh->b_data;
+- de1 = ext4_next_entry(de, sb->s_blocksize);
+- if (le32_to_cpu(de->inode) != inode->i_ino ||
+- le32_to_cpu(de1->inode) == 0 ||
+- strcmp(".", de->name) || strcmp("..", de1->name)) {
+- ext4_warning_inode(inode, "directory missing '.' and/or '..'");
++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++ 0) ||
++ le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
++ ext4_warning_inode(inode, "directory missing '.'");
++ brelse(bh);
++ return true;
++ }
++ offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
++ de = ext4_next_entry(de, sb->s_blocksize);
++ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++ offset) ||
++ le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
++ ext4_warning_inode(inode, "directory missing '..'");
+ brelse(bh);
+ return true;
+ }
+- offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
+- ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
+- de = ext4_next_entry(de1, sb->s_blocksize);
++ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+ while (offset < inode->i_size) {
+- if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
++ if (!(offset & (sb->s_blocksize - 1))) {
+ unsigned int lblock;
+ brelse(bh);
+ lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -2856,12 +2862,11 @@ bool ext4_empty_dir(struct inode *inode)
+ }
+ if (IS_ERR(bh))
+ return true;
+- de = (struct ext4_dir_entry_2 *) bh->b_data;
+ }
++ de = (struct ext4_dir_entry_2 *) (bh->b_data +
++ (offset & (sb->s_blocksize - 1)));
+ if (ext4_check_dir_entry(inode, NULL, de, bh,
+ bh->b_data, bh->b_size, offset)) {
+- de = (struct ext4_dir_entry_2 *)(bh->b_data +
+- sb->s_blocksize);
+ offset = (offset | (sb->s_blocksize - 1)) + 1;
+ continue;
+ }
+@@ -2870,7 +2875,6 @@ bool ext4_empty_dir(struct inode *inode)
+ return false;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+- de = ext4_next_entry(de, sb->s_blocksize);
+ }
+ brelse(bh);
+ return true;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 98d37b8d0050..66162b430edc 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1887,6 +1887,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ }
+ sbi->s_commit_interval = HZ * arg;
+ } else if (token == Opt_debug_want_extra_isize) {
++ if ((arg & 1) ||
++ (arg < 4) ||
++ (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
++ ext4_msg(sb, KERN_ERR,
++ "Invalid want_extra_isize %d", arg);
++ return -1;
++ }
+ sbi->s_want_extra_isize = arg;
+ } else if (token == Opt_max_batch_time) {
+ sbi->s_max_batch_time = arg;
+@@ -3551,40 +3558,6 @@ int ext4_calculate_overhead(struct super_block *sb)
+ return 0;
+ }
+
+-static void ext4_clamp_want_extra_isize(struct super_block *sb)
+-{
+- struct ext4_sb_info *sbi = EXT4_SB(sb);
+- struct ext4_super_block *es = sbi->s_es;
+- unsigned def_extra_isize = sizeof(struct ext4_inode) -
+- EXT4_GOOD_OLD_INODE_SIZE;
+-
+- if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
+- sbi->s_want_extra_isize = 0;
+- return;
+- }
+- if (sbi->s_want_extra_isize < 4) {
+- sbi->s_want_extra_isize = def_extra_isize;
+- if (ext4_has_feature_extra_isize(sb)) {
+- if (sbi->s_want_extra_isize <
+- le16_to_cpu(es->s_want_extra_isize))
+- sbi->s_want_extra_isize =
+- le16_to_cpu(es->s_want_extra_isize);
+- if (sbi->s_want_extra_isize <
+- le16_to_cpu(es->s_min_extra_isize))
+- sbi->s_want_extra_isize =
+- le16_to_cpu(es->s_min_extra_isize);
+- }
+- }
+- /* Check if enough inode space is available */
+- if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
+- (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+- sbi->s_inode_size)) {
+- sbi->s_want_extra_isize = def_extra_isize;
+- ext4_msg(sb, KERN_INFO,
+- "required extra inode space not available");
+- }
+-}
+-
+ static void ext4_set_resv_clusters(struct super_block *sb)
+ {
+ ext4_fsblk_t resv_clusters;
+@@ -3792,6 +3765,68 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ */
+ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+
++ if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
++ sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
++ sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
++ } else {
++ sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
++ sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
++ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
++ ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
++ sbi->s_first_ino);
++ goto failed_mount;
++ }
++ if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
++ (!is_power_of_2(sbi->s_inode_size)) ||
++ (sbi->s_inode_size > blocksize)) {
++ ext4_msg(sb, KERN_ERR,
++ "unsupported inode size: %d",
++ sbi->s_inode_size);
++ goto failed_mount;
++ }
++ /*
++ * i_atime_extra is the last extra field available for
++ * [acm]times in struct ext4_inode. Checking for that
++ * field should suffice to ensure we have extra space
++ * for all three.
++ */
++ if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
++ sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
++ sb->s_time_gran = 1;
++ sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
++ } else {
++ sb->s_time_gran = NSEC_PER_SEC;
++ sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
++ }
++ sb->s_time_min = EXT4_TIMESTAMP_MIN;
++ }
++ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
++ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++ EXT4_GOOD_OLD_INODE_SIZE;
++ if (ext4_has_feature_extra_isize(sb)) {
++ unsigned v, max = (sbi->s_inode_size -
++ EXT4_GOOD_OLD_INODE_SIZE);
++
++ v = le16_to_cpu(es->s_want_extra_isize);
++ if (v > max) {
++ ext4_msg(sb, KERN_ERR,
++ "bad s_want_extra_isize: %d", v);
++ goto failed_mount;
++ }
++ if (sbi->s_want_extra_isize < v)
++ sbi->s_want_extra_isize = v;
++
++ v = le16_to_cpu(es->s_min_extra_isize);
++ if (v > max) {
++ ext4_msg(sb, KERN_ERR,
++ "bad s_min_extra_isize: %d", v);
++ goto failed_mount;
++ }
++ if (sbi->s_want_extra_isize < v)
++ sbi->s_want_extra_isize = v;
++ }
++ }
++
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+@@ -4030,42 +4065,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ has_huge_files);
+ sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
+
+- if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
+- sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
+- sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
+- } else {
+- sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+- sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+- if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+- ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+- sbi->s_first_ino);
+- goto failed_mount;
+- }
+- if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
+- (!is_power_of_2(sbi->s_inode_size)) ||
+- (sbi->s_inode_size > blocksize)) {
+- ext4_msg(sb, KERN_ERR,
+- "unsupported inode size: %d",
+- sbi->s_inode_size);
+- goto failed_mount;
+- }
+- /*
+- * i_atime_extra is the last extra field available for [acm]times in
+- * struct ext4_inode. Checking for that field should suffice to ensure
+- * we have extra space for all three.
+- */
+- if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
+- sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
+- sb->s_time_gran = 1;
+- sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
+- } else {
+- sb->s_time_gran = NSEC_PER_SEC;
+- sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
+- }
+-
+- sb->s_time_min = EXT4_TIMESTAMP_MIN;
+- }
+-
+ sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
+ if (ext4_has_feature_64bit(sb)) {
+ if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
+@@ -4521,8 +4520,6 @@ no_journal:
+ } else if (ret)
+ goto failed_mount4a;
+
+- ext4_clamp_want_extra_isize(sb);
+-
+ ext4_set_resv_clusters(sb);
+
+ err = ext4_setup_system_zone(sb);
+@@ -5310,8 +5307,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ goto restore_opts;
+ }
+
+- ext4_clamp_want_extra_isize(sb);
+-
+ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ test_opt(sb, JOURNAL_CHECKSUM)) {
+ ext4_msg(sb, KERN_ERR, "changing journal_checksum "
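The ext4 super.c changes above validate the debug_want_extra_isize mount option up front, rejecting values that are odd, smaller than 4, or larger than the space left past the 128-byte old-format inode. A standalone sketch of just that bounds check; the constant below mirrors EXT4_GOOD_OLD_INODE_SIZE only for the sake of the example.

#include <stdbool.h>
#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128 /* stands in for EXT4_GOOD_OLD_INODE_SIZE */

static bool want_extra_isize_ok(unsigned int arg, unsigned int inode_size)
{
        return !(arg & 1) &&                            /* must be even */
               arg >= 4 &&                              /* and at least 4 bytes */
               arg <= inode_size - GOOD_OLD_INODE_SIZE; /* and fit in the inode */
}

int main(void)
{
        /* 256-byte inodes leave 128 bytes beyond the old-format inode */
        printf("32  -> %d\n", want_extra_isize_ok(32, 256));    /* 1: accepted  */
        printf("33  -> %d\n", want_extra_isize_ok(33, 256));    /* 0: odd       */
        printf("200 -> %d\n", want_extra_isize_ok(200, 256));   /* 0: too large */
        return 0;
}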
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index 2ba6253ea6d3..fc349204a71b 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -334,7 +334,7 @@ struct drm_dp_resource_status_notify {
+
+ struct drm_dp_query_payload_ack_reply {
+ u8 port_number;
+- u8 allocated_pbn;
++ u16 allocated_pbn;
+ };
+
+ struct drm_dp_sideband_msg_req_body {
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 92d5fdc8154e..31b1b0e03df8 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -595,17 +595,6 @@ struct governor_attr {
+ size_t count);
+ };
+
+-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
+-{
+- /*
+- * Allow remote callbacks if:
+- * - dvfs_possible_from_any_cpu flag is set
+- * - the local and remote CPUs share cpufreq policy
+- */
+- return policy->dvfs_possible_from_any_cpu ||
+- cpumask_test_cpu(smp_processor_id(), policy->cpus);
+-}
+-
+ /*********************************************************************
+ * FREQUENCY TABLE HELPERS *
+ *********************************************************************/
+diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
+index 4dc66157d872..deec18b8944a 100644
+--- a/include/linux/ipmi_smi.h
++++ b/include/linux/ipmi_smi.h
+@@ -224,10 +224,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
+ * is called, and the lower layer must get the interface from that
+ * call.
+ */
+-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
+- void *send_info,
+- struct device *dev,
+- unsigned char slave_addr);
++int ipmi_add_smi(struct module *owner,
++ const struct ipmi_smi_handlers *handlers,
++ void *send_info,
++ struct device *dev,
++ unsigned char slave_addr);
++
++#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
++ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
+
+ /*
+ * Remove a low-level interface from the IPMI driver. This will
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 5714fd35a83c..e3596db077dc 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -587,9 +587,9 @@ struct platform_device_id {
+ #define MDIO_NAME_SIZE 32
+ #define MDIO_MODULE_PREFIX "mdio:"
+
+-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
++#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
+ #define MDIO_ID_ARGS(_id) \
+- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
++ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
+ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
+ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index f61d6906e59d..a260cd754f28 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -1368,6 +1368,7 @@ enum {
+ NVME_SC_ANA_INACCESSIBLE = 0x302,
+ NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_HOST_PATH_ERROR = 0x370,
++ NVME_SC_HOST_ABORTED_CMD = 0x371,
+
+ NVME_SC_CRD = 0x1800,
+ NVME_SC_DNR = 0x4000,
+diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
+index 8f8be5b00060..5c17cb733224 100644
+--- a/include/linux/nvmem-consumer.h
++++ b/include/linux/nvmem-consumer.h
+@@ -118,7 +118,7 @@ static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+ }
+
+ static inline int nvmem_cell_write(struct nvmem_cell *cell,
+- const char *buf, size_t len)
++ void *buf, size_t len)
+ {
+ return -EOPNOTSUPP;
+ }
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 9a0e981df502..3d5d53313e6c 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -993,7 +993,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
+ int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set);
+
+-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
++struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids);
+ #if IS_ENABLED(CONFIG_PHYLIB)
+diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
+index afa940cd50dc..cc6bcc1e96bc 100644
+--- a/include/linux/sched/cpufreq.h
++++ b/include/linux/sched/cpufreq.h
+@@ -12,6 +12,8 @@
+ #define SCHED_CPUFREQ_MIGRATION (1U << 1)
+
+ #ifdef CONFIG_CPU_FREQ
++struct cpufreq_policy;
++
+ struct update_util_data {
+ void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
+ };
+@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned int flags));
+ void cpufreq_remove_update_util_hook(int cpu);
++bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
+
+ static inline unsigned long map_util_freq(unsigned long util,
+ unsigned long freq, unsigned long cap)
+diff --git a/include/net/arp.h b/include/net/arp.h
+index c8f580a0e6b1..4950191f6b2b 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+- if (n->confirmed != now)
+- n->confirmed = now;
++ if (READ_ONCE(n->confirmed) != now)
++ WRITE_ONCE(n->confirmed, now);
+ }
+ rcu_read_unlock_bh();
+ }
+diff --git a/include/net/dst.h b/include/net/dst.h
+index fe62fe2eb781..8224dad2ae94 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -82,7 +82,7 @@ struct dst_entry {
+ struct dst_metrics {
+ u32 metrics[RTAX_MAX];
+ refcount_t refcnt;
+-};
++} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
+ extern const struct dst_metrics dst_default_metrics;
+
+ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+diff --git a/include/net/ndisc.h b/include/net/ndisc.h
+index b2f715ca0567..b5ebeb3b0de0 100644
+--- a/include/net/ndisc.h
++++ b/include/net/ndisc.h
+@@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+- if (n->confirmed != now)
+- n->confirmed = now;
++ if (READ_ONCE(n->confirmed) != now)
++ WRITE_ONCE(n->confirmed, now);
+ }
+ rcu_read_unlock_bh();
+ }
+@@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+- if (n->confirmed != now)
+- n->confirmed = now;
++ if (READ_ONCE(n->confirmed) != now)
++ WRITE_ONCE(n->confirmed, now);
+ }
+ rcu_read_unlock_bh();
+ }
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index b8452cc0e059..5e679c8dae0b 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -72,7 +72,6 @@ struct neigh_parms {
+ struct net_device *dev;
+ struct list_head list;
+ int (*neigh_setup)(struct neighbour *);
+- void (*neigh_cleanup)(struct neighbour *);
+ struct neigh_table *tbl;
+
+ void *sysctl_table;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 718e62fbe869..013396e50b91 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1940,8 +1940,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+
+ static inline void sk_dst_confirm(struct sock *sk)
+ {
+- if (!sk->sk_dst_pending_confirm)
+- sk->sk_dst_pending_confirm = 1;
++ if (!READ_ONCE(sk->sk_dst_pending_confirm))
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
+ }
+
+ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
+@@ -1951,10 +1951,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+- if (n->confirmed != now)
+- n->confirmed = now;
+- if (sk && sk->sk_dst_pending_confirm)
+- sk->sk_dst_pending_confirm = 0;
++ if (READ_ONCE(n->confirmed) != now)
++ WRITE_ONCE(n->confirmed, now);
++ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
++ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ }
+ }
+
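The arp.h, ndisc.h and sock.h hunks above wrap the neighbour-confirmation fields in READ_ONCE()/WRITE_ONCE(): the field is still only written when the value actually changes, so an already up-to-date cache line is not dirtied, but the accesses become single, untorn loads and stores even though several CPUs touch them locklessly. A userspace sketch of the same check-before-write idiom, with C11 relaxed atomics standing in for the kernel macros.

#include <stdatomic.h>
#include <stdio.h>

struct neigh_like {
        _Atomic unsigned long confirmed;
};

static void confirm_neigh(struct neigh_like *n, unsigned long now)
{
        /* read first: skip the store (and the cache-line dirtying) if unchanged */
        if (atomic_load_explicit(&n->confirmed, memory_order_relaxed) != now)
                atomic_store_explicit(&n->confirmed, now, memory_order_relaxed);
}

int main(void)
{
        struct neigh_like n = { 0 };

        confirm_neigh(&n, 100);         /* value differs: store issued */
        confirm_neigh(&n, 100);         /* unchanged: no store, line stays clean */
        printf("confirmed = %lu\n",
               atomic_load_explicit(&n.confirmed, memory_order_relaxed));
        return 0;
}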
+diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
+index b048694070e2..37342a13c9cb 100644
+--- a/include/trace/events/wbt.h
++++ b/include/trace/events/wbt.h
+@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strlcpy(__entry->name, dev_name(bdi->dev),
++ ARRAY_SIZE(__entry->name));
+ __entry->rmean = stat[0].mean;
+ __entry->rmin = stat[0].min;
+ __entry->rmax = stat[0].max;
+@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strlcpy(__entry->name, dev_name(bdi->dev),
++ ARRAY_SIZE(__entry->name));
+ __entry->lat = div_u64(lat, 1000);
+ ),
+
+@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strlcpy(__entry->name, dev_name(bdi->dev),
++ ARRAY_SIZE(__entry->name));
+ __entry->msg = msg;
+ __entry->step = step;
+ __entry->window = div_u64(window, 1000);
+@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strlcpy(__entry->name, dev_name(bdi->dev),
++ ARRAY_SIZE(__entry->name));
+ __entry->status = status;
+ __entry->step = step;
+ __entry->inflight = inflight;
+diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
+index 8997d5068c08..4511b85c84df 100644
+--- a/include/uapi/linux/cec-funcs.h
++++ b/include/uapi/linux/cec-funcs.h
+@@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
+ msg->msg[2] = status_req;
+- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
++ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
++ CEC_MSG_DECK_STATUS : 0;
+ }
+
+ static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
+@@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
+ msg->len = 3;
+ msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
+ msg->msg[2] = status_req;
+- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
++ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
++ CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ }
+
+ static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 052580c33d26..173e983619d7 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -287,7 +287,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
+ bool irq_work_busy = false;
+ struct stack_map_irq_work *work = NULL;
+
+- if (in_nmi()) {
++ if (irqs_disabled()) {
+ work = this_cpu_ptr(&up_read_work);
+ if (work->irq_work.flags & IRQ_WORK_BUSY)
+ /* cannot queue more up_read, fallback */
+@@ -295,8 +295,9 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
+ }
+
+ /*
+- * We cannot do up_read() in nmi context. To do build_id lookup
+- * in nmi context, we need to run up_read() in irq_work. We use
++ * We cannot do up_read() when interrupts are disabled, because of
++ * the risk of deadlocking on rq_lock. To do a build_id lookup while
++ * interrupts are disabled, we need to run up_read() in irq_work. We use
+ * a percpu variable to do the irq_work. If the irq_work is
+ * already used by another lookup, we fall back to report ips.
+ *
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index ffc3e53f5300..9e7cee5307e0 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -978,6 +978,17 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
+ reg->umax_value));
+ }
+
++static void __reg_bound_offset32(struct bpf_reg_state *reg)
++{
++ u64 mask = 0xffffFFFF;
++ struct tnum range = tnum_range(reg->umin_value & mask,
++ reg->umax_value & mask);
++ struct tnum lo32 = tnum_cast(reg->var_off, 4);
++ struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
++
++ reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
++}
++
+ /* Reset the min/max bounds of a register */
+ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
+ {
+@@ -5433,6 +5444,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ /* We might have learned some bits from the bounds. */
+ __reg_bound_offset(false_reg);
+ __reg_bound_offset(true_reg);
++ if (is_jmp32) {
++ __reg_bound_offset32(false_reg);
++ __reg_bound_offset32(true_reg);
++ }
+ /* Intersecting with the old var_off might have improved our bounds
+ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+ * then new var_off is (0; 0x7f...fc) which improves our umax.
+@@ -5542,6 +5557,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ /* We might have learned some bits from the bounds. */
+ __reg_bound_offset(false_reg);
+ __reg_bound_offset(true_reg);
++ if (is_jmp32) {
++ __reg_bound_offset32(false_reg);
++ __reg_bound_offset32(true_reg);
++ }
+ /* Intersecting with the old var_off might have improved our bounds
+ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+ * then new var_off is (0; 0x7f...fc) which improves our umax.
+diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c
+index 8cf010680678..3984dd6b8ddb 100644
+--- a/kernel/cgroup/freezer.c
++++ b/kernel/cgroup/freezer.c
+@@ -230,6 +230,15 @@ void cgroup_freezer_migrate_task(struct task_struct *task,
+ if (task->flags & PF_KTHREAD)
+ return;
+
++ /*
++ * There is nothing to do if neither the src nor the dst cgroup is
++ * freezing and the task is not frozen.
++ */
++ if (!test_bit(CGRP_FREEZE, &src->flags) &&
++ !test_bit(CGRP_FREEZE, &dst->flags) &&
++ !task->frozen)
++ return;
++
+ /*
+ * Adjust counters of freezing and frozen tasks.
+ * Note, that if the task is frozen, but the destination cgroup is not
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 00a014670ed0..8f66a4833ded 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5607,10 +5607,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ perf_pmu_output_stop(event);
+
+ /* now it's safe to free the pages */
+- if (!rb->aux_mmap_locked)
+- atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+- else
+- atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
++ atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
++ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+
+ /* this has to be the last one */
+ rb_free_aux(rb);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 44123b4d14e8..8dacda4b0362 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -810,7 +810,7 @@ static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
+ return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
+ }
+
+-static inline enum uclamp_id uclamp_none(enum uclamp_id clamp_id)
++static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
+ {
+ if (clamp_id == UCLAMP_MIN)
+ return 0;
+@@ -853,7 +853,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
+ }
+
+ static inline
+-enum uclamp_id uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
++unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
+ unsigned int clamp_value)
+ {
+ struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
+@@ -918,7 +918,7 @@ uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
+ return uc_req;
+ }
+
+-enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
++unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
+ {
+ struct uclamp_se uc_eff;
+
+diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
+index b5dcd1d83c7f..7c2fe50fd76d 100644
+--- a/kernel/sched/cpufreq.c
++++ b/kernel/sched/cpufreq.c
+@@ -5,6 +5,8 @@
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
++#include <linux/cpufreq.h>
++
+ #include "sched.h"
+
+ DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
+@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
+ rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
++
++/**
++ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
++ * @policy: cpufreq policy to check.
++ *
++ * Return 'true' if:
++ * - the local and remote CPUs share @policy, or
++ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
++ * offline (in which case it is not expected to run cpufreq updates any more).
++ */
++bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
++{
++ return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
++ (policy->dvfs_possible_from_any_cpu &&
++ rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
++}
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 86800b4d5453..b6f56e7c8dd1 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ * by the hardware, as calculating the frequency is pointless if
+ * we cannot in fact act on it.
+ *
+- * For the slow switching platforms, the kthread is always scheduled on
+- * the right set of CPUs and any CPU can find the next frequency and
+- * schedule the kthread.
++ * This is needed on the slow switching platforms too to prevent CPUs
++ * going offline from leaving stale IRQ work items behind.
+ */
+- if (sg_policy->policy->fast_switch_enabled &&
+- !cpufreq_this_cpu_can_update(sg_policy->policy))
++ if (!cpufreq_this_cpu_can_update(sg_policy->policy))
+ return false;
+
+ if (unlikely(sg_policy->limits_changed)) {
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c8870c5bd7df..49ed949f850c 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2309,7 +2309,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+ #endif /* CONFIG_CPU_FREQ */
+
+ #ifdef CONFIG_UCLAMP_TASK
+-enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
++unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+
+ static __always_inline
+ unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 6a0ee9178365..2fa72419bbd7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4609,7 +4609,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
+
+ if (mask == TRACE_ITER_RECORD_TGID) {
+ if (!tgid_map)
+- tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
++ tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
+ sizeof(*tgid_map),
+ GFP_KERNEL);
+ if (!tgid_map) {
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 1552a95c743b..7f890262c8a3 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -435,11 +435,10 @@ static int disable_trace_kprobe(struct trace_event_call *call,
+
+ #if defined(CONFIG_KPROBES_ON_FTRACE) && \
+ !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
+-static bool within_notrace_func(struct trace_kprobe *tk)
++static bool __within_notrace_func(unsigned long addr)
+ {
+- unsigned long offset, size, addr;
++ unsigned long offset, size;
+
+- addr = trace_kprobe_address(tk);
+ if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
+ return false;
+
+@@ -452,6 +451,28 @@ static bool within_notrace_func(struct trace_kprobe *tk)
+ */
+ return !ftrace_location_range(addr, addr + size - 1);
+ }
++
++static bool within_notrace_func(struct trace_kprobe *tk)
++{
++ unsigned long addr = trace_kprobe_address(tk);
++ char symname[KSYM_NAME_LEN], *p;
++
++ if (!__within_notrace_func(addr))
++ return false;
++
++ /* Check if the address is on a suffixed-symbol */
++ if (!lookup_symbol_name(addr, symname)) {
++ p = strchr(symname, '.');
++ if (!p)
++ return true;
++ *p = '\0';
++ addr = (unsigned long)kprobe_lookup_name(symname, 0);
++ if (addr)
++ return __within_notrace_func(addr);
++ }
++
++ return true;
++}
+ #else
+ #define within_notrace_func(tk) (false)
+ #endif
+diff --git a/lib/ubsan.c b/lib/ubsan.c
+index e7d31735950d..0c4681118fcd 100644
+--- a/lib/ubsan.c
++++ b/lib/ubsan.c
+@@ -374,9 +374,10 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
+ struct type_descriptor *lhs_type = data->lhs_type;
+ char rhs_str[VALUE_LENGTH];
+ char lhs_str[VALUE_LENGTH];
++ unsigned long ua_flags = user_access_save();
+
+ if (suppress_report(&data->location))
+- return;
++ goto out;
+
+ ubsan_prologue(&data->location, &flags);
+
+@@ -402,6 +403,8 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
+ lhs_type->type_name);
+
+ ubsan_epilogue(&flags);
++out:
++ user_access_restore(ua_flags);
+ }
+ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index ee4eecc7e1c2..e7f10c4b40f0 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -422,7 +422,7 @@ void register_shrinker_prepared(struct shrinker *shrinker)
+ {
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+-#ifdef CONFIG_MEMCG_KMEM
++#ifdef CONFIG_MEMCG
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+ idr_replace(&shrinker_idr, shrinker, shrinker->id);
+ #endif
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index ad5b0ac1f9ce..7ff92dd4c53c 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -934,6 +934,14 @@ static void hci_req_directed_advertising(struct hci_request *req,
+ return;
+
+ memset(&cp, 0, sizeof(cp));
++
++ /* Some controllers might reject the command if the intervals are
++ * not within the range used for undirected advertising.
++ * BCM20702A0 is known to be affected by this.
++ */
++ cp.min_interval = cpu_to_le16(0x0020);
++ cp.max_interval = cpu_to_le16(0x0020);
++
+ cp.type = LE_ADV_DIRECT_IND;
+ cp.own_address_type = own_addr_type;
+ cp.direct_addr_type = conn->dst_type;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 04bc79359a17..0cc9ce917222 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -842,8 +842,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+ struct hci_cp_le_write_def_data_len cp;
+
+- cp.tx_len = hdev->le_max_tx_len;
+- cp.tx_time = hdev->le_max_tx_time;
++ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
++ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
+ hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
+ }
+
+@@ -4440,7 +4440,14 @@ static void hci_rx_work(struct work_struct *work)
+ hci_send_to_sock(hdev, skb);
+ }
+
+- if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
++ /* If the device has been opened in HCI_USER_CHANNEL,
++ * userspace has exclusive access to the device.
++ * While the device is in HCI_INIT, we still need to pass
++ * the data packets to the driver in order
++ * to complete its setup().
++ */
++ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
++ !test_bit(HCI_INIT, &hdev->flags)) {
+ kfree_skb(skb);
+ continue;
+ }
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 7f6a581b5b7e..3d25dbf10b26 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -1273,6 +1273,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+
+ instance_flags = get_adv_instance_flags(hdev, instance);
+
++ /* If the instance already has the flags set, skip adding
++ * them again.
++ */
++ if (adv_instance && eir_get_data(adv_instance->adv_data,
++ adv_instance->adv_data_len, EIR_FLAGS,
++ NULL))
++ goto skip_flags;
++
+ /* The Add Advertising command allows userspace to set both the general
+ * and limited discoverable flags.
+ */
+@@ -1305,6 +1313,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
+ }
+ }
+
++skip_flags:
+ if (adv_instance) {
+ memcpy(ptr, adv_instance->adv_data,
+ adv_instance->adv_data_len);
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index de09b0a65791..f7587428febd 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -423,9 +423,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ {
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct j1939_sock *jsk = j1939_sk(sock->sk);
+- struct j1939_priv *priv = jsk->priv;
+- struct sock *sk = sock->sk;
+- struct net *net = sock_net(sk);
++ struct j1939_priv *priv;
++ struct sock *sk;
++ struct net *net;
+ int ret = 0;
+
+ ret = j1939_sk_sanity_check(addr, len);
+@@ -434,6 +434,10 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+
+ lock_sock(sock->sk);
+
++ priv = jsk->priv;
++ sk = sock->sk;
++ net = sock_net(sk);
++
+ /* Already bound to an interface? */
+ if (jsk->state & J1939_SOCK_BOUND) {
+ /* A re-bind() to a different interface is not
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5480edff0c86..08ebc3ac5343 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -98,9 +98,6 @@ static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
+
+ static void neigh_cleanup_and_release(struct neighbour *neigh)
+ {
+- if (neigh->parms->neigh_cleanup)
+- neigh->parms->neigh_cleanup(neigh);
+-
+ trace_neigh_cleanup_and_release(neigh, 0);
+ __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index ae3bcb1540ec..b4db68e5caa9 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -919,14 +919,17 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
++ /* The later kobject_put() will trigger an rx_queue_release() call,
++ * which drops the dev refcount: take that reference here.
++ */
++ dev_hold(queue->dev);
++
+ kobj->kset = dev->queues_kset;
+ error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
+ "rx-%u", index);
+ if (error)
+ goto err;
+
+- dev_hold(queue->dev);
+-
+ if (dev->sysfs_rx_queue_group) {
+ error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
+ if (error)
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index ab8ba5835ca0..5a3d645fe1bc 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -1030,7 +1030,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
+ I802_DEBUG_INC(local->dot11FailedCount);
+ }
+
+- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
++ if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
++ ieee80211_has_pm(fc) &&
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ local->ps_sdata && !(local->scanning)) {
+diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
+index 78fe622eba65..11b554ce07ff 100644
+--- a/net/nfc/nci/uart.c
++++ b/net/nfc/nci/uart.c
+@@ -346,7 +346,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
+ nu->rx_packet_len = -1;
+ nu->rx_skb = nci_skb_alloc(nu->ndev,
+ NCI_MAX_PACKET_SIZE,
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (!nu->rx_skb)
+ return -ENOMEM;
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 82a50e850245..529d4ce945db 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -544,7 +544,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
+ msec = 1;
+ div = ecmd.base.speed / 1000;
+ }
+- }
++ } else
++ return DEFAULT_PRB_RETIRE_TOV;
+
+ mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
+
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 08d14d86ecfb..681ffb3545db 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -227,6 +227,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
+ sa->sin_port = sh->dest;
+ sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
+ }
++ memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
+ }
+
+ /* Initialize an sctp_addr from a socket. */
+@@ -235,6 +236,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = 0;
+ addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ }
+
+ /* Initialize sk->sk_rcv_saddr from sctp_addr. */
+@@ -257,6 +259,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr,
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_port = port;
+ addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ }
+
+ /* Initialize an address parameter from a sctp_addr and return the length
+@@ -281,6 +284,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
+ saddr->v4.sin_family = AF_INET;
+ saddr->v4.sin_port = port;
+ saddr->v4.sin_addr.s_addr = fl4->saddr;
++ memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
+ }
+
+ /* Compare two addresses exactly. */
+@@ -303,6 +307,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
+ addr->v4.sin_family = AF_INET;
+ addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr->v4.sin_port = port;
++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ }
+
+ /* Is this a wildcard address? */
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index e83cdaa2ab76..6a30392068a0 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -84,8 +84,10 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
+ return 0;
+
+ ret = genradix_prealloc(&stream->out, outcnt, gfp);
+- if (ret)
++ if (ret) {
++ genradix_free(&stream->out);
+ return ret;
++ }
+
+ stream->outcnt = outcnt;
+ return 0;
+@@ -100,8 +102,10 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
+ return 0;
+
+ ret = genradix_prealloc(&stream->in, incnt, gfp);
+- if (ret)
++ if (ret) {
++ genradix_free(&stream->in);
+ return ret;
++ }
+
+ stream->incnt = incnt;
+ return 0;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 2ba97ff325a5..0c5fcb8ed404 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -231,10 +231,12 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
+ lgr->conns_all = RB_ROOT;
+ if (ini->is_smcd) {
+ /* SMC-D specific settings */
++ get_device(&ini->ism_dev->dev);
+ lgr->peer_gid = ini->ism_gid;
+ lgr->smcd = ini->ism_dev;
+ } else {
+ /* SMC-R specific settings */
++ get_device(&ini->ib_dev->ibdev->dev);
+ lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+ memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
+ SMC_SYSTEMID_LEN);
+@@ -433,10 +435,13 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
+ static void smc_lgr_free(struct smc_link_group *lgr)
+ {
+ smc_lgr_free_bufs(lgr);
+- if (lgr->is_smcd)
++ if (lgr->is_smcd) {
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+- else
++ put_device(&lgr->smcd->dev);
++ } else {
+ smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
++ put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
++ }
+ kfree(lgr);
+ }
+
+diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
+index 4af4046d71be..40873a5d1461 100644
+--- a/samples/pktgen/functions.sh
++++ b/samples/pktgen/functions.sh
+@@ -5,6 +5,8 @@
+ # Author: Jesper Dangaaard Brouer
+ # License: GPL
+
++set -o errexit
++
+ ## -- General shell logging cmds --
+ function err() {
+ local exitcode=$1
+@@ -58,6 +60,7 @@ function pg_set() {
+ function proc_cmd() {
+ local result
+ local proc_file=$1
++ local status=0
+ # after shift, the remaining args are contained in $@
+ shift
+ local proc_ctrl=${PROC_DIR}/$proc_file
+@@ -73,13 +76,13 @@ function proc_cmd() {
+ echo "cmd: $@ > $proc_ctrl"
+ fi
+ # Quoting of "$@" is important for space expansion
+- echo "$@" > "$proc_ctrl"
+- local status=$?
++ echo "$@" > "$proc_ctrl" || status=$?
+
+- result=$(grep "Result: OK:" $proc_ctrl)
+- # Due to pgctrl, cannot use exit code $? from grep
+- if [[ "$result" == "" ]]; then
+- grep "Result:" $proc_ctrl >&2
++ if [[ "$proc_file" != "pgctrl" ]]; then
++ result=$(grep "Result: OK:" $proc_ctrl) || true
++ if [[ "$result" == "" ]]; then
++ grep "Result:" $proc_ctrl >&2
++ fi
+ fi
+ if (( $status != 0 )); then
+ err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
+@@ -105,6 +108,8 @@ function pgset() {
+ fi
+ }
+
++[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT
++
+ ## -- General shell tricks --
+
+ function root_check_run_with_sudo() {
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 91c6ad58729f..d4280568a41e 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -222,7 +222,8 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
+ return false;
+
+ if (substream->ops->mmap ||
+- substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV)
++ (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
++ substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
+ return true;
+
+ return dma_can_mmap(substream->dma_buffer.dev.dev);
+@@ -705,6 +706,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
+ runtime->boundary *= 2;
+
++ /* clear the buffer for avoiding possible kernel info leaks */
++ if (runtime->dma_area && !substream->ops->copy_user)
++ memset(runtime->dma_area, 0, runtime->dma_bytes);
++
+ snd_pcm_timer_resolution_change(substream);
+ snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
+
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 59ae21b0bb93..013f0e69ff0f 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -74,6 +74,9 @@ static LIST_HEAD(snd_timer_slave_list);
+ /* lock for slave active lists */
+ static DEFINE_SPINLOCK(slave_active_lock);
+
++#define MAX_SLAVE_INSTANCES 1000
++static int num_slaves;
++
+ static DEFINE_MUTEX(register_mutex);
+
+ static int snd_timer_free(struct snd_timer *timer);
+@@ -252,6 +255,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ err = -EINVAL;
+ goto unlock;
+ }
++ if (num_slaves >= MAX_SLAVE_INSTANCES) {
++ err = -EBUSY;
++ goto unlock;
++ }
+ timeri = snd_timer_instance_new(owner, NULL);
+ if (!timeri) {
+ err = -ENOMEM;
+@@ -261,6 +268,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ timeri->slave_id = tid->device;
+ timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
+ list_add_tail(&timeri->open_list, &snd_timer_slave_list);
++ num_slaves++;
+ err = snd_timer_check_slave(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri, &card_dev_to_put);
+@@ -356,6 +364,8 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+ }
+
+ list_del(&timeri->open_list);
++ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++ num_slaves--;
+
+ /* force to stop the timer */
+ snd_timer_stop(timeri);
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index 6c1497d9f52b..ce07ea0d4e71 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -415,15 +415,16 @@ static int make_both_connections(struct snd_bebob *bebob)
+ return 0;
+ }
+
+-static void
+-break_both_connections(struct snd_bebob *bebob)
++static void break_both_connections(struct snd_bebob *bebob)
+ {
+ cmp_connection_break(&bebob->in_conn);
+ cmp_connection_break(&bebob->out_conn);
+
+- /* These models seems to be in transition state for a longer time. */
+- if (bebob->maudio_special_quirk != NULL)
+- msleep(200);
++ // These models seem to stay in a transition state for a longer time.
++ // Any transaction issued while in that state is corrupted; in the
++ // worst case, the device reboots.
++ if (bebob->version < 2)
++ msleep(600);
+ }
+
+ static int
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index b7a1abb3e231..32ed46464af7 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1809,13 +1809,14 @@ struct scp_msg {
+
+ static void dspio_clear_response_queue(struct hda_codec *codec)
+ {
++ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ unsigned int dummy = 0;
+- int status = -1;
++ int status;
+
+ /* clear all from the response queue */
+ do {
+ status = dspio_read(codec, &dummy);
+- } while (status == 0);
++ } while (status == 0 && time_before(jiffies, timeout));
+ }
+
+ static int dspio_get_response_data(struct hda_codec *codec)
+@@ -7588,12 +7589,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
+ struct ca0132_spec *spec = codec->spec;
+
+ codec_dbg(codec, "ca0132_process_dsp_response\n");
++ snd_hda_power_up_pm(codec);
+ if (spec->wait_scp) {
+ if (dspio_get_response_data(codec) >= 0)
+ spec->wait_scp = 0;
+ }
+
+ dspio_clear_response_queue(codec);
++ snd_hda_power_down_pm(codec);
+ }
+
+ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+@@ -7604,11 +7607,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+ /* Delay enabling the HP amp, to let the mic-detection
+ * state machine run.
+ */
+- cancel_delayed_work(&spec->unsol_hp_work);
+- schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
+ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+ if (tbl)
+ tbl->block_report = 1;
++ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
+ }
+
+ static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+@@ -8454,12 +8456,25 @@ static void ca0132_reboot_notify(struct hda_codec *codec)
+ codec->patch_ops.free(codec);
+ }
+
++#ifdef CONFIG_PM
++static int ca0132_suspend(struct hda_codec *codec)
++{
++ struct ca0132_spec *spec = codec->spec;
++
++ cancel_delayed_work_sync(&spec->unsol_hp_work);
++ return 0;
++}
++#endif
++
+ static const struct hda_codec_ops ca0132_patch_ops = {
+ .build_controls = ca0132_build_controls,
+ .build_pcms = ca0132_build_pcms,
+ .init = ca0132_init,
+ .free = ca0132_free,
+ .unsol_event = snd_hda_jack_unsol_event,
++#ifdef CONFIG_PM
++ .suspend = ca0132_suspend,
++#endif
+ .reboot_notify = ca0132_reboot_notify,
+ };
+
+diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
+index 315a3d39bc09..8bc9450da79c 100644
+--- a/sound/soc/codecs/rt5677.c
++++ b/sound/soc/codecs/rt5677.c
+@@ -298,6 +298,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
+ case RT5677_I2C_MASTER_CTRL7:
+ case RT5677_I2C_MASTER_CTRL8:
+ case RT5677_HAP_GENE_CTRL2:
++ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
+ case RT5677_PWR_DSP_ST:
+ case RT5677_PRIV_DATA:
+ case RT5677_ASRC_22:
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index cf64e109c658..7b087d94141b 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -2410,6 +2410,8 @@ static int wm2200_i2c_probe(struct i2c_client *i2c,
+
+ err_pm_runtime:
+ pm_runtime_disable(&i2c->dev);
++ if (i2c->irq)
++ free_irq(i2c->irq, wm2200);
+ err_reset:
+ if (wm2200->pdata.reset)
+ gpio_set_value_cansleep(wm2200->pdata.reset, 0);
+@@ -2426,12 +2428,15 @@ static int wm2200_i2c_remove(struct i2c_client *i2c)
+ {
+ struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
+
++ pm_runtime_disable(&i2c->dev);
+ if (i2c->irq)
+ free_irq(i2c->irq, wm2200);
+ if (wm2200->pdata.reset)
+ gpio_set_value_cansleep(wm2200->pdata.reset, 0);
+ if (wm2200->pdata.ldo_ena)
+ gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
++ regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
++ wm2200->core_supplies);
+
+ return 0;
+ }
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index 4af0e519e623..91cc63c5a51f 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -2617,6 +2617,7 @@ static int wm5100_i2c_probe(struct i2c_client *i2c,
+ return ret;
+
+ err_reset:
++ pm_runtime_disable(&i2c->dev);
+ if (i2c->irq)
+ free_irq(i2c->irq, wm5100);
+ wm5100_free_gpio(i2c);
+@@ -2640,6 +2641,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c)
+ {
+ struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
+
++ pm_runtime_disable(&i2c->dev);
+ if (i2c->irq)
+ free_irq(i2c->irq, wm5100);
+ wm5100_free_gpio(i2c);
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index bcb3c9d5abf0..9e8c564f6e9c 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -1917,6 +1917,7 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
+ snd_soc_component_update_bits(component, WM8904_BIAS_CONTROL_0,
+ WM8904_BIAS_ENA, 0);
+
++ snd_soc_component_write(component, WM8904_SW_RESET_AND_ID, 0);
+ regcache_cache_only(wm8904->regmap, true);
+ regcache_mark_dirty(wm8904->regmap);
+
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 9c1aa4ec9cba..dd2b5ad08659 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -405,10 +405,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+- .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+- BYT_RT5640_MCLK_EN |
+- BYT_RT5640_SSP0_AIF1),
+-
++ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++ BYT_RT5640_JD_SRC_JD2_IN4N |
++ BYT_RT5640_OVCD_TH_2000UA |
++ BYT_RT5640_OVCD_SF_0P75 |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
+ },
+ {
+ .matches = {
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 74dda8784f1a..67b276a65a8d 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -400,6 +400,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ dmic_constraints);
+
++ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
++ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
++
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index b600d3eaaf5c..a6e96cf1d8ff 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -877,6 +877,11 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
+ int i, ret = 0;
+
+ mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
++
++ ret = soc_pcm_params_symmetry(substream, params);
++ if (ret)
++ goto out;
++
+ if (rtd->dai_link->ops->hw_params) {
+ ret = rtd->dai_link->ops->hw_params(substream, params);
+ if (ret < 0) {
+@@ -958,9 +963,6 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
+ }
+ component = NULL;
+
+- ret = soc_pcm_params_symmetry(substream, params);
+- if (ret)
+- goto component_err;
+ out:
+ mutex_unlock(&rtd->card->pcm_mutex);
+ return ret;
+@@ -1385,6 +1387,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ struct snd_soc_dapm_widget *widget;
+ struct snd_soc_dai *dai;
+ int prune = 0;
++ int do_prune;
+
+ /* Destroy any old FE <--> BE connections */
+ for_each_dpcm_be(fe, stream, dpcm) {
+@@ -1398,13 +1401,16 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ continue;
+
+ /* is there a valid CODEC DAI widget for this BE */
++ do_prune = 1;
+ for_each_rtd_codec_dai(dpcm->be, i, dai) {
+ widget = dai_get_widget(dai, stream);
+
+ /* prune the BE if it's no longer in our active list */
+ if (widget && widget_in_list(list, widget))
+- continue;
++ do_prune = 0;
+ }
++ if (!do_prune)
++ continue;
+
+ dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
+ stream ? "capture" : "playback",
+diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
+index 5acae75f5750..71f318bc2c74 100644
+--- a/sound/soc/sof/imx/Kconfig
++++ b/sound/soc/sof/imx/Kconfig
+@@ -11,8 +11,8 @@ config SND_SOC_SOF_IMX_TOPLEVEL
+
+ if SND_SOC_SOF_IMX_TOPLEVEL
+
+-config SND_SOC_SOF_IMX8
+- tristate "SOF support for i.MX8"
++config SND_SOC_SOF_IMX8_SUPPORT
++ bool "SOF support for i.MX8"
+ depends on IMX_SCU
+ depends on IMX_DSP
+ help
+@@ -20,4 +20,8 @@ config SND_SOC_SOF_IMX8
+ Say Y if you have such a device.
+ If unsure select "N".
+
++config SND_SOC_SOF_IMX8
++ def_tristate SND_SOC_SOF_OF
++ depends on SND_SOC_SOF_IMX8_SUPPORT
++
+ endif ## SND_SOC_SOF_IMX_IMX_TOPLEVEL
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 06e84679087b..5a5163eef2ef 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -268,6 +268,7 @@ static int hda_init(struct snd_sof_dev *sdev)
+
+ bus->use_posbuf = 1;
+ bus->bdl_pos_adj = 0;
++ bus->sync_write = 1;
+
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
+index 4452594c2e17..fa299e078156 100644
+--- a/sound/soc/sof/topology.c
++++ b/sound/soc/sof/topology.c
+@@ -2828,6 +2828,10 @@ static int sof_link_load(struct snd_soc_component *scomp, int index,
+ if (!link->no_pcm) {
+ link->nonatomic = true;
+
++ /* set trigger order */
++ link->trigger[0] = SND_SOC_DPCM_TRIGGER_POST;
++ link->trigger[1] = SND_SOC_DPCM_TRIGGER_POST;
++
+ /* nothing more to do for FE dai links */
+ return 0;
+ }
+diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
+index e0b85930dd77..0a0e9112f284 100644
+--- a/tools/arch/x86/lib/x86-opcode-map.txt
++++ b/tools/arch/x86/lib/x86-opcode-map.txt
+@@ -333,7 +333,7 @@ AVXcode: 1
+ 06: CLTS
+ 07: SYSRET (o64)
+ 08: INVD
+-09: WBINVD
++09: WBINVD | WBNOINVD (F3)
+ 0a:
+ 0b: UD2 (1B)
+ 0c:
+@@ -364,7 +364,7 @@ AVXcode: 1
+ # a ModR/M byte.
+ 1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+ 1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+-1c:
++1c: Grp20 (1A),(1C)
+ 1d:
+ 1e:
+ 1f: NOP Ev
+@@ -792,6 +792,8 @@ f3: Grp17 (1A)
+ f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+ f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+ f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
++f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
++f9: MOVDIRI My,Gy
+ EndTable
+
+ Table: 3-byte opcode 2 (0x0f 0x3a)
+@@ -943,9 +945,9 @@ GrpTable: Grp6
+ EndTable
+
+ GrpTable: Grp7
+-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
++0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
++1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
++2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
+ 3: LIDT Ms
+ 4: SMSW Mw/Rv
+ 5: rdpkru (110),(11B) | wrpkru (111),(11B)
+@@ -1020,7 +1022,7 @@ GrpTable: Grp15
+ 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+ 4: XSAVE | ptwrite Ey (F3),(11B)
+ 5: XRSTOR | lfence (11B)
+-6: XSAVEOPT | clwb (66) | mfence (11B)
++6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
+ 7: clflush | clflushopt (66) | sfence (11B)
+ EndTable
+
+@@ -1051,6 +1053,10 @@ GrpTable: Grp19
+ 6: vscatterpf1qps/d Wx (66),(ev)
+ EndTable
+
++GrpTable: Grp20
++0: cldemote Mb
++EndTable
++
+ # AMD's Prefetch Group
+ GrpTable: GrpP
+ 0: PREFETCH
+diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
+index 5d1995fd369c..5535650800ab 100644
+--- a/tools/bpf/Makefile
++++ b/tools/bpf/Makefile
+@@ -16,7 +16,13 @@ CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
+ # isn't set and when invoked from selftests build, where srctree
+ # is set to ".". building_out_of_srctree is undefined for in srctree
+ # builds
++ifeq ($(srctree),)
++update_srctree := 1
++endif
+ ifndef building_out_of_srctree
++update_srctree := 1
++endif
++ifeq ($(update_srctree),1)
+ srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+ srctree := $(patsubst %/,%,$(dir $(srctree)))
+ endif
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index ede55fec3618..87f27e2664c5 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -876,7 +876,6 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ __u16 vlen = btf_vlen(t);
+
+ packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
+- align = packed ? 1 : btf_align_of(d->btf, id);
+
+ btf_dump_printf(d, "%s%s%s {",
+ is_struct ? "struct" : "union",
+@@ -906,6 +905,13 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ btf_dump_printf(d, ";");
+ }
+
++ /* pad at the end, if necessary */
++ if (is_struct) {
++ align = packed ? 1 : btf_align_of(d->btf, id);
++ btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
++ lvl + 1);
++ }
++
+ if (vlen)
+ btf_dump_printf(d, "\n");
+ btf_dump_printf(d, "%s}", pfx(lvl));
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index e0276520171b..a267cd0c0ce2 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1897,16 +1897,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
+ return -errno;
+
+ new_fd = open("/", O_RDONLY | O_CLOEXEC);
+- if (new_fd < 0)
++ if (new_fd < 0) {
++ err = -errno;
+ goto err_free_new_name;
++ }
+
+ new_fd = dup3(fd, new_fd, O_CLOEXEC);
+- if (new_fd < 0)
++ if (new_fd < 0) {
++ err = -errno;
+ goto err_close_new_fd;
++ }
+
+ err = zclose(map->fd);
+- if (err)
++ if (err) {
++ err = -errno;
+ goto err_close_new_fd;
++ }
+ free(map->name);
+
+ map->fd = new_fd;
+@@ -1925,7 +1931,7 @@ err_close_new_fd:
+ close(new_fd);
+ err_free_new_name:
+ free(new_name);
+- return -errno;
++ return err;
+ }
+
+ int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index a902838f9fcc..70f9e10de286 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -163,6 +163,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
+ umem->umem_area = umem_area;
+ xsk_set_umem_config(&umem->config, usr_config);
+
++ memset(&mr, 0, sizeof(mr));
+ mr.addr = (uintptr_t)umem_area;
+ mr.len = size;
+ mr.chunk_size = umem->config.frame_size;
+@@ -343,13 +344,18 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
+ goto out;
+ }
+
+- if (err || channels.max_combined == 0)
++ if (err) {
+ /* If the device says it has no channels, then all traffic
+ * is sent to a single stream, so max queues = 1.
+ */
+ ret = 1;
+- else
+- ret = channels.max_combined;
++ } else {
++ /* Take the max of rx, tx, combined. Drivers return
++ * the number of channels in different ways.
++ */
++ ret = max(channels.max_rx, channels.max_tx);
++ ret = max(ret, (int)channels.max_combined);
++ }
+
+ out:
+ close(fd);
+@@ -465,6 +471,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
+ }
+ } else {
+ xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
++ if (xsk->prog_fd < 0)
++ return -errno;
+ err = xsk_lookup_bpf_maps(xsk);
+ if (err) {
+ close(xsk->prog_fd);
+diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
+index 5b2cd5e58df0..5dbb0dde208c 100644
+--- a/tools/lib/subcmd/Makefile
++++ b/tools/lib/subcmd/Makefile
+@@ -28,7 +28,9 @@ ifeq ($(DEBUG),0)
+ endif
+ endif
+
+-ifeq ($(CC_NO_CLANG), 0)
++ifeq ($(DEBUG),1)
++ CFLAGS += -O0
++else ifeq ($(CC_NO_CLANG), 0)
+ CFLAGS += -O3
+ else
+ CFLAGS += -O6
+diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
+index 552592d153fb..f3cbf86e51ac 100644
+--- a/tools/lib/traceevent/parse-filter.c
++++ b/tools/lib/traceevent/parse-filter.c
+@@ -1473,8 +1473,10 @@ static int copy_filter_type(struct tep_event_filter *filter,
+ if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
+ /* Add trivial event */
+ arg = allocate_arg();
+- if (arg == NULL)
++ if (arg == NULL) {
++ free(str);
+ return -1;
++ }
+
+ arg->type = TEP_FILTER_ARG_BOOLEAN;
+ if (strcmp(str, "TRUE") == 0)
+@@ -1483,8 +1485,11 @@ static int copy_filter_type(struct tep_event_filter *filter,
+ arg->boolean.value = 0;
+
+ filter_type = add_filter_type(filter, event->id);
+- if (filter_type == NULL)
++ if (filter_type == NULL) {
++ free(str);
++ free_arg(arg);
+ return -1;
++ }
+
+ filter_type->filter = arg;
+
+diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
+index ea2ff4b94074..2a9b4fe4a84e 100644
+--- a/tools/memory-model/linux-kernel.cat
++++ b/tools/memory-model/linux-kernel.cat
+@@ -197,7 +197,7 @@ empty (wr-incoh | rw-incoh | ww-incoh) as plain-coherence
+ (* Actual races *)
+ let ww-nonrace = ww-vis & ((Marked * W) | rw-xbstar) & ((W * Marked) | wr-vis)
+ let ww-race = (pre-race & co) \ ww-nonrace
+-let wr-race = (pre-race & (co? ; rf)) \ wr-vis
++let wr-race = (pre-race & (co? ; rf)) \ wr-vis \ rw-xbstar^-1
+ let rw-race = (pre-race & fr) \ rw-xbstar
+
+ flag ~empty (ww-race | wr-race | rw-race) as data-race
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 044c9a3cb247..f53d3c515cdc 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -481,6 +481,7 @@ static const char *uaccess_safe_builtin[] = {
+ "ubsan_type_mismatch_common",
+ "__ubsan_handle_type_mismatch",
+ "__ubsan_handle_type_mismatch_v1",
++ "__ubsan_handle_shift_out_of_bounds",
+ /* misc */
+ "csum_partial_copy_generic",
+ "__memcpy_mcsafe",
+diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
+index 5df788985130..8dfa3e5229f1 100644
+--- a/tools/perf/arch/arm64/util/sym-handling.c
++++ b/tools/perf/arch/arm64/util/sym-handling.c
+@@ -6,9 +6,10 @@
+
+ #include "symbol.h" // for the elf__needs_adjust_symbols() prototype
+ #include <stdbool.h>
+-#include <gelf.h>
+
+ #ifdef HAVE_LIBELF_SUPPORT
++#include <gelf.h>
++
+ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+ {
+ return ehdr.e_type == ET_EXEC ||
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index aae0e57c60fb..7accaf8ef689 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -399,6 +399,13 @@ static int report__setup_sample_type(struct report *rep)
+ PERF_SAMPLE_BRANCH_ANY))
+ rep->nonany_branch_mode = true;
+
++#ifndef HAVE_LIBUNWIND_SUPPORT
++ if (dwarf_callchain_users) {
++ ui__warning("Please install libunwind development packages "
++ "during the perf build.\n");
++ }
++#endif
++
+ return 0;
+ }
+
+diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+index 0d1556fcdffe..99f4fc425564 100644
+--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
++++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+@@ -15,7 +15,7 @@
+ },
+ {
+ "EventCode": "0x04",
+- "EventName": "uncore_hisi_ddrc.flux_wr",
++ "EventName": "uncore_hisi_ddrc.pre_cmd",
+ "BriefDescription": "DDRC precharge commands",
+ "PublicDescription": "DDRC precharge commands",
+ "Unit": "hisi_sccl,ddrc",
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index e2837260ca4d..99e3fd04a5cb 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -758,6 +758,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
+ char *line, *p;
+ int line_num;
+ char *tblname;
++ int ret = 0;
+
+ pr_info("%s: Processing mapfile %s\n", prog, fpath);
+
+@@ -769,6 +770,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
+ if (!mapfp) {
+ pr_info("%s: Error %s opening %s\n", prog, strerror(errno),
+ fpath);
++ free(line);
+ return -1;
+ }
+
+@@ -795,7 +797,8 @@ static int process_mapfile(FILE *outfp, char *fpath)
+ /* TODO Deal with lines longer than 16K */
+ pr_info("%s: Mapfile %s: line %d too long, aborting\n",
+ prog, fpath, line_num);
+- return -1;
++ ret = -1;
++ goto out;
+ }
+ line[strlen(line)-1] = '\0';
+
+@@ -825,7 +828,9 @@ static int process_mapfile(FILE *outfp, char *fpath)
+
+ out:
+ print_mapping_table_suffix(outfp);
+- return 0;
++ fclose(mapfp);
++ free(line);
++ return ret;
+ }
+
+ /*
+@@ -1122,6 +1127,7 @@ int main(int argc, char *argv[])
+ goto empty_map;
+ } else if (rc < 0) {
+ /* Make build fail */
++ fclose(eventsfp);
+ free_arch_std_events();
+ return 1;
+ } else if (rc) {
+@@ -1134,6 +1140,7 @@ int main(int argc, char *argv[])
+ goto empty_map;
+ } else if (rc < 0) {
+ /* Make build fail */
++ fclose(eventsfp);
+ free_arch_std_events();
+ return 1;
+ } else if (rc) {
+@@ -1151,6 +1158,8 @@ int main(int argc, char *argv[])
+ if (process_mapfile(eventsfp, mapfile)) {
+ pr_info("%s: Error processing mapfile %s\n", prog, mapfile);
+ /* Make build fail */
++ fclose(eventsfp);
++ free_arch_std_events();
+ return 1;
+ }
+
+diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
+index c1c2c13de254..166f411568a5 100644
+--- a/tools/perf/tests/bp_signal.c
++++ b/tools/perf/tests/bp_signal.c
+@@ -49,14 +49,6 @@ asm (
+ "__test_function:\n"
+ "incq (%rdi)\n"
+ "ret\n");
+-#elif defined (__aarch64__)
+-extern void __test_function(volatile long *ptr);
+-asm (
+- ".globl __test_function\n"
+- "__test_function:\n"
+- "str x30, [x0]\n"
+- "ret\n");
+-
+ #else
+ static void __test_function(volatile long *ptr)
+ {
+@@ -302,10 +294,15 @@ bool test__bp_signal_is_supported(void)
+ * stepping into the SIGIO handler and getting stuck on the
+ * breakpointed instruction.
+ *
++ * Since arm64 has the same issue as arm with single-step
++ * handling, this case also gets stuck on the breakpointed
++ * instruction.
++ *
+ * Just disable the test for these architectures until these
+ * issues are resolved.
+ */
+-#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
++#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || \
++ defined(__aarch64__)
+ return false;
+ #else
+ return true;
+diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
+index bce3a4cb4c89..d85c9f608564 100644
+--- a/tools/perf/tests/task-exit.c
++++ b/tools/perf/tests/task-exit.c
+@@ -53,6 +53,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ struct mmap *md;
++ int retry_count = 0;
+
+ signal(SIGCHLD, sig_handler);
+
+@@ -110,6 +111,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
+ if (evlist__mmap(evlist, 128) < 0) {
+ pr_debug("failed to mmap events: %d (%s)\n", errno,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
++ err = -1;
+ goto out_delete_evlist;
+ }
+
+@@ -131,6 +133,13 @@ retry:
+ out_init:
+ if (!exited || !nr_exit) {
+ evlist__poll(evlist, -1);
++
++ if (retry_count++ > 1000) {
++ pr_debug("Failed after retrying 1000 times\n");
++ err = -1;
++ goto out_free_maps;
++ }
++
+ goto retry;
+ }
+
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index 4ba0f871f086..f5f855fff412 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -110,7 +110,7 @@ static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
+ * encode the etm queue number as the upper 16 bit and the channel as
+ * the lower 16 bit.
+ */
+-#define TO_CS_QUEUE_NR(queue_nr, trace_id_chan) \
++#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
+ (queue_nr << 16 | trace_chan_id)
+ #define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
+ #define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
+@@ -819,7 +819,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
+ * Note that packets decoded above are still in the traceID's packet
+ * queue and will be processed in cs_etm__process_queues().
+ */
+- cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_id_chan);
++ cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ out:
+ return ret;
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index df6cee5c071f..5544bfbd0f6c 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -307,21 +307,51 @@ bool die_is_func_def(Dwarf_Die *dw_die)
+ dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+ }
+
++/**
++ * die_entrypc - Returns entry PC (the lowest address) of a DIE
++ * @dw_die: a DIE
++ * @addr: where to store entry PC
++ *
++ * Since dwarf_entrypc() does not return the entry PC if the DIE only has an
++ * address range, we have to use this to retrieve the lowest address from
++ * the address range attribute.
++ */
++int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
++{
++ Dwarf_Addr base, end;
++
++ if (!addr)
++ return -EINVAL;
++
++ if (dwarf_entrypc(dw_die, addr) == 0)
++ return 0;
++
++ return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
++}
++
+ /**
+ * die_is_func_instance - Ensure that this DIE is an instance of a subprogram
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is an instance (which has an entry address).
+- * This returns true if @dw_die is a function instance. If not, you need to
+- * call die_walk_instances() to find actual instances.
++ * This returns true if @dw_die is a function instance. If not, @dw_die
++ * must be a prototype. You can use die_walk_instances() to find actual
++ * instances.
+ **/
+ bool die_is_func_instance(Dwarf_Die *dw_die)
+ {
+ Dwarf_Addr tmp;
++ Dwarf_Attribute attr_mem;
++ int tag = dwarf_tag(dw_die);
+
+- /* Actually gcc optimizes non-inline as like as inlined */
+- return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
++ if (tag != DW_TAG_subprogram &&
++ tag != DW_TAG_inlined_subroutine)
++ return false;
++
++ return dwarf_entrypc(dw_die, &tmp) == 0 ||
++ dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
+ }
++
+ /**
+ * die_get_data_member_location - Get the data-member offset
+ * @mb_die: a DIE of a member of a data structure
+@@ -598,6 +628,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
+ Dwarf_Die *origin;
+ int tmp;
+
++ if (!die_is_func_instance(inst))
++ return DIE_FIND_CB_CONTINUE;
++
+ attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
+ if (attr == NULL)
+ return DIE_FIND_CB_CONTINUE;
+@@ -669,15 +702,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+ if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+ fname = die_get_call_file(in_die);
+ lineno = die_get_call_lineno(in_die);
+- if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
++ if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
++ if (!lw->recursive)
++ return DIE_FIND_CB_SIBLING;
+ }
+- if (!lw->recursive)
+- /* Don't need to search recursively */
+- return DIE_FIND_CB_SIBLING;
+
+ if (addr) {
+ fname = dwarf_decl_file(in_die);
+@@ -710,7 +742,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
+ /* Handle function declaration line */
+ fname = dwarf_decl_file(sp_die);
+ if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+- dwarf_entrypc(sp_die, &addr) == 0) {
++ die_entrypc(sp_die, &addr) == 0) {
+ lw.retval = callback(fname, lineno, addr, data);
+ if (lw.retval != 0)
+ goto done;
+@@ -724,6 +756,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+ {
+ struct __line_walk_param *lw = data;
+
++ /*
++ * Since an inlined function can include another inlined function in
++ * the same file, we need to walk it recursively.
++ */
+ lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
+ if (lw->retval != 0)
+ return DWARF_CB_ABORT;
+@@ -748,11 +784,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ Dwarf_Addr addr;
+- const char *fname, *decf = NULL;
++ const char *fname, *decf = NULL, *inf = NULL;
+ int lineno, ret = 0;
+ int decl = 0, inl;
+ Dwarf_Die die_mem, *cu_die;
+ size_t nlines, i;
++ bool flag;
+
+ /* Get the CU die */
+ if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
+@@ -783,6 +820,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+ "Possible error in debuginfo.\n");
+ continue;
+ }
++ /* Skip end-of-sequence */
++ if (dwarf_lineendsequence(line, &flag) != 0 || flag)
++ continue;
++ /* Skip Non statement line-info */
++ if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
++ continue;
+ /* Filter lines based on address */
+ if (rt_die != cu_die) {
+ /*
+@@ -792,13 +835,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+ */
+ if (!dwarf_haspc(rt_die, addr))
+ continue;
++
+ if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
++ /* Call-site check */
++ inf = die_get_call_file(&die_mem);
++ if ((inf && !strcmp(inf, decf)) &&
++ die_get_call_lineno(&die_mem) == lineno)
++ goto found;
++
+ dwarf_decl_line(&die_mem, &inl);
+ if (inl != decl ||
+ decf != dwarf_decl_file(&die_mem))
+ continue;
+ }
+ }
++found:
+ /* Get source line */
+ fname = dwarf_linesrc(line, NULL, NULL);
+
+@@ -813,8 +864,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+ */
+ if (rt_die != cu_die)
+ /*
+- * Don't need walk functions recursively, because nested
+- * inlined functions don't have lines of the specified DIE.
++ * No need to walk inlined functions recursively, because
++ * inner inlined functions don't have the lines of the
++ * specified function.
+ */
+ ret = __die_walk_funclines(rt_die, false, callback, data);
+ else {
+@@ -989,7 +1041,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
+ bool first = true;
+ const char *name;
+
+- ret = dwarf_entrypc(sp_die, &entry);
++ ret = die_entrypc(sp_die, &entry);
+ if (ret)
+ return ret;
+
+@@ -1052,7 +1104,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
+ bool first = true;
+ const char *name;
+
+- ret = dwarf_entrypc(sp_die, &entry);
++ ret = die_entrypc(sp_die, &entry);
+ if (ret)
+ return ret;
+
+diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
+index f204e5892403..506006e0cf66 100644
+--- a/tools/perf/util/dwarf-aux.h
++++ b/tools/perf/util/dwarf-aux.h
+@@ -29,6 +29,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ /* Get DW_AT_linkage_name (should be NULL for C binary) */
+ const char *die_get_linkage_name(Dwarf_Die *dw_die);
+
++/* Get the lowest PC in DIE (including range list) */
++int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
++
+ /* Ensure that this DIE is a subprogram and definition (not declaration) */
+ bool die_is_func_def(Dwarf_Die *dw_die);
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index b5e2adef49de..422ad1888e74 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1365,8 +1365,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ if (get_config_terms(head_config, &config_terms))
+ return -ENOMEM;
+
+- if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
++ if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
++ struct perf_evsel_config_term *pos, *tmp;
++
++ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
++ list_del_init(&pos->list);
++ free(pos);
++ }
+ return -EINVAL;
++ }
+
+ evsel = __add_event(list, &parse_state->idx, &attr,
+ get_config_name(head_config), pmu,
+@@ -1927,15 +1934,20 @@ int parse_events(struct evlist *evlist, const char *str,
+
+ ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS);
+ perf_pmu__parse_cleanup();
++
++ if (!ret && list_empty(&parse_state.list)) {
++ WARN_ONCE(true, "WARNING: event parser found nothing\n");
++ return -1;
++ }
++
++ /*
++ * Add list to the evlist even with errors to allow callers to clean up.
++ */
++ perf_evlist__splice_list_tail(evlist, &parse_state.list);
++
+ if (!ret) {
+ struct evsel *last;
+
+- if (list_empty(&parse_state.list)) {
+- WARN_ONCE(true, "WARNING: event parser found nothing\n");
+- return -1;
+- }
+-
+- perf_evlist__splice_list_tail(evlist, &parse_state.list);
+ evlist->nr_groups += parse_state.nr_groups;
+ last = evlist__last(evlist);
+ last->cmdline_group_boundary = true;
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index cd9f95e5044e..08cccd86447c 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -756,6 +756,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
+ return 0;
+ }
+
++/* Return innermost DIE */
++static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
++{
++ struct find_scope_param *fsp = data;
++
++ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
++ fsp->found = true;
++ return 1;
++}
++
+ /* Find an appropriate scope fits to given conditions */
+ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
+ {
+@@ -767,8 +777,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
+ .die_mem = die_mem,
+ .found = false,
+ };
++ int ret;
+
+- cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
++ ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
++ &fsp);
++ if (!ret && !fsp.found)
++ cu_walk_functions_at(&pf->cu_die, pf->addr,
++ find_inner_scope_cb, &fsp);
+
+ return fsp.found ? die_mem : NULL;
+ }
+@@ -942,7 +957,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
+ ret = find_probe_point_lazy(in_die, pf);
+ else {
+ /* Get probe address */
+- if (dwarf_entrypc(in_die, &addr) != 0) {
++ if (die_entrypc(in_die, &addr) != 0) {
+ pr_warning("Failed to get entry address of %s.\n",
+ dwarf_diename(in_die));
+ return -ENOENT;
+@@ -994,7 +1009,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
+ param->retval = find_probe_point_by_line(pf);
+ } else if (die_is_func_instance(sp_die)) {
+ /* Instances always have the entry address */
+- dwarf_entrypc(sp_die, &pf->addr);
++ die_entrypc(sp_die, &pf->addr);
+ /* But in some case the entry address is 0 */
+ if (pf->addr == 0) {
+ pr_debug("%s has no entry PC. Skipped\n",
+@@ -1425,6 +1440,18 @@ error:
+ return DIE_FIND_CB_END;
+ }
+
++static bool available_var_finder_overlap(struct available_var_finder *af)
++{
++ int i;
++
++ for (i = 0; i < af->nvls; i++) {
++ if (af->pf.addr == af->vls[i].point.address)
++ return true;
++ }
++ return false;
++
++}
++
+ /* Add a found vars into available variables list */
+ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
+ {
+@@ -1435,6 +1462,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
+ Dwarf_Die die_mem;
+ int ret;
+
++ /*
++	 * For some reason (e.g. different columns assigned to the same
++	 * address), this callback can be called with an address that has
++	 * already been processed. Ignore such duplicates.
++ */
++ if (available_var_finder_overlap(af))
++ return 0;
++
+ /* Check number of tevs */
+ if (af->nvls == af->max_vls) {
+ pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
+@@ -1578,7 +1613,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+ /* Get function entry information */
+ func = basefunc = dwarf_diename(&spdie);
+ if (!func ||
+- dwarf_entrypc(&spdie, &baseaddr) != 0 ||
++ die_entrypc(&spdie, &baseaddr) != 0 ||
+ dwarf_decl_line(&spdie, &baseline) != 0) {
+ lineno = 0;
+ goto post;
+@@ -1595,7 +1630,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+ while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
+ &indie)) {
+ /* There is an inline function */
+- if (dwarf_entrypc(&indie, &_addr) == 0 &&
++ if (die_entrypc(&indie, &_addr) == 0 &&
+ _addr == addr) {
+ /*
+ * addr is at an inline function entry.
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 061bb4d6a3f5..5c172845fa5a 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1954,8 +1954,8 @@ out_err:
+ }
+
+ static union perf_event *
+-fetch_mmaped_event(struct perf_session *session,
+- u64 head, size_t mmap_size, char *buf)
++prefetch_event(char *buf, u64 head, size_t mmap_size,
++ bool needs_swap, union perf_event *error)
+ {
+ union perf_event *event;
+
+@@ -1967,20 +1967,32 @@ fetch_mmaped_event(struct perf_session *session,
+ return NULL;
+
+ event = (union perf_event *)(buf + head);
++ if (needs_swap)
++ perf_event_header__bswap(&event->header);
+
+- if (session->header.needs_swap)
++ if (head + event->header.size <= mmap_size)
++ return event;
++
++ /* We're not fetching the event so swap back again */
++ if (needs_swap)
+ perf_event_header__bswap(&event->header);
+
+- if (head + event->header.size > mmap_size) {
+- /* We're not fetching the event so swap back again */
+- if (session->header.needs_swap)
+- perf_event_header__bswap(&event->header);
+- pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
+- __func__, head, event->header.size, mmap_size);
+- return ERR_PTR(-EINVAL);
+- }
++ pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
++		 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
+
+- return event;
++ return error;
++}
++
++static union perf_event *
++fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
++{
++ return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
++}
++
++static union perf_event *
++fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
++{
++ return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
+ }
+
+ static int __perf_session__process_decomp_events(struct perf_session *session)
+@@ -1993,10 +2005,8 @@ static int __perf_session__process_decomp_events(struct perf_session *session)
+ return 0;
+
+ while (decomp->head < decomp->size && !session_done()) {
+- union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
+-
+- if (IS_ERR(event))
+- return PTR_ERR(event);
++ union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
++ session->header.needs_swap);
+
+ if (!event)
+ break;
+@@ -2096,7 +2106,7 @@ remap:
+ }
+
+ more:
+- event = fetch_mmaped_event(session, head, mmap_size, buf);
++ event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+
+diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+index 7c7451d3f494..58dbdfd4fa13 100644
+--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
++++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+@@ -39,7 +39,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
+ {
+ .name = "PC9",
+ .desc = N_("Processor Package C9"),
+- .desc = N_("Processor Package C2"),
+ .id = PC9,
+ .range = RANGE_PACKAGE,
+ .get_count_percent = hsw_ext_get_count_percent,
+diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
+index e95c33e333a4..b29a73fe64db 100644
+--- a/tools/testing/selftests/bpf/cgroup_helpers.c
++++ b/tools/testing/selftests/bpf/cgroup_helpers.c
+@@ -98,7 +98,7 @@ int enable_all_controllers(char *cgroup_path)
+ */
+ int setup_cgroup_environment(void)
+ {
+- char cgroup_workdir[PATH_MAX + 1];
++ char cgroup_workdir[PATH_MAX - 24];
+
+ format_cgroup_path(cgroup_workdir, "");
+
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+index 3a62119c7498..35c512818a56 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+@@ -62,6 +62,10 @@ struct padded_a_lot {
+ * long: 64;
+ * long: 64;
+ * int b;
++ * long: 32;
++ * long: 64;
++ * long: 64;
++ * long: 64;
+ *};
+ *
+ */
+@@ -95,7 +99,6 @@ struct zone_padding {
+ struct zone {
+ int a;
+ short b;
+- short: 16;
+ struct zone_padding __pad__;
+ };
+
+diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+index c4d104428643..69880c1e7700 100644
+--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
++++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+@@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
+ *pad_off = 0;
+
+ // we can only go as far as ~10 TLVs due to the BPF max stack size
++ // workaround: define induction variable "i" as "long" instead
++ // of "int" to prevent alu32 sub-register spilling.
+ #pragma clang loop unroll(disable)
+- for (int i = 0; i < 100; i++) {
++ for (long i = 0; i < 100; i++) {
+ struct sr6_tlv_t tlv;
+
+ if (cur_off == *tlv_off)
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+index 608a06871572..d22e438198cf 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+@@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
+ unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
+ char value[MAX_VALUE_STR_LEN];
+ unsigned char i, off = 0;
+- int ret;
++	/* a workaround to prevent the compiler from generating
++	 * code that the verifier cannot handle yet.
++ */
++ volatile int ret;
+
+ if (ctx->write)
+ return 0;
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index af75a1c7a458..3bf18364c67c 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -20,7 +20,7 @@ struct prog_test_def {
+ bool tested;
+ bool need_cgroup_cleanup;
+
+- const char *subtest_name;
++ char *subtest_name;
+ int subtest_num;
+
+ /* store counts before subtest started */
+@@ -81,16 +81,17 @@ void test__end_subtest()
+ fprintf(env.stdout, "#%d/%d %s:%s\n",
+ test->test_num, test->subtest_num,
+ test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
++
++ free(test->subtest_name);
++ test->subtest_name = NULL;
+ }
+
+ bool test__start_subtest(const char *name)
+ {
+ struct prog_test_def *test = env.test;
+
+- if (test->subtest_name) {
++ if (test->subtest_name)
+ test__end_subtest();
+- test->subtest_name = NULL;
+- }
+
+ test->subtest_num++;
+
+@@ -104,7 +105,13 @@ bool test__start_subtest(const char *name)
+ if (!should_run(&env.subtest_selector, test->subtest_num, name))
+ return false;
+
+- test->subtest_name = name;
++ test->subtest_name = strdup(name);
++ if (!test->subtest_name) {
++ fprintf(env.stderr,
++ "Subtest #%d: failed to copy subtest name!\n",
++ test->subtest_num);
++ return false;
++ }
+ env.test->old_error_cnt = env.test->error_cnt;
+
+ return true;
+diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+index ff0d31d38061..7c76b841b17b 100755
+--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
++++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
+@@ -62,6 +62,10 @@ cleanup() {
+ if [[ -f "${infile}" ]]; then
+ rm "${infile}"
+ fi
++
++ if [[ -n $server_pid ]]; then
++ kill $server_pid 2> /dev/null
++ fi
+ }
+
+ server_listen() {
+@@ -77,6 +81,7 @@ client_connect() {
+
+ verify_data() {
+ wait "${server_pid}"
++ server_pid=
+ # sha1sum returns two fields [sha1] [filepath]
+ # convert to bash array and access first elem
+ insum=($(sha1sum ${infile}))
+diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
+index fef88eb4b873..fa6a88c50750 100755
+--- a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
++++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
+@@ -36,7 +36,7 @@ h2_destroy()
+ {
+ ip -6 route del 2001:db8:1::/64 vrf v$h2
+ ip -4 route del 192.0.2.0/28 vrf v$h2
+- simple_if_fini $h2 192.0.2.130/28
++ simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+ }
+
+ router_create()
+diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
+index 53f598f06647..34df4c8882af 100644
+--- a/tools/testing/selftests/net/so_txtime.c
++++ b/tools/testing/selftests/net/so_txtime.c
+@@ -105,8 +105,8 @@ static void do_recv_one(int fdr, struct timed_send *ts)
+ tstop = (gettime_ns() - glob_tstart) / 1000;
+ texpect = ts->delay_us >= 0 ? ts->delay_us : 0;
+
+- fprintf(stderr, "payload:%c delay:%ld expected:%ld (us)\n",
+- rbuf[0], tstop, texpect);
++ fprintf(stderr, "payload:%c delay:%lld expected:%lld (us)\n",
++ rbuf[0], (long long)tstop, (long long)texpect);
+
+ if (rbuf[0] != ts->data)
+ error(1, 0, "payload mismatch. expected %c", ts->data);
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 13e5ef615026..0ea44d975b6c 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -722,34 +722,6 @@ TEST_F(tls, recv_lowat)
+ EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
+ }
+
+-TEST_F(tls, recv_rcvbuf)
+-{
+- char send_mem[4096];
+- char recv_mem[4096];
+- int rcv_buf = 1024;
+-
+- memset(send_mem, 0x1c, sizeof(send_mem));
+-
+- EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF,
+- &rcv_buf, sizeof(rcv_buf)), 0);
+-
+- EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512);
+- memset(recv_mem, 0, sizeof(recv_mem));
+- EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512);
+- EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0);
+-
+- if (self->notls)
+- return;
+-
+- EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096);
+- memset(recv_mem, 0, sizeof(recv_mem));
+- EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
+- EXPECT_EQ(errno, EMSGSIZE);
+-
+- EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
+- EXPECT_EQ(errno, EMSGSIZE);
+-}
+-
+ TEST_F(tls, bidir)
+ {
+ char const *test_str = "test_read";
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index 614b31aad168..c66da6ffd6d8 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -440,7 +440,8 @@ static bool __send_one(int fd, struct msghdr *msg, int flags)
+ if (ret == -1)
+ error(1, errno, "sendmsg");
+ if (ret != msg->msg_iov->iov_len)
+- error(1, 0, "sendto: %d != %lu", ret, msg->msg_iov->iov_len);
++ error(1, 0, "sendto: %d != %llu", ret,
++ (unsigned long long)msg->msg_iov->iov_len);
+ if (msg->msg_flags)
+ error(1, 0, "sendmsg: return flags 0x%x\n", msg->msg_flags);
+
+diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
+index ada99496634a..17512a43885e 100644
+--- a/tools/testing/selftests/net/udpgso_bench_tx.c
++++ b/tools/testing/selftests/net/udpgso_bench_tx.c
+@@ -405,7 +405,8 @@ static int send_udp_segment(int fd, char *data)
+ if (ret == -1)
+ error(1, errno, "sendmsg");
+ if (ret != iov.iov_len)
+- error(1, 0, "sendmsg: %u != %lu\n", ret, iov.iov_len);
++ error(1, 0, "sendmsg: %u != %llu\n", ret,
++ (unsigned long long)iov.iov_len);
+
+ return 1;
+ }
+diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
+index 47b7473dedef..e6aa00a183bc 100644
+--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
++++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
+@@ -47,7 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
+ int main(void)
+ {
+ const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+- const unsigned long va_max = 1UL << 32;
++ /*
++	 * va_max must be sufficiently larger than vm.mmap_min_addr, which
++	 * is 64KB/32KB by default (depending on CONFIG_LSM_MMAP_MIN_ADDR).
++ */
++ const unsigned long va_max = 1UL << 20;
+ unsigned long va;
+ void *p;
+ int fd;
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 38b4c910b6c3..f23c9cd5684f 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -38,6 +38,11 @@ static unsigned long io_map_base;
+ #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
+ #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
+
++static bool is_iomap(unsigned long flags)
++{
++ return flags & KVM_S2PTE_FLAG_IS_IOMAP;
++}
++
+ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
+ {
+ return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
+@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+
+ vma_pagesize = vma_kernel_pagesize(vma);
+ if (logging_active ||
++ (vma->vm_flags & VM_PFNMAP) ||
+ !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+ force_pte = true;
+ vma_pagesize = PAGE_SIZE;
+@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ writable = false;
+ }
+
++ if (exec_fault && is_iomap(flags))
++ return -ENOEXEC;
++
+ spin_lock(&kvm->mmu_lock);
+ if (mmu_notifier_retry(kvm, mmu_seq))
+ goto out_unlock;
+@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ if (writable)
+ kvm_set_pfn_dirty(pfn);
+
+- if (fault_status != FSC_PERM)
++ if (fault_status != FSC_PERM && !is_iomap(flags))
+ clean_dcache_guest_page(pfn, vma_pagesize);
+
+ if (exec_fault)
+@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+ if (is_iabt) {
+ /* Prefetch Abort on I/O address */
+- kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+- ret = 1;
+- goto out_unlock;
++ ret = -ENOEXEC;
++ goto out;
+ }
+
+ /*
+@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+ if (ret == 0)
+ ret = 1;
++out:
++ if (ret == -ENOEXEC) {
++ kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
++ ret = 1;
++ }
+ out_unlock:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ return ret;