diff options
author | 2020-10-07 08:48:37 -0400 | |
---|---|---|
committer | 2020-10-07 08:48:37 -0400 | |
commit | 299592e55430c8340e849ba1eeda4c3c9446b9a4 (patch) | |
tree | 57aefcd575227d9263962ab625ba2d6ae47c317e | |
parent | Linux patch 5.4.69 (diff) | |
download | linux-patches-299592e55430c8340e849ba1eeda4c3c9446b9a4.tar.gz linux-patches-299592e55430c8340e849ba1eeda4c3c9446b9a4.tar.bz2 linux-patches-299592e55430c8340e849ba1eeda4c3c9446b9a4.zip |
Linux patch 5.4.705.4-71
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1069_linux-5.4.70.patch | 2429 |
2 files changed, 2433 insertions, 0 deletions
diff --git a/0000_README b/0000_README index dd45626b..f195c0da 100644 --- a/0000_README +++ b/0000_README @@ -319,6 +319,10 @@ Patch: 1068_linux-5.4.69.patch From: http://www.kernel.org Desc: Linux 5.4.69 +Patch: 1069_linux-5.4.70.patch +From: http://www.kernel.org +Desc: Linux 5.4.70 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1069_linux-5.4.70.patch b/1069_linux-5.4.70.patch new file mode 100644 index 00000000..eeb57d33 --- /dev/null +++ b/1069_linux-5.4.70.patch @@ -0,0 +1,2429 @@ +diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst +index 5d63b18bd6d1f..60c45c916f7d0 100644 +--- a/Documentation/admin-guide/iostats.rst ++++ b/Documentation/admin-guide/iostats.rst +@@ -99,7 +99,7 @@ Field 10 -- # of milliseconds spent doing I/Os + + Since 5.0 this field counts jiffies when at least one request was + started or completed. If request runs more than 2 jiffies then some +- I/O time will not be accounted unless there are other requests. ++ I/O time might be not accounted in case of concurrent requests. + + Field 11 -- weighted # of milliseconds spent doing I/Os + This field is incremented at each I/O start, I/O completion, I/O +@@ -133,6 +133,9 @@ are summed (possibly overflowing the unsigned long variable they are + summed to) and the result given to the user. There is no convenient + user interface for accessing the per-CPU counters themselves. + ++Since 4.19 request times are measured with nanoseconds precision and ++truncated to milliseconds before showing in this interface. ++ + Disks vs Partitions + ------------------- + +diff --git a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt +index d4d83916c09dd..be329ea4794f8 100644 +--- a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt ++++ b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt +@@ -20,8 +20,9 @@ Required properties: + - gpio-controller : Marks the device node as a GPIO controller + - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt + - interrupt-controller : Mark the GPIO controller as an interrupt-controller +-- ngpios : number of GPIO lines, see gpio.txt +- (should be multiple of 8, up to 80 pins) ++- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose ++ 2 software GPIOs per hardware GPIO: one for hardware input, one for hardware ++ output. Up to 80 pins, must be a multiple of 8. 
+ - clocks : A phandle to the APB clock for SGPM clock division + - bus-frequency : SGPM CLK frequency + +diff --git a/Makefile b/Makefile +index adf3847106775..e409fd909560f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 69 ++SUBLEVEL = 70 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c +index a6dd80a2c9392..ee50506d86f42 100644 +--- a/arch/ia64/mm/init.c ++++ b/arch/ia64/mm/init.c +@@ -518,7 +518,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) + if (map_start < map_end) + memmap_init_zone((unsigned long)(map_end - map_start), + args->nid, args->zone, page_to_pfn(map_start), +- MEMMAP_EARLY, NULL); ++ MEMINIT_EARLY, NULL); + return 0; + } + +@@ -527,8 +527,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, + unsigned long start_pfn) + { + if (!vmem_map) { +- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, +- NULL); ++ memmap_init_zone(size, nid, zone, start_pfn, ++ MEMINIT_EARLY, NULL); + } else { + struct page *start; + struct memmap_init_callback_data args; +diff --git a/block/bio.c b/block/bio.c +index f07739300dfe3..24704bc2ad6f1 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -1754,14 +1754,14 @@ defer: + schedule_work(&bio_dirty_work); + } + +-void update_io_ticks(struct hd_struct *part, unsigned long now) ++void update_io_ticks(struct hd_struct *part, unsigned long now, bool end) + { + unsigned long stamp; + again: + stamp = READ_ONCE(part->stamp); + if (unlikely(stamp != now)) { + if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) { +- __part_stat_add(part, io_ticks, 1); ++ __part_stat_add(part, io_ticks, end ? now - stamp : 1); + } + } + if (part->partno) { +@@ -1777,7 +1777,7 @@ void generic_start_io_acct(struct request_queue *q, int op, + + part_stat_lock(); + +- update_io_ticks(part, jiffies); ++ update_io_ticks(part, jiffies, false); + part_stat_inc(part, ios[sgrp]); + part_stat_add(part, sectors[sgrp], sectors); + part_inc_in_flight(q, part, op_is_write(op)); +@@ -1795,7 +1795,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op, + + part_stat_lock(); + +- update_io_ticks(part, now); ++ update_io_ticks(part, now, true); + part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration)); + part_stat_add(part, time_in_queue, duration); + part_dec_in_flight(q, part, op_is_write(req_op)); +diff --git a/block/blk-core.c b/block/blk-core.c +index ca6b677356864..81aafb601df06 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -1334,7 +1334,7 @@ void blk_account_io_done(struct request *req, u64 now) + part_stat_lock(); + part = req->part; + +- update_io_ticks(part, jiffies); ++ update_io_ticks(part, jiffies, true); + part_stat_inc(part, ios[sgrp]); + part_stat_add(part, nsecs[sgrp], now - req->start_time_ns); + part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns)); +@@ -1376,7 +1376,7 @@ void blk_account_io_start(struct request *rq, bool new_io) + rq->part = part; + } + +- update_io_ticks(part, jiffies); ++ update_io_ticks(part, jiffies, false); + + part_stat_unlock(); + } +diff --git a/drivers/base/node.c b/drivers/base/node.c +index 296546ffed6c1..9c6e6a7b93545 100644 +--- a/drivers/base/node.c ++++ b/drivers/base/node.c +@@ -758,14 +758,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn) + return pfn_to_nid(pfn); + } + ++static int do_register_memory_block_under_node(int nid, ++ struct memory_block *mem_blk) ++{ ++ int ret; ++ ++ /* ++ * If this 
memory block spans multiple nodes, we only indicate ++ * the last processed node. ++ */ ++ mem_blk->nid = nid; ++ ++ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, ++ &mem_blk->dev.kobj, ++ kobject_name(&mem_blk->dev.kobj)); ++ if (ret) ++ return ret; ++ ++ return sysfs_create_link_nowarn(&mem_blk->dev.kobj, ++ &node_devices[nid]->dev.kobj, ++ kobject_name(&node_devices[nid]->dev.kobj)); ++} ++ + /* register memory section under specified node if it spans that node */ +-static int register_mem_sect_under_node(struct memory_block *mem_blk, +- void *arg) ++static int register_mem_block_under_node_early(struct memory_block *mem_blk, ++ void *arg) + { + unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE; + unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); + unsigned long end_pfn = start_pfn + memory_block_pfns - 1; +- int ret, nid = *(int *)arg; ++ int nid = *(int *)arg; + unsigned long pfn; + + for (pfn = start_pfn; pfn <= end_pfn; pfn++) { +@@ -782,38 +804,33 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk, + } + + /* +- * We need to check if page belongs to nid only for the boot +- * case, during hotplug we know that all pages in the memory +- * block belong to the same node. +- */ +- if (system_state == SYSTEM_BOOTING) { +- page_nid = get_nid_for_pfn(pfn); +- if (page_nid < 0) +- continue; +- if (page_nid != nid) +- continue; +- } +- +- /* +- * If this memory block spans multiple nodes, we only indicate +- * the last processed node. ++ * We need to check if page belongs to nid only at the boot ++ * case because node's ranges can be interleaved. + */ +- mem_blk->nid = nid; +- +- ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, +- &mem_blk->dev.kobj, +- kobject_name(&mem_blk->dev.kobj)); +- if (ret) +- return ret; ++ page_nid = get_nid_for_pfn(pfn); ++ if (page_nid < 0) ++ continue; ++ if (page_nid != nid) ++ continue; + +- return sysfs_create_link_nowarn(&mem_blk->dev.kobj, +- &node_devices[nid]->dev.kobj, +- kobject_name(&node_devices[nid]->dev.kobj)); ++ return do_register_memory_block_under_node(nid, mem_blk); + } + /* mem section does not span the specified node */ + return 0; + } + ++/* ++ * During hotplug we know that all pages in the memory block belong to the same ++ * node. ++ */ ++static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, ++ void *arg) ++{ ++ int nid = *(int *)arg; ++ ++ return do_register_memory_block_under_node(nid, mem_blk); ++} ++ + /* + * Unregister a memory block device under the node it spans. Memory blocks + * with multiple nodes cannot be offlined and therefore also never be removed. 
+@@ -829,11 +846,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) + kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); + } + +-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) ++int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, ++ enum meminit_context context) + { ++ walk_memory_blocks_func_t func; ++ ++ if (context == MEMINIT_HOTPLUG) ++ func = register_mem_block_under_node_hotplug; ++ else ++ func = register_mem_block_under_node_early; ++ + return walk_memory_blocks(PFN_PHYS(start_pfn), + PFN_PHYS(end_pfn - start_pfn), (void *)&nid, +- register_mem_sect_under_node); ++ func); + } + + #ifdef CONFIG_HUGETLBFS +diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c +index 51564fc23c639..f4086287bb71b 100644 +--- a/drivers/clk/samsung/clk-exynos4.c ++++ b/drivers/clk/samsung/clk-exynos4.c +@@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { + GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), + GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), + GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), +- GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), + GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, +@@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { + 0), + GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), + GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), +- GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), + GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, +diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c +index 993f3a73c71e7..55d3b505b08c9 100644 +--- a/drivers/clk/socfpga/clk-s10.c ++++ b/drivers/clk/socfpga/clk-s10.c +@@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { + { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), + 0, 0, 2, 0xB0, 1}, + { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, +- ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, ++ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2}, + { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, + ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3}, + { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, +diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c +index 1583f5fc992f3..80f640d9ea71c 100644 +--- a/drivers/clk/tegra/clk-pll.c ++++ b/drivers/clk/tegra/clk-pll.c +@@ -1569,9 +1569,6 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) + unsigned long flags = 0; + unsigned long input_rate; + +- if (clk_pll_is_enabled(hw)) +- return 0; +- + input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); + + if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) +diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c +index 80d0939d040b5..8d386adbe8009 100644 +--- 
a/drivers/clocksource/timer-gx6605s.c ++++ b/drivers/clocksource/timer-gx6605s.c +@@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev) + void __iomem *base = timer_of_base(to_timer_of(ce)); + + writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS); ++ writel_relaxed(0, base + TIMER_INI); + + ce->event_handler(ce); + +diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c +index 09e53c5f3b0a4..2820c59b5f071 100644 +--- a/drivers/gpio/gpio-aspeed.c ++++ b/drivers/gpio/gpio-aspeed.c +@@ -1115,8 +1115,8 @@ static const struct aspeed_gpio_config ast2500_config = + + static const struct aspeed_bank_props ast2600_bank_props[] = { + /* input output */ +- {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */ +- {6, 0xffff0000, 0x0fff0000}, /* Y/Z */ ++ {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */ ++ {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */ + { }, + }; + +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c +index 213aedc97dc2e..9c1c4d81aa7b6 100644 +--- a/drivers/gpio/gpio-mockup.c ++++ b/drivers/gpio/gpio-mockup.c +@@ -497,6 +497,7 @@ static int __init gpio_mockup_init(void) + err = platform_driver_register(&gpio_mockup_driver); + if (err) { + gpio_mockup_err("error registering platform driver\n"); ++ debugfs_remove_recursive(gpio_mockup_dbg_dir); + return err; + } + +@@ -527,6 +528,7 @@ static int __init gpio_mockup_init(void) + gpio_mockup_err("error registering device"); + platform_driver_unregister(&gpio_mockup_driver); + gpio_mockup_unregister_pdevs(); ++ debugfs_remove_recursive(gpio_mockup_dbg_dir); + return PTR_ERR(pdev); + } + +diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c +index 006a7e6a75f21..7e70d2d06c3fe 100644 +--- a/drivers/gpio/gpio-siox.c ++++ b/drivers/gpio/gpio-siox.c +@@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice) + girq->chip = &ddata->ichip; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_level_irq; ++ girq->threaded = true; + + ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL); + if (ret) +diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c +index d7314d39ab65b..36ea8a3bd4510 100644 +--- a/drivers/gpio/gpio-sprd.c ++++ b/drivers/gpio/gpio-sprd.c +@@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data, + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1); ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); + irq_set_handler_locked(data, handle_edge_irq); + break; + case IRQ_TYPE_EDGE_FALLING: + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0); ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); + irq_set_handler_locked(data, handle_edge_irq); + break; + case IRQ_TYPE_EDGE_BOTH: + sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1); ++ sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); + irq_set_handler_locked(data, handle_edge_irq); + break; + case IRQ_TYPE_LEVEL_HIGH: +diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c +index 75b1135b383a7..daf29044d0f19 100644 +--- a/drivers/gpio/gpio-tc3589x.c ++++ b/drivers/gpio/gpio-tc3589x.c +@@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) + continue; + + tc3589x_gpio->oldregs[i][j] = new; +- tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); ++ tc3589x_reg_write(tc3589x, regmap[i] + j, new); 
+ } + } + +diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/sgpio-aspeed.c +index 8319812593e31..3a5dfb8ded1fb 100644 +--- a/drivers/gpio/sgpio-aspeed.c ++++ b/drivers/gpio/sgpio-aspeed.c +@@ -17,7 +17,17 @@ + #include <linux/spinlock.h> + #include <linux/string.h> + +-#define MAX_NR_SGPIO 80 ++/* ++ * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie, ++ * slots within the clocked serial GPIO data). Since each HW GPIO is both an ++ * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip ++ * device. ++ * ++ * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and ++ * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET. ++ */ ++#define MAX_NR_HW_SGPIO 80 ++#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO + + #define ASPEED_SGPIO_CTRL 0x54 + +@@ -30,8 +40,8 @@ struct aspeed_sgpio { + struct clk *pclk; + spinlock_t lock; + void __iomem *base; +- uint32_t dir_in[3]; + int irq; ++ int n_sgpio; + }; + + struct aspeed_sgpio_bank { +@@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, + } + } + +-#define GPIO_BANK(x) ((x) >> 5) +-#define GPIO_OFFSET(x) ((x) & 0x1f) ++#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5) ++#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f) + #define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) + + static const struct aspeed_sgpio_bank *to_bank(unsigned int offset) + { +- unsigned int bank = GPIO_BANK(offset); ++ unsigned int bank; ++ ++ bank = GPIO_BANK(offset); + + WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks)); + return &aspeed_sgpio_banks[bank]; + } + ++static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc, ++ unsigned long *valid_mask, unsigned int ngpios) ++{ ++ struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); ++ int n = sgpio->n_sgpio; ++ int c = SGPIO_OUTPUT_OFFSET - n; ++ ++ WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); ++ ++ /* input GPIOs in the lower range */ ++ bitmap_set(valid_mask, 0, n); ++ bitmap_clear(valid_mask, n, c); ++ ++ /* output GPIOS above SGPIO_OUTPUT_OFFSET */ ++ bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n); ++ bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c); ++ ++ return 0; ++} ++ ++static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc, ++ unsigned long *valid_mask, unsigned int ngpios) ++{ ++ struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); ++ int n = sgpio->n_sgpio; ++ ++ WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); ++ ++ /* input GPIOs in the lower range */ ++ bitmap_set(valid_mask, 0, n); ++ bitmap_clear(valid_mask, n, ngpios - n); ++} ++ ++static bool aspeed_sgpio_is_input(unsigned int offset) ++{ ++ return offset < SGPIO_OUTPUT_OFFSET; ++} ++ + static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) + { + struct aspeed_sgpio *gpio = gpiochip_get_data(gc); + const struct aspeed_sgpio_bank *bank = to_bank(offset); + unsigned long flags; + enum aspeed_sgpio_reg reg; +- bool is_input; + int rc = 0; + + spin_lock_irqsave(&gpio->lock, flags); + +- is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); +- reg = is_input ? reg_val : reg_rdata; ++ reg = aspeed_sgpio_is_input(offset) ? 
reg_val : reg_rdata; + rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); + + spin_unlock_irqrestore(&gpio->lock, flags); +@@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) + return rc; + } + +-static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) ++static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) + { + struct aspeed_sgpio *gpio = gpiochip_get_data(gc); + const struct aspeed_sgpio_bank *bank = to_bank(offset); +- void __iomem *addr; ++ void __iomem *addr_r, *addr_w; + u32 reg = 0; + +- addr = bank_reg(gpio, bank, reg_val); +- reg = ioread32(addr); ++ if (aspeed_sgpio_is_input(offset)) ++ return -EINVAL; ++ ++ /* Since this is an output, read the cached value from rdata, then ++ * update val. */ ++ addr_r = bank_reg(gpio, bank, reg_rdata); ++ addr_w = bank_reg(gpio, bank, reg_val); ++ ++ reg = ioread32(addr_r); + + if (val) + reg |= GPIO_BIT(offset); + else + reg &= ~GPIO_BIT(offset); + +- iowrite32(reg, addr); ++ iowrite32(reg, addr_w); ++ ++ return 0; + } + + static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) +@@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) + + static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) + { +- struct aspeed_sgpio *gpio = gpiochip_get_data(gc); +- unsigned long flags; +- +- spin_lock_irqsave(&gpio->lock, flags); +- gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset); +- spin_unlock_irqrestore(&gpio->lock, flags); +- +- return 0; ++ return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL; + } + + static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) + { + struct aspeed_sgpio *gpio = gpiochip_get_data(gc); + unsigned long flags; ++ int rc; + +- spin_lock_irqsave(&gpio->lock, flags); +- +- gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset); +- sgpio_set_value(gc, offset, val); ++ /* No special action is required for setting the direction; we'll ++ * error-out in sgpio_set_value if this isn't an output GPIO */ + ++ spin_lock_irqsave(&gpio->lock, flags); ++ rc = sgpio_set_value(gc, offset, val); + spin_unlock_irqrestore(&gpio->lock, flags); + +- return 0; ++ return rc; + } + + static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) + { +- int dir_status; +- struct aspeed_sgpio *gpio = gpiochip_get_data(gc); +- unsigned long flags; +- +- spin_lock_irqsave(&gpio->lock, flags); +- dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); +- spin_unlock_irqrestore(&gpio->lock, flags); +- +- return dir_status; +- ++ return !!aspeed_sgpio_is_input(offset); + } + + static void irqd_to_aspeed_sgpio_data(struct irq_data *d, +@@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, + + irq = &gpio->chip.irq; + irq->chip = &aspeed_sgpio_irqchip; ++ irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask; + irq->handler = handle_bad_irq; + irq->default_type = IRQ_TYPE_NONE; + irq->parent_handler = aspeed_sgpio_irq_handler; +@@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, + irq->parents = &gpio->irq; + irq->num_parents = 1; + +- /* set IRQ settings and Enable Interrupt */ ++ /* Apply default IRQ settings */ + for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { + bank = &aspeed_sgpio_banks[i]; + /* set falling or level-low irq */ + iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); + /* trigger type is edge */ + iowrite32(0x00000000, 
bank_reg(gpio, bank, reg_irq_type1)); +- /* dual edge trigger mode. */ +- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2)); +- /* enable irq */ +- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable)); ++ /* single edge trigger */ ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); + } + + return 0; +@@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) + if (rc < 0) { + dev_err(&pdev->dev, "Could not read ngpios property\n"); + return -EINVAL; +- } else if (nr_gpios > MAX_NR_SGPIO) { ++ } else if (nr_gpios > MAX_NR_HW_SGPIO) { + dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n", +- MAX_NR_SGPIO, nr_gpios); ++ MAX_NR_HW_SGPIO, nr_gpios); + return -EINVAL; + } ++ gpio->n_sgpio = nr_gpios; + + rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq); + if (rc < 0) { +@@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) + spin_lock_init(&gpio->lock); + + gpio->chip.parent = &pdev->dev; +- gpio->chip.ngpio = nr_gpios; ++ gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2; ++ gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask; + gpio->chip.direction_input = aspeed_sgpio_dir_in; + gpio->chip.direction_output = aspeed_sgpio_dir_out; + gpio->chip.get_direction = aspeed_sgpio_get_direction; +@@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) + gpio->chip.label = dev_name(&pdev->dev); + gpio->chip.base = -1; + +- /* set all SGPIO pins as input (1). */ +- memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in)); +- + aspeed_sgpio_setup_irqs(gpio, pdev); + + rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +index e0aed42d9cbda..b588e0e409e72 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, + take the current one */ + if (active && !adev->have_disp_power_ref) { + adev->have_disp_power_ref = true; +- goto out; ++ return ret; + } + /* if we have no active crtcs, then drop the power ref + we got before */ +diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c +index 18b4881f44814..12b99ba575017 100644 +--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c ++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c +@@ -396,7 +396,7 @@ static struct regmap_config sun8i_mixer_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +- .max_register = 0xbfffc, /* guessed */ ++ .max_register = 0xffffc, /* guessed */ + }; + + static int sun8i_mixer_of_get_id(struct device_node *node) +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c +index 1213e1932ccb5..24d584a1c9a78 100644 +--- a/drivers/i2c/busses/i2c-cpm.c ++++ b/drivers/i2c/busses/i2c-cpm.c +@@ -65,6 +65,9 @@ struct i2c_ram { + char res1[4]; /* Reserved */ + ushort rpbase; /* Relocation pointer */ + char res2[2]; /* Reserved */ ++ /* The following elements are only for CPM2 */ ++ char res3[4]; /* Reserved */ ++ uint sdmatmp; /* Internal */ + }; + + #define I2COM_START 0x80 +diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c +index 21fdcde77883f..56e7696aa3c0f 100644 +--- a/drivers/iio/adc/qcom-spmi-adc5.c ++++ b/drivers/iio/adc/qcom-spmi-adc5.c +@@ -786,7 +786,7 @@ static int adc5_probe(struct platform_device *pdev) + + static struct platform_driver adc5_driver = { + .driver = 
{ +- .name = "qcom-spmi-adc5.c", ++ .name = "qcom-spmi-adc5", + .of_match_table = adc5_match_table, + }, + .probe = adc5_probe, +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c +index 854d5e7587241..ef2fa0905208d 100644 +--- a/drivers/input/mouse/trackpoint.c ++++ b/drivers/input/mouse/trackpoint.c +@@ -282,6 +282,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, + case TP_VARIANT_ALPS: + case TP_VARIANT_ELAN: + case TP_VARIANT_NXP: ++ case TP_VARIANT_JYT_SYNAPTICS: ++ case TP_VARIANT_SYNAPTICS: + if (variant_id) + *variant_id = param[0]; + if (firmware_id) +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 42771b9b10a00..98f0c7729b754 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -721,6 +721,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + }, + }, ++ { ++ /* Acer Aspire 5 A515 */ ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "PK"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c +index 9c94e16fb1277..55ed857f804f7 100644 +--- a/drivers/iommu/exynos-iommu.c ++++ b/drivers/iommu/exynos-iommu.c +@@ -1299,13 +1299,17 @@ static int exynos_iommu_of_xlate(struct device *dev, + return -ENODEV; + + data = platform_get_drvdata(sysmmu); +- if (!data) ++ if (!data) { ++ put_device(&sysmmu->dev); + return -ENODEV; ++ } + + if (!owner) { + owner = kzalloc(sizeof(*owner), GFP_KERNEL); +- if (!owner) ++ if (!owner) { ++ put_device(&sysmmu->dev); + return -ENOMEM; ++ } + + INIT_LIST_HEAD(&owner->controllers); + mutex_init(&owner->rpm_lock); +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c +index 693ee73eb2912..ef03d6fafc5ce 100644 +--- a/drivers/memstick/core/memstick.c ++++ b/drivers/memstick/core/memstick.c +@@ -441,6 +441,9 @@ static void memstick_check(struct work_struct *work) + } else if (host->card->stop) + host->card->stop(host->card); + ++ if (host->removing) ++ goto out_power_off; ++ + card = memstick_alloc_card(host); + + if (!card) { +@@ -545,6 +548,7 @@ EXPORT_SYMBOL(memstick_add_host); + */ + void memstick_remove_host(struct memstick_host *host) + { ++ host->removing = 1; + flush_workqueue(workqueue); + mutex_lock(&host->lock); + if (host->card) +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c +index 425aa898e797a..91d0cb08238cf 100644 +--- a/drivers/mmc/host/sdhci-pci-core.c ++++ b/drivers/mmc/host/sdhci-pci-core.c +@@ -798,7 +798,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) + static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) + { + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && +- dmi_match(DMI_BIOS_VENDOR, "LENOVO"); ++ (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || ++ dmi_match(DMI_SYS_VENDOR, "IRBIS")); + } + + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) +diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c +index f1a2da15dd0a6..b14d93da242f1 100644 +--- a/drivers/net/ethernet/dec/tulip/de2104x.c ++++ b/drivers/net/ethernet/dec/tulip/de2104x.c +@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi + #define DSL CONFIG_DE2104X_DSL + #endif + +-#define DE_RX_RING_SIZE 64 ++#define DE_RX_RING_SIZE 128 + #define DE_TX_RING_SIZE 64 + 
#define DE_RING_BYTES \ + ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c +index bd9c07888ebb4..6fa7a009a24a4 100644 +--- a/drivers/net/usb/rndis_host.c ++++ b/drivers/net/usb/rndis_host.c +@@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) + dev_dbg(&info->control->dev, + "rndis response error, code %d\n", retval); + } +- msleep(20); ++ msleep(40); + } + dev_dbg(&info->control->dev, "rndis response timeout\n"); + return -ETIMEDOUT; +diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c +index cc33441af4691..50804d0473083 100644 +--- a/drivers/net/wan/hdlc_cisco.c ++++ b/drivers/net/wan/hdlc_cisco.c +@@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, + skb_put(skb, sizeof(struct cisco_packet)); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + + dev_queue_xmit(skb); +diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c +index 9acad651ea1f6..d6cfd51613ed8 100644 +--- a/drivers/net/wan/hdlc_fr.c ++++ b/drivers/net/wan/hdlc_fr.c +@@ -433,6 +433,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) + if (pvc->state.fecn) /* TX Congestion counter */ + dev->stats.tx_compressed++; + skb->dev = pvc->frad; ++ skb->protocol = htons(ETH_P_HDLC); ++ skb_reset_network_header(skb); + dev_queue_xmit(skb); + return NETDEV_TX_OK; + } +@@ -555,6 +557,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) + skb_put(skb, i); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + + dev_queue_xmit(skb); +@@ -1041,7 +1044,7 @@ static void pvc_setup(struct net_device *dev) + { + dev->type = ARPHRD_DLCI; + dev->flags = IFF_POINTOPOINT; +- dev->hard_header_len = 10; ++ dev->hard_header_len = 0; + dev->addr_len = 2; + netif_keep_dst(dev); + } +@@ -1093,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) + dev->mtu = HDLC_MAX_MTU; + dev->min_mtu = 68; + dev->max_mtu = HDLC_MAX_MTU; ++ dev->needed_headroom = 10; + dev->priv_flags |= IFF_NO_QUEUE; + dev->ml_priv = pvc; + +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c +index 16f33d1ffbfb9..64f8556513369 100644 +--- a/drivers/net/wan/hdlc_ppp.c ++++ b/drivers/net/wan/hdlc_ppp.c +@@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, + + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + skb_queue_tail(&tx_queue, skb); + } +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c +index 2cff914aada55..709e3de0f6af1 100644 +--- a/drivers/net/wan/lapbether.c ++++ b/drivers/net/wan/lapbether.c +@@ -198,8 +198,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) + struct net_device *dev; + int size = skb->len; + +- skb->protocol = htons(ETH_P_X25); +- + ptr = skb_push(skb, 2); + + *ptr++ = size % 256; +@@ -210,6 +208,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) + + skb->dev = dev = lapbeth->ethdev; + ++ skb->protocol = htons(ETH_P_DEC); ++ + skb_reset_network_header(skb); + + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 2cd32901d95c7..207ed6d49ad7c 100644 +--- 
a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -630,7 +630,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, + } + + __rq_for_each_bio(bio, req) { +- u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); ++ u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + + if (n < segments) { +@@ -671,7 +671,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, + cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; + cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->write_zeroes.slba = +- cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); ++ cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cmnd->write_zeroes.length = + cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cmnd->write_zeroes.control = 0; +@@ -695,7 +695,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, + + cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); + cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); +- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); ++ cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); + cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + + if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) +@@ -1680,12 +1680,6 @@ static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) + } + #endif /* CONFIG_BLK_DEV_INTEGRITY */ + +-static void nvme_set_chunk_size(struct nvme_ns *ns) +-{ +- u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9)); +- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); +-} +- + static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) + { + struct nvme_ctrl *ctrl = ns->ctrl; +@@ -1719,8 +1713,7 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) + + static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) + { +- u32 max_sectors; +- unsigned short bs = 1 << ns->lba_shift; ++ u64 max_blocks; + + if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || + (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) +@@ -1736,11 +1729,12 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) + * nvme_init_identify() if available. 
+ */ + if (ns->ctrl->max_hw_sectors == UINT_MAX) +- max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9; ++ max_blocks = (u64)USHRT_MAX + 1; + else +- max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; ++ max_blocks = ns->ctrl->max_hw_sectors + 1; + +- blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors); ++ blk_queue_max_write_zeroes_sectors(disk->queue, ++ nvme_lba_to_sect(ns, max_blocks)); + } + + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, +@@ -1774,7 +1768,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) + static void nvme_update_disk_info(struct gendisk *disk, + struct nvme_ns *ns, struct nvme_id_ns *id) + { +- sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9); ++ sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); + unsigned short bs = 1 << ns->lba_shift; + u32 atomic_bs, phys_bs, io_opt; + +@@ -1840,6 +1834,7 @@ static void nvme_update_disk_info(struct gendisk *disk, + static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) + { + struct nvme_ns *ns = disk->private_data; ++ u32 iob; + + /* + * If identify namespace failed, use default 512 byte block size so +@@ -1848,7 +1843,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) + ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; + if (ns->lba_shift == 0) + ns->lba_shift = 9; +- ns->noiob = le16_to_cpu(id->noiob); ++ ++ if ((ns->ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && ++ is_power_of_2(ns->ctrl->max_hw_sectors)) ++ iob = ns->ctrl->max_hw_sectors; ++ else ++ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); ++ + ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); + ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); + /* the PI implementation requires metadata equal t10 pi tuple size */ +@@ -1857,8 +1858,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) + else + ns->pi_type = 0; + +- if (ns->noiob) +- nvme_set_chunk_size(ns); ++ if (iob) ++ blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob)); + nvme_update_disk_info(disk, ns, id); + #ifdef CONFIG_NVME_MULTIPATH + if (ns->head->disk) { +@@ -2209,9 +2210,6 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, + blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); + blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); + } +- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && +- is_power_of_2(ctrl->max_hw_sectors)) +- blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); + blk_queue_virt_boundary(q, ctrl->page_size - 1); + if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) + vwc = true; +@@ -2933,10 +2931,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file) + return -EWOULDBLOCK; + } + ++ nvme_get_ctrl(ctrl); ++ if (!try_module_get(ctrl->ops->module)) ++ return -EINVAL; ++ + file->private_data = ctrl; + return 0; + } + ++static int nvme_dev_release(struct inode *inode, struct file *file) ++{ ++ struct nvme_ctrl *ctrl = ++ container_of(inode->i_cdev, struct nvme_ctrl, cdev); ++ ++ module_put(ctrl->ops->module); ++ nvme_put_ctrl(ctrl); ++ return 0; ++} ++ + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) + { + struct nvme_ns *ns; +@@ -2999,6 +3011,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, + static const struct file_operations nvme_dev_fops = { + .owner = THIS_MODULE, + .open = nvme_dev_open, ++ .release = nvme_dev_release, + .unlocked_ioctl = nvme_dev_ioctl, + .compat_ioctl = 
nvme_dev_ioctl, + }; +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c +index da801a14cd13d..65b3dc9cd693b 100644 +--- a/drivers/nvme/host/fc.c ++++ b/drivers/nvme/host/fc.c +@@ -3319,12 +3319,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) + spin_lock_irqsave(&nvme_fc_lock, flags); + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { + if (lport->localport.node_name != laddr.nn || +- lport->localport.port_name != laddr.pn) ++ lport->localport.port_name != laddr.pn || ++ lport->localport.port_state != FC_OBJSTATE_ONLINE) + continue; + + list_for_each_entry(rport, &lport->endp_list, endp_list) { + if (rport->remoteport.node_name != raddr.nn || +- rport->remoteport.port_name != raddr.pn) ++ rport->remoteport.port_name != raddr.pn || ++ rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + continue; + + /* if fail to get reference fall through. Will error */ +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index b7117fb09dd0f..d7132d8cb7c5d 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -384,7 +384,6 @@ struct nvme_ns { + #define NVME_NS_REMOVING 0 + #define NVME_NS_DEAD 1 + #define NVME_NS_ANA_PENDING 2 +- u16 noiob; + + struct nvme_fault_inject fault_inject; + +@@ -429,9 +428,20 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) + return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); + } + +-static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) ++/* ++ * Convert a 512B sector number to a device logical block number. ++ */ ++static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector) ++{ ++ return sector >> (ns->lba_shift - SECTOR_SHIFT); ++} ++ ++/* ++ * Convert a device logical block number to a 512B sector number. 
++ */ ++static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba) + { +- return (sector >> (ns->lba_shift - 9)); ++ return lba << (ns->lba_shift - SECTOR_SHIFT); + } + + static inline void nvme_end_request(struct request *req, __le16 status, +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 75f26d2ec6429..af0b51d1d43e8 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -941,13 +941,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) + volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; + struct request *req; + +- if (unlikely(cqe->command_id >= nvmeq->q_depth)) { +- dev_warn(nvmeq->dev->ctrl.device, +- "invalid id %d completed on queue %d\n", +- cqe->command_id, le16_to_cpu(cqe->sq_id)); +- return; +- } +- + /* + * AEN requests are special as they don't time out and can + * survive any kind of queue freeze and often don't respond to +@@ -962,6 +955,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) + } + + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); ++ if (unlikely(!req)) { ++ dev_warn(nvmeq->dev->ctrl.device, ++ "invalid id %d completed on queue %d\n", ++ cqe->command_id, le16_to_cpu(cqe->sq_id)); ++ return; ++ } ++ + trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); + nvme_end_request(req, cqe->status, cqe->result); + } +diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c +index 88a047b9fa6fa..6ef12017ff4e8 100644 +--- a/drivers/phy/ti/phy-am654-serdes.c ++++ b/drivers/phy/ti/phy-am654-serdes.c +@@ -625,8 +625,10 @@ static int serdes_am654_probe(struct platform_device *pdev) + pm_runtime_enable(dev); + + phy = devm_phy_create(dev, NULL, &ops); +- if (IS_ERR(phy)) +- return PTR_ERR(phy); ++ if (IS_ERR(phy)) { ++ ret = PTR_ERR(phy); ++ goto clk_err; ++ } + + phy_set_drvdata(phy, am654_phy); + phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate); +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +index a767a05fa3a0d..48e2a6c56a83b 100644 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +@@ -414,7 +414,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = { + MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)), + MPP_MODE(15, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS), +- MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)), ++ MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)), + MPP_MODE(16, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS), + MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)), +diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c +index f20326714b9d5..215bf6624e7c3 100644 +--- a/drivers/spi/spi-fsl-espi.c ++++ b/drivers/spi/spi-fsl-espi.c +@@ -555,13 +555,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) + static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) + { + struct fsl_espi *espi = context_data; +- u32 events; ++ u32 events, mask; + + spin_lock(&espi->lock); + + /* Get interrupt events(tx/rx) */ + events = fsl_espi_read_reg(espi, ESPI_SPIE); +- if (!events) { ++ mask = fsl_espi_read_reg(espi, ESPI_SPIM); ++ if (!(events & mask)) { + spin_unlock(&espi->lock); + return IRQ_NONE; + } +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index b4206b0dede54..1f638759a9533 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -1189,7 +1189,6 @@ 
static int ncm_unwrap_ntb(struct gether *port, + const struct ndp_parser_opts *opts = ncm->parser_opts; + unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; + int dgram_counter; +- bool ndp_after_header; + + /* dwSignature */ + if (get_unaligned_le32(tmp) != opts->nth_sign) { +@@ -1216,7 +1215,6 @@ static int ncm_unwrap_ntb(struct gether *port, + } + + ndp_index = get_ncm(&tmp, opts->ndp_index); +- ndp_after_header = false; + + /* Run through all the NDP's in the NTB */ + do { +@@ -1232,8 +1230,6 @@ static int ncm_unwrap_ntb(struct gether *port, + ndp_index); + goto err; + } +- if (ndp_index == opts->nth_size) +- ndp_after_header = true; + + /* + * walk through NDP +@@ -1312,37 +1308,13 @@ static int ncm_unwrap_ntb(struct gether *port, + index2 = get_ncm(&tmp, opts->dgram_item_len); + dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + +- if (index2 == 0 || dg_len2 == 0) +- break; +- + /* wDatagramIndex[1] */ +- if (ndp_after_header) { +- if (index2 < opts->nth_size + opts->ndp_size) { +- INFO(port->func.config->cdev, +- "Bad index: %#X\n", index2); +- goto err; +- } +- } else { +- if (index2 < opts->nth_size + opts->dpe_size) { +- INFO(port->func.config->cdev, +- "Bad index: %#X\n", index2); +- goto err; +- } +- } + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + +- /* wDatagramLength[1] */ +- if ((dg_len2 < 14 + crc_len) || +- (dg_len2 > frame_max)) { +- INFO(port->func.config->cdev, +- "Bad dgram length: %#X\n", dg_len); +- goto err; +- } +- + /* + * Copy the data into a new skb. + * This ensures the truesize is correct +@@ -1359,6 +1331,8 @@ static int ncm_unwrap_ntb(struct gether *port, + ndp_len -= 2 * (opts->dgram_item_len * 2); + + dgram_counter++; ++ if (index2 == 0 || dg_len2 == 0) ++ break; + } while (ndp_len > 2 * (opts->dgram_item_len * 2)); + } while (ndp_index); + +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index ca68a27b98edd..f21f5bfbb78dc 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -384,6 +384,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) + return val < vq->num; + } + ++static struct virtio_transport vhost_transport = { ++ .transport = { ++ .get_local_cid = vhost_transport_get_local_cid, ++ ++ .init = virtio_transport_do_socket_init, ++ .destruct = virtio_transport_destruct, ++ .release = virtio_transport_release, ++ .connect = virtio_transport_connect, ++ .shutdown = virtio_transport_shutdown, ++ .cancel_pkt = vhost_transport_cancel_pkt, ++ ++ .dgram_enqueue = virtio_transport_dgram_enqueue, ++ .dgram_dequeue = virtio_transport_dgram_dequeue, ++ .dgram_bind = virtio_transport_dgram_bind, ++ .dgram_allow = virtio_transport_dgram_allow, ++ ++ .stream_enqueue = virtio_transport_stream_enqueue, ++ .stream_dequeue = virtio_transport_stream_dequeue, ++ .stream_has_data = virtio_transport_stream_has_data, ++ .stream_has_space = virtio_transport_stream_has_space, ++ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, ++ .stream_is_active = virtio_transport_stream_is_active, ++ .stream_allow = virtio_transport_stream_allow, ++ ++ .notify_poll_in = virtio_transport_notify_poll_in, ++ .notify_poll_out = virtio_transport_notify_poll_out, ++ .notify_recv_init = virtio_transport_notify_recv_init, ++ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, ++ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, ++ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, ++ .notify_send_init = 
virtio_transport_notify_send_init, ++ .notify_send_pre_block = virtio_transport_notify_send_pre_block, ++ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, ++ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, ++ ++ .set_buffer_size = virtio_transport_set_buffer_size, ++ .set_min_buffer_size = virtio_transport_set_min_buffer_size, ++ .set_max_buffer_size = virtio_transport_set_max_buffer_size, ++ .get_buffer_size = virtio_transport_get_buffer_size, ++ .get_min_buffer_size = virtio_transport_get_min_buffer_size, ++ .get_max_buffer_size = virtio_transport_get_max_buffer_size, ++ }, ++ ++ .send_pkt = vhost_transport_send_pkt, ++}; ++ + static void vhost_vsock_handle_tx_kick(struct vhost_work *work) + { + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, +@@ -440,7 +486,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) + if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && + le64_to_cpu(pkt->hdr.dst_cid) == + vhost_transport_get_local_cid()) +- virtio_transport_recv_pkt(pkt); ++ virtio_transport_recv_pkt(&vhost_transport, pkt); + else + virtio_transport_free_pkt(pkt); + +@@ -793,52 +839,6 @@ static struct miscdevice vhost_vsock_misc = { + .fops = &vhost_vsock_fops, + }; + +-static struct virtio_transport vhost_transport = { +- .transport = { +- .get_local_cid = vhost_transport_get_local_cid, +- +- .init = virtio_transport_do_socket_init, +- .destruct = virtio_transport_destruct, +- .release = virtio_transport_release, +- .connect = virtio_transport_connect, +- .shutdown = virtio_transport_shutdown, +- .cancel_pkt = vhost_transport_cancel_pkt, +- +- .dgram_enqueue = virtio_transport_dgram_enqueue, +- .dgram_dequeue = virtio_transport_dgram_dequeue, +- .dgram_bind = virtio_transport_dgram_bind, +- .dgram_allow = virtio_transport_dgram_allow, +- +- .stream_enqueue = virtio_transport_stream_enqueue, +- .stream_dequeue = virtio_transport_stream_dequeue, +- .stream_has_data = virtio_transport_stream_has_data, +- .stream_has_space = virtio_transport_stream_has_space, +- .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, +- .stream_is_active = virtio_transport_stream_is_active, +- .stream_allow = virtio_transport_stream_allow, +- +- .notify_poll_in = virtio_transport_notify_poll_in, +- .notify_poll_out = virtio_transport_notify_poll_out, +- .notify_recv_init = virtio_transport_notify_recv_init, +- .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, +- .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, +- .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, +- .notify_send_init = virtio_transport_notify_send_init, +- .notify_send_pre_block = virtio_transport_notify_send_pre_block, +- .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, +- .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, +- +- .set_buffer_size = virtio_transport_set_buffer_size, +- .set_min_buffer_size = virtio_transport_set_min_buffer_size, +- .set_max_buffer_size = virtio_transport_set_max_buffer_size, +- .get_buffer_size = virtio_transport_get_buffer_size, +- .get_min_buffer_size = virtio_transport_get_min_buffer_size, +- .get_max_buffer_size = virtio_transport_get_max_buffer_size, +- }, +- +- .send_pkt = vhost_transport_send_pkt, +-}; +- + static int __init vhost_vsock_init(void) + { + int ret; +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c +index 48890826b5e66..196bd241e701a 100644 +--- a/fs/btrfs/dev-replace.c ++++ b/fs/btrfs/dev-replace.c +@@ 
-562,6 +562,37 @@ static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info) + wake_up(&fs_info->dev_replace.replace_wait); + } + ++/* ++ * When finishing the device replace, before swapping the source device with the ++ * target device we must update the chunk allocation state in the target device, ++ * as it is empty because replace works by directly copying the chunks and not ++ * through the normal chunk allocation path. ++ */ ++static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev, ++ struct btrfs_device *tgtdev) ++{ ++ struct extent_state *cached_state = NULL; ++ u64 start = 0; ++ u64 found_start; ++ u64 found_end; ++ int ret = 0; ++ ++ lockdep_assert_held(&srcdev->fs_info->chunk_mutex); ++ ++ while (!find_first_extent_bit(&srcdev->alloc_state, start, ++ &found_start, &found_end, ++ CHUNK_ALLOCATED, &cached_state)) { ++ ret = set_extent_bits(&tgtdev->alloc_state, found_start, ++ found_end, CHUNK_ALLOCATED); ++ if (ret) ++ break; ++ start = found_end + 1; ++ } ++ ++ free_extent_state(cached_state); ++ return ret; ++} ++ + static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, + int scrub_ret) + { +@@ -636,8 +667,14 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, + dev_replace->time_stopped = ktime_get_real_seconds(); + dev_replace->item_needs_writeback = 1; + +- /* replace old device with new one in mapping tree */ ++ /* ++ * Update allocation state in the new device and replace the old device ++ * with the new one in the mapping tree. ++ */ + if (!scrub_ret) { ++ scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device); ++ if (scrub_ret) ++ goto error; + btrfs_dev_replace_update_device_in_mapping_tree(fs_info, + src_device, + tgt_device); +@@ -648,6 +685,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, + btrfs_dev_name(src_device), + src_device->devid, + rcu_str_deref(tgt_device->name), scrub_ret); ++error: + up_write(&dev_replace->rwsem); + mutex_unlock(&fs_info->chunk_mutex); + mutex_unlock(&fs_info->fs_devices->device_list_mutex); +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index ae1d32344f7ac..339453ac834cc 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -218,8 +218,7 @@ struct eventpoll { + struct file *file; + + /* used to optimize loop detection check */ +- int visited; +- struct list_head visited_list_link; ++ u64 gen; + + #ifdef CONFIG_NET_RX_BUSY_POLL + /* used to track busy poll napi_id */ +@@ -269,6 +268,8 @@ static long max_user_watches __read_mostly; + */ + static DEFINE_MUTEX(epmutex); + ++static u64 loop_check_gen = 0; ++ + /* Used to check for epoll file descriptor inclusion loops */ + static struct nested_calls poll_loop_ncalls; + +@@ -278,9 +279,6 @@ static struct kmem_cache *epi_cache __read_mostly; + /* Slab cache used to allocate "struct eppoll_entry" */ + static struct kmem_cache *pwq_cache __read_mostly; + +-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ +-static LIST_HEAD(visited_list); +- + /* + * List of files with newly added links, where we may need to limit the number + * of emanating paths. Protected by the epmutex. 
+@@ -1455,7 +1453,7 @@ static int reverse_path_check(void) + + static int ep_create_wakeup_source(struct epitem *epi) + { +- const char *name; ++ struct name_snapshot n; + struct wakeup_source *ws; + + if (!epi->ep->ws) { +@@ -1464,8 +1462,9 @@ static int ep_create_wakeup_source(struct epitem *epi) + return -ENOMEM; + } + +- name = epi->ffd.file->f_path.dentry->d_name.name; +- ws = wakeup_source_register(NULL, name); ++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); ++ ws = wakeup_source_register(NULL, n.name.name); ++ release_dentry_name_snapshot(&n); + + if (!ws) + return -ENOMEM; +@@ -1527,6 +1526,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, + RCU_INIT_POINTER(epi->ws, NULL); + } + ++ /* Add the current item to the list of active epoll hook for this file */ ++ spin_lock(&tfile->f_lock); ++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); ++ spin_unlock(&tfile->f_lock); ++ ++ /* ++ * Add the current item to the RB tree. All RB tree operations are ++ * protected by "mtx", and ep_insert() is called with "mtx" held. ++ */ ++ ep_rbtree_insert(ep, epi); ++ ++ /* now check if we've created too many backpaths */ ++ error = -EINVAL; ++ if (full_check && reverse_path_check()) ++ goto error_remove_epi; ++ + /* Initialize the poll table using the queue callback */ + epq.epi = epi; + init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); +@@ -1549,22 +1564,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, + if (epi->nwait < 0) + goto error_unregister; + +- /* Add the current item to the list of active epoll hook for this file */ +- spin_lock(&tfile->f_lock); +- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); +- spin_unlock(&tfile->f_lock); +- +- /* +- * Add the current item to the RB tree. All RB tree operations are +- * protected by "mtx", and ep_insert() is called with "mtx" held. +- */ +- ep_rbtree_insert(ep, epi); +- +- /* now check if we've created too many backpaths */ +- error = -EINVAL; +- if (full_check && reverse_path_check()) +- goto error_remove_epi; +- + /* We have to drop the new item inside our item list to keep track of it */ + write_lock_irq(&ep->lock); + +@@ -1593,6 +1592,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, + + return 0; + ++error_unregister: ++ ep_unregister_pollwait(ep, epi); + error_remove_epi: + spin_lock(&tfile->f_lock); + list_del_rcu(&epi->fllink); +@@ -1600,9 +1601,6 @@ error_remove_epi: + + rb_erase_cached(&epi->rbn, &ep->rbr); + +-error_unregister: +- ep_unregister_pollwait(ep, epi); +- + /* + * We need to do this because an event could have been arrived on some + * allocated wait queue. 
Note that we don't care about the ep->ovflist +@@ -1969,13 +1967,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + struct epitem *epi; + + mutex_lock_nested(&ep->mtx, call_nests + 1); +- ep->visited = 1; +- list_add(&ep->visited_list_link, &visited_list); ++ ep->gen = loop_check_gen; + for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + ep_tovisit = epi->ffd.file->private_data; +- if (ep_tovisit->visited) ++ if (ep_tovisit->gen == loop_check_gen) + continue; + error = ep_call_nested(&poll_loop_ncalls, + ep_loop_check_proc, epi->ffd.file, +@@ -2016,18 +2013,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + */ + static int ep_loop_check(struct eventpoll *ep, struct file *file) + { +- int ret; +- struct eventpoll *ep_cur, *ep_next; +- +- ret = ep_call_nested(&poll_loop_ncalls, ++ return ep_call_nested(&poll_loop_ncalls, + ep_loop_check_proc, file, ep, current); +- /* clear visited list */ +- list_for_each_entry_safe(ep_cur, ep_next, &visited_list, +- visited_list_link) { +- ep_cur->visited = 0; +- list_del(&ep_cur->visited_list_link); +- } +- return ret; + } + + static void clear_tfile_check_list(void) +@@ -2189,6 +2176,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + mutex_lock_nested(&ep->mtx, 0); + if (op == EPOLL_CTL_ADD) { + if (!list_empty(&f.file->f_ep_links) || ++ ep->gen == loop_check_gen || + is_file_epoll(tf.file)) { + full_check = 1; + mutex_unlock(&ep->mtx); +@@ -2249,6 +2237,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + error_tgt_fput: + if (full_check) { + clear_tfile_check_list(); ++ loop_check_gen++; + mutex_unlock(&epmutex); + } + +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index f8d8a8e34b808..ab4fc1255aca8 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -3074,11 +3074,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) + ssize_t ret = 0; + struct file *file = iocb->ki_filp; + struct fuse_file *ff = file->private_data; +- bool async_dio = ff->fc->async_dio; + loff_t pos = 0; + struct inode *inode; + loff_t i_size; +- size_t count = iov_iter_count(iter); ++ size_t count = iov_iter_count(iter), shortened = 0; + loff_t offset = iocb->ki_pos; + struct fuse_io_priv *io; + +@@ -3086,17 +3085,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) + inode = file->f_mapping->host; + i_size = i_size_read(inode); + +- if ((iov_iter_rw(iter) == READ) && (offset > i_size)) ++ if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) + return 0; + +- /* optimization for short read */ +- if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { +- if (offset >= i_size) +- return 0; +- iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); +- count = iov_iter_count(iter); +- } +- + io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); + if (!io) + return -ENOMEM; +@@ -3112,15 +3103,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) + * By default, we want to optimize all I/Os with async request + * submission to the client filesystem if supported. 
+ */ +- io->async = async_dio; ++ io->async = ff->fc->async_dio; + io->iocb = iocb; + io->blocking = is_sync_kiocb(iocb); + ++ /* optimization for short read */ ++ if (io->async && !io->write && offset + count > i_size) { ++ iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); ++ shortened = count - iov_iter_count(iter); ++ count -= shortened; ++ } ++ + /* + * We cannot asynchronously extend the size of a file. + * In such case the aio will behave exactly like sync io. + */ +- if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE) ++ if ((offset + count > i_size) && io->write) + io->blocking = true; + + if (io->async && io->blocking) { +@@ -3138,6 +3136,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) + } else { + ret = __fuse_direct_read(io, iter, &pos); + } ++ iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); + + if (io->async) { + bool blocking = io->blocking; +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 05ed7be8a6345..188b17a3b19eb 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); + + do { ++ if (entry->label) ++ entry->label->len = NFS4_MAXLABELLEN; ++ + status = xdr_decode(desc, entry, &stream); + if (status != 0) { + if (status == -EAGAIN) +diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c +index f780e223b1185..239c9548b1568 100644 +--- a/fs/xfs/xfs_iomap.c ++++ b/fs/xfs/xfs_iomap.c +@@ -1002,9 +1002,15 @@ xfs_file_iomap_begin( + * I/O, which must be block aligned, we need to report the + * newly allocated address. If the data fork has a hole, copy + * the COW fork mapping to avoid allocating to the data fork. ++ * ++ * Otherwise, ensure that the imap range does not extend past ++ * the range allocated/found in cmap. + */ + if (directio || imap.br_startblock == HOLESTARTBLOCK) + imap = cmap; ++ else ++ xfs_trim_extent(&imap, cmap.br_startoff, ++ cmap.br_blockcount); + + end_fsb = imap.br_startoff + imap.br_blockcount; + length = XFS_FSB_TO_B(mp, end_fsb) - offset; +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 62a2ec9f17df8..c1bf9956256f6 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -419,7 +419,7 @@ static inline void free_part_info(struct hd_struct *part) + kfree(part->info); + } + +-void update_io_ticks(struct hd_struct *part, unsigned long now); ++void update_io_ticks(struct hd_struct *part, unsigned long now, bool end); + + /* block/genhd.c */ + extern void device_add_disk(struct device *parent, struct gendisk *disk, +diff --git a/include/linux/memstick.h b/include/linux/memstick.h +index 216a713bef7f0..1198ea3d40126 100644 +--- a/include/linux/memstick.h ++++ b/include/linux/memstick.h +@@ -281,6 +281,7 @@ struct memstick_host { + + struct memstick_dev *card; + unsigned int retries; ++ bool removing; + + /* Notify the host that some requests are pending. 
*/ + void (*request)(struct memstick_host *host); +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 3285dae06c030..34119f393a802 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -2208,7 +2208,7 @@ static inline void zero_resv_unavail(void) {} + + extern void set_dma_reserve(unsigned long new_dma_reserve); + extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, +- enum memmap_context, struct vmem_altmap *); ++ enum meminit_context, struct vmem_altmap *); + extern void setup_per_zone_wmarks(void); + extern int __meminit init_per_zone_wmark_min(void); + extern void mem_init(void); +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 85804ba622152..a90aba3d6afb4 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -822,10 +822,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, + unsigned int alloc_flags); + bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + unsigned long mark, int classzone_idx); +-enum memmap_context { +- MEMMAP_EARLY, +- MEMMAP_HOTPLUG, ++/* ++ * Memory initialization context, use to differentiate memory added by ++ * the platform statically or via memory hotplug interface. ++ */ ++enum meminit_context { ++ MEMINIT_EARLY, ++ MEMINIT_HOTPLUG, + }; ++ + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, + unsigned long size); + +diff --git a/include/linux/node.h b/include/linux/node.h +index 4866f32a02d8d..014ba3ab2efd8 100644 +--- a/include/linux/node.h ++++ b/include/linux/node.h +@@ -99,11 +99,13 @@ extern struct node *node_devices[]; + typedef void (*node_registration_func_t)(struct node *); + + #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) +-extern int link_mem_sections(int nid, unsigned long start_pfn, +- unsigned long end_pfn); ++int link_mem_sections(int nid, unsigned long start_pfn, ++ unsigned long end_pfn, ++ enum meminit_context context); + #else + static inline int link_mem_sections(int nid, unsigned long start_pfn, +- unsigned long end_pfn) ++ unsigned long end_pfn, ++ enum meminit_context context) + { + return 0; + } +@@ -128,7 +130,8 @@ static inline int register_one_node(int nid) + if (error) + return error; + /* link memory sections under this node */ +- error = link_mem_sections(nid, start_pfn, end_pfn); ++ error = link_mem_sections(nid, start_pfn, end_pfn, ++ MEMINIT_EARLY); + } + + return error; +diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h +index 07875ccc7bb50..b139f76060a65 100644 +--- a/include/linux/virtio_vsock.h ++++ b/include/linux/virtio_vsock.h +@@ -150,7 +150,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk, + + void virtio_transport_destruct(struct vsock_sock *vsk); + +-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt); ++void virtio_transport_recv_pkt(struct virtio_transport *t, ++ struct virtio_vsock_pkt *pkt); + void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); + void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); + u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 705852c1724aa..fbba31baef53c 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -6382,16 +6382,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, + { + int bit; + +- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) +- return; +- + bit = 
trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); + if (bit < 0) + return; + + preempt_disable_notrace(); + +- op->func(ip, parent_ip, op, regs); ++ if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) ++ op->func(ip, parent_ip, op, regs); + + preempt_enable_notrace(); + trace_clear_recursion(bit); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index db8162b34ef64..5b2a664812b10 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3584,14 +3584,14 @@ unsigned long trace_total_entries(struct trace_array *tr) + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n" +- "# / _-----=> irqs-off \n" +- "# | / _----=> need-resched \n" +- "# || / _---=> hardirq/softirq \n" +- "# ||| / _--=> preempt-depth \n" +- "# |||| / delay \n" +- "# cmd pid ||||| time | caller \n" +- "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _------=> CPU# \n" ++ "# / _-----=> irqs-off \n" ++ "# | / _----=> need-resched \n" ++ "# || / _---=> hardirq/softirq \n" ++ "# ||| / _--=> preempt-depth \n" ++ "# |||| / delay \n" ++ "# cmd pid ||||| time | caller \n" ++ "# \\ / ||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -3612,26 +3612,26 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, + + print_event_info(buf, m); + +- seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); +- seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); ++ seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); ++ seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); + } + + static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, + unsigned int flags) + { + bool tgid = flags & TRACE_ITER_RECORD_TGID; +- const char *space = " "; +- int prec = tgid ? 10 : 2; ++ const char *space = " "; ++ int prec = tgid ? 
12 : 2; + + print_event_info(buf, m); + +- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); +- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); +- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); +- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); +- seq_printf(m, "# %.*s||| / delay\n", prec, space); +- seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); +- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); ++ seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); ++ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); ++ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); ++ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); ++ seq_printf(m, "# %.*s||| / delay\n", prec, space); ++ seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); ++ seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); + } + + void +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index d54ce252b05a8..a0a45901dc027 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -482,7 +482,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) + + trace_find_cmdline(entry->pid, comm); + +- trace_seq_printf(s, "%8.8s-%-5d %3d", ++ trace_seq_printf(s, "%8.8s-%-7d %3d", + comm, entry->pid, cpu); + + return trace_print_lat_fmt(s, entry); +@@ -573,15 +573,15 @@ int trace_print_context(struct trace_iterator *iter) + + trace_find_cmdline(entry->pid, comm); + +- trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); ++ trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid); + + if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { + unsigned int tgid = trace_find_tgid(entry->pid); + + if (!tgid) +- trace_seq_printf(s, "(-----) "); ++ trace_seq_printf(s, "(-------) "); + else +- trace_seq_printf(s, "(%5d) ", tgid); ++ trace_seq_printf(s, "(%7d) ", tgid); + } + + trace_seq_printf(s, "[%03d] ", iter->cpu); +@@ -624,7 +624,7 @@ int trace_print_lat_context(struct trace_iterator *iter) + trace_find_cmdline(entry->pid, comm); + + trace_seq_printf( +- s, "%16s %5d %3d %d %08x %08lx ", ++ s, "%16s %7d %3d %d %08x %08lx ", + comm, entry->pid, iter->cpu, entry->flags, + entry->preempt_count, iter->idx); + } else { +@@ -905,7 +905,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, + S = task_index_to_char(field->prev_state); + trace_find_cmdline(field->next_pid, comm); + trace_seq_printf(&iter->seq, +- " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", ++ " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n", + field->prev_pid, + field->prev_prio, + S, delim, +diff --git a/lib/random32.c b/lib/random32.c +index 3d749abb9e80d..1786f78bf4c53 100644 +--- a/lib/random32.c ++++ b/lib/random32.c +@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) + } + #endif + +-DEFINE_PER_CPU(struct rnd_state, net_rand_state); ++DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; + + /** + * prandom_u32_state - seeded pseudo-random number generator. 
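The trace header and trace_output.c hunks above widen every task PID and TGID column from 5 to 7 characters. The likely motivation, stated here as background rather than taken from the patch text itself, is that pid_max can be raised as high as PID_MAX_LIMIT (4194304 on 64-bit builds), and a 7-digit PID overflows a %-5d column and shifts every field after it. A small standalone C illustration of the misalignment; the sample PIDs are made up:

#include <stdio.h>

int main(void)
{
	/* 4194304 is the 64-bit PID_MAX_LIMIT; it needs 7 digits. */
	int pids[] = { 1, 4281, 4194304 };
	int i;

	for (i = 0; i < 3; i++)		/* old 5-wide column: last row misaligns */
		printf("%16s-%-5d [%03d]\n", "bash", pids[i], 0);
	for (i = 0; i < 3; i++)		/* new 7-wide column: rows stay aligned */
		printf("%16s-%-7d [%03d]\n", "bash", pids[i], 0);
	return 0;
}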
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 3eb0b311b4a12..308beca3ffebc 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -725,7 +725,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, + * are reserved so nobody should be touching them so we should be safe + */ + memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, +- MEMMAP_HOTPLUG, altmap); ++ MEMINIT_HOTPLUG, altmap); + + set_zone_contiguous(zone); + } +@@ -1082,7 +1082,8 @@ int __ref add_memory_resource(int nid, struct resource *res) + } + + /* link memory sections under this node.*/ +- ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); ++ ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), ++ MEMINIT_HOTPLUG); + BUG_ON(ret); + + /* create new memmap entry */ +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 67a9943aa595f..373ca57807589 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5875,7 +5875,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn) + * done. Non-atomic initialization, single-pass. + */ + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, +- unsigned long start_pfn, enum memmap_context context, ++ unsigned long start_pfn, enum meminit_context context, + struct vmem_altmap *altmap) + { + unsigned long pfn, end_pfn = start_pfn + size; +@@ -5907,7 +5907,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, + * There can be holes in boot-time mem_map[]s handed to this + * function. They do not exist on hotplugged memory. + */ +- if (context == MEMMAP_EARLY) { ++ if (context == MEMINIT_EARLY) { + if (!early_pfn_valid(pfn)) + continue; + if (!early_pfn_in_nid(pfn, nid)) +@@ -5920,7 +5920,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, + + page = pfn_to_page(pfn); + __init_single_page(page, pfn, zone, nid); +- if (context == MEMMAP_HOTPLUG) ++ if (context == MEMINIT_HOTPLUG) + __SetPageReserved(page); + + /* +@@ -6002,7 +6002,7 @@ void __ref memmap_init_zone_device(struct zone *zone, + * check here not to call set_pageblock_migratetype() against + * pfn out of zone. 
+ * +- * Please note that MEMMAP_HOTPLUG path doesn't clear memmap ++ * Please note that MEMINIT_HOTPLUG path doesn't clear memmap + * because this is done early in section_activate() + */ + if (!(pfn & (pageblock_nr_pages - 1))) { +@@ -6028,7 +6028,7 @@ static void __meminit zone_init_free_lists(struct zone *zone) + void __meminit __weak memmap_init(unsigned long size, int nid, + unsigned long zone, unsigned long start_pfn) + { +- memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL); ++ memmap_init_zone(size, nid, zone, start_pfn, MEMINIT_EARLY, NULL); + } + + static int zone_batchsize(struct zone *zone) +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index e5fb9002d3147..3ab85e1e38d82 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -419,7 +419,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, + else if (status->bw == RATE_INFO_BW_5) + channel_flags |= IEEE80211_CHAN_QUARTER; + +- if (status->band == NL80211_BAND_5GHZ) ++ if (status->band == NL80211_BAND_5GHZ || ++ status->band == NL80211_BAND_6GHZ) + channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; + else if (status->encoding != RX_ENC_LEGACY) + channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c +index ccdcb9ad9ac72..aabc63dadf176 100644 +--- a/net/mac80211/vht.c ++++ b/net/mac80211/vht.c +@@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + /* take some capabilities as-is */ + cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); + vht_cap->cap = cap_info; +- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | +- IEEE80211_VHT_CAP_RXLDPC | ++ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_VHT_TXOP_PS | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | +@@ -180,6 +177,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + ++ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, ++ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); ++ + /* and some based on our own capabilities */ + switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index aa8adf930b3ce..b7f0d52e5f1b6 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -1141,6 +1141,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], + if (!tb[CTA_TUPLE_IP]) + return -EINVAL; + ++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) ++ return -EOPNOTSUPP; + tuple->src.l3num = l3num; + + err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c +index 861ec9a671f9d..5905f0cddc895 100644 +--- a/net/vmw_vsock/virtio_transport.c ++++ b/net/vmw_vsock/virtio_transport.c +@@ -86,33 +86,6 @@ out_rcu: + return ret; + } + +-static void virtio_transport_loopback_work(struct work_struct *work) +-{ +- struct virtio_vsock *vsock = +- container_of(work, struct virtio_vsock, loopback_work); +- LIST_HEAD(pkts); +- +- spin_lock_bh(&vsock->loopback_list_lock); +- list_splice_init(&vsock->loopback_list, &pkts); +- spin_unlock_bh(&vsock->loopback_list_lock); +- +- mutex_lock(&vsock->rx_lock); +- +- if 
(!vsock->rx_run) +- goto out; +- +- while (!list_empty(&pkts)) { +- struct virtio_vsock_pkt *pkt; +- +- pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); +- list_del_init(&pkt->list); +- +- virtio_transport_recv_pkt(pkt); +- } +-out: +- mutex_unlock(&vsock->rx_lock); +-} +- + static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock, + struct virtio_vsock_pkt *pkt) + { +@@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock) + return val < virtqueue_get_vring_size(vq); + } + +-static void virtio_transport_rx_work(struct work_struct *work) +-{ +- struct virtio_vsock *vsock = +- container_of(work, struct virtio_vsock, rx_work); +- struct virtqueue *vq; +- +- vq = vsock->vqs[VSOCK_VQ_RX]; +- +- mutex_lock(&vsock->rx_lock); +- +- if (!vsock->rx_run) +- goto out; +- +- do { +- virtqueue_disable_cb(vq); +- for (;;) { +- struct virtio_vsock_pkt *pkt; +- unsigned int len; +- +- if (!virtio_transport_more_replies(vsock)) { +- /* Stop rx until the device processes already +- * pending replies. Leave rx virtqueue +- * callbacks disabled. +- */ +- goto out; +- } +- +- pkt = virtqueue_get_buf(vq, &len); +- if (!pkt) { +- break; +- } +- +- vsock->rx_buf_nr--; +- +- /* Drop short/long packets */ +- if (unlikely(len < sizeof(pkt->hdr) || +- len > sizeof(pkt->hdr) + pkt->len)) { +- virtio_transport_free_pkt(pkt); +- continue; +- } +- +- pkt->len = len - sizeof(pkt->hdr); +- virtio_transport_deliver_tap_pkt(pkt); +- virtio_transport_recv_pkt(pkt); +- } +- } while (!virtqueue_enable_cb(vq)); +- +-out: +- if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) +- virtio_vsock_rx_fill(vsock); +- mutex_unlock(&vsock->rx_lock); +-} +- + /* event_lock must be held */ + static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, + struct virtio_vsock_event *event) +@@ -586,6 +506,86 @@ static struct virtio_transport virtio_transport = { + .send_pkt = virtio_transport_send_pkt, + }; + ++static void virtio_transport_loopback_work(struct work_struct *work) ++{ ++ struct virtio_vsock *vsock = ++ container_of(work, struct virtio_vsock, loopback_work); ++ LIST_HEAD(pkts); ++ ++ spin_lock_bh(&vsock->loopback_list_lock); ++ list_splice_init(&vsock->loopback_list, &pkts); ++ spin_unlock_bh(&vsock->loopback_list_lock); ++ ++ mutex_lock(&vsock->rx_lock); ++ ++ if (!vsock->rx_run) ++ goto out; ++ ++ while (!list_empty(&pkts)) { ++ struct virtio_vsock_pkt *pkt; ++ ++ pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); ++ list_del_init(&pkt->list); ++ ++ virtio_transport_recv_pkt(&virtio_transport, pkt); ++ } ++out: ++ mutex_unlock(&vsock->rx_lock); ++} ++ ++static void virtio_transport_rx_work(struct work_struct *work) ++{ ++ struct virtio_vsock *vsock = ++ container_of(work, struct virtio_vsock, rx_work); ++ struct virtqueue *vq; ++ ++ vq = vsock->vqs[VSOCK_VQ_RX]; ++ ++ mutex_lock(&vsock->rx_lock); ++ ++ if (!vsock->rx_run) ++ goto out; ++ ++ do { ++ virtqueue_disable_cb(vq); ++ for (;;) { ++ struct virtio_vsock_pkt *pkt; ++ unsigned int len; ++ ++ if (!virtio_transport_more_replies(vsock)) { ++ /* Stop rx until the device processes already ++ * pending replies. Leave rx virtqueue ++ * callbacks disabled. 
++ */ ++ goto out; ++ } ++ ++ pkt = virtqueue_get_buf(vq, &len); ++ if (!pkt) { ++ break; ++ } ++ ++ vsock->rx_buf_nr--; ++ ++ /* Drop short/long packets */ ++ if (unlikely(len < sizeof(pkt->hdr) || ++ len > sizeof(pkt->hdr) + pkt->len)) { ++ virtio_transport_free_pkt(pkt); ++ continue; ++ } ++ ++ pkt->len = len - sizeof(pkt->hdr); ++ virtio_transport_deliver_tap_pkt(pkt); ++ virtio_transport_recv_pkt(&virtio_transport, pkt); ++ } ++ } while (!virtqueue_enable_cb(vq)); ++ ++out: ++ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) ++ virtio_vsock_rx_fill(vsock); ++ mutex_unlock(&vsock->rx_lock); ++} ++ + static int virtio_vsock_probe(struct virtio_device *vdev) + { + vq_callback_t *callbacks[] = { +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c +index fb2060dffb0af..efbb521bff135 100644 +--- a/net/vmw_vsock/virtio_transport_common.c ++++ b/net/vmw_vsock/virtio_transport_common.c +@@ -696,9 +696,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk, + /* Normally packets are associated with a socket. There may be no socket if an + * attempt was made to connect to a socket that does not exist. + */ +-static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) ++static int virtio_transport_reset_no_sock(const struct virtio_transport *t, ++ struct virtio_vsock_pkt *pkt) + { +- const struct virtio_transport *t; + struct virtio_vsock_pkt *reply; + struct virtio_vsock_pkt_info info = { + .op = VIRTIO_VSOCK_OP_RST, +@@ -718,7 +718,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) + if (!reply) + return -ENOMEM; + +- t = virtio_transport_get_ops(); + if (!t) { + virtio_transport_free_pkt(reply); + return -ENOTCONN; +@@ -1060,7 +1059,8 @@ static bool virtio_transport_space_update(struct sock *sk, + /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex + * lock. 
+ */ +-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) ++void virtio_transport_recv_pkt(struct virtio_transport *t, ++ struct virtio_vsock_pkt *pkt) + { + struct sockaddr_vm src, dst; + struct vsock_sock *vsk; +@@ -1082,7 +1082,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) + le32_to_cpu(pkt->hdr.fwd_cnt)); + + if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) { +- (void)virtio_transport_reset_no_sock(pkt); ++ (void)virtio_transport_reset_no_sock(t, pkt); + goto free_pkt; + } + +@@ -1093,7 +1093,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) + if (!sk) { + sk = vsock_find_bound_socket(&dst); + if (!sk) { +- (void)virtio_transport_reset_no_sock(pkt); ++ (void)virtio_transport_reset_no_sock(t, pkt); + goto free_pkt; + } + } +@@ -1127,6 +1127,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) + virtio_transport_free_pkt(pkt); + break; + default: ++ (void)virtio_transport_reset_no_sock(t, pkt); + virtio_transport_free_pkt(pkt); + break; + } +diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile +index b5a5b1c548c9b..c2dac994896b4 100644 +--- a/scripts/dtc/Makefile ++++ b/scripts/dtc/Makefile +@@ -9,7 +9,7 @@ dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \ + dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o + + # Source files need to get at the userspace version of libfdt_env.h to compile +-HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt ++HOST_EXTRACFLAGS += -I $(srctree)/$(src)/libfdt + + ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),) + ifneq ($(CHECK_DTBS),) +diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c +index 0f257139b003e..7703f01183854 100644 +--- a/tools/io_uring/io_uring-bench.c ++++ b/tools/io_uring/io_uring-bench.c +@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s) + s->nr_files); + } + +-static int gettid(void) ++static int lk_gettid(void) + { + return syscall(__NR_gettid); + } +@@ -281,7 +281,7 @@ static void *submitter_fn(void *data) + struct io_sq_ring *ring = &s->sq_ring; + int ret, prepped; + +- printf("submitter=%d\n", gettid()); ++ printf("submitter=%d\n", lk_gettid()); + + srand48_r(pthread_self(), &s->rand); + +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile +index d045707e7c9a4..283caeaaffc30 100644 +--- a/tools/lib/bpf/Makefile ++++ b/tools/lib/bpf/Makefile +@@ -59,7 +59,7 @@ FEATURE_USER = .libbpf + FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx + FEATURE_DISPLAY = libelf bpf + +-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi ++INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi + FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES) + + check_feat := 1 |
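The tools/io_uring hunk above renames the benchmark's private gettid() wrapper to lk_gettid(). glibc 2.30 started declaring its own gettid() in unistd.h, so a local function with that name now conflicts with the libc prototype; keeping the raw syscall under a distinct name sidesteps the clash without changing behaviour. A self-contained version of the renamed helper (the build line is an assumption, not part of the patch):

/* build: gcc -D_GNU_SOURCE -o tid tid.c */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Same pattern as the patched io_uring-bench.c: a raw syscall wrapper
 * under a name that cannot collide with glibc's gettid(). */
static int lk_gettid(void)
{
	return (int)syscall(__NR_gettid);
}

int main(void)
{
	printf("submitter=%d\n", lk_gettid());
	return 0;
}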