From:
Subject: Re: Linux 5.7.7
Date:
diff --git a/Makefile b/Makefile
index f928cd1dfdc17..5a5e329d9241e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 7
-SUBLEVEL = 6
+SUBLEVEL = 7
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
index 4da719098028f..f0b222201b867 100644
--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
+++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
@@ -88,7 +88,6 @@
AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
- AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
>;
};

diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index a35f5052d76f6..ed6634d34c3c7 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -335,7 +335,7 @@
<0x47400010 0x4>;
reg-names = "rev", "sysc";
ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
- SYSC_OMAP2_SOFTRESET)>;
+ SYSC_OMAP4_SOFTRESET)>;
ti,sysc-midle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>;
@@ -347,7 +347,7 @@
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x0 0x47400000 0x5000>;
+ ranges = <0x0 0x47400000 0x8000>;

usb0_phy: usb-phy@1300 {
compatible = "ti,am335x-usb-phy";
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index da6d70f09ef19..3175266ede646 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -200,7 +200,7 @@
status = "disabled";
};

- dma@20000 {
+ dma: dma@20000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x20000 0x1000>;
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
@@ -215,6 +215,8 @@
clocks = <&iprocslow>;
clock-names = "apb_pclk";
#dma-cells = <1>;
+ dma-coherent;
+ status = "disabled";
};

sdio: sdhci@21000 {
@@ -257,10 +259,10 @@
status = "disabled";
};

- mailbox: mailbox@25000 {
+ mailbox: mailbox@25c00 {
compatible = "brcm,iproc-fa2-mbox";
- reg = <0x25000 0x445>;
- interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x25c00 0x400>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
index 334325390aed0..29bbecd36f65d 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
@@ -17,6 +17,7 @@
};

memory {
+ device_type = "memory";
reg = <0x00000000 0x08000000
0x88000000 0x18000000>;
};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 8c388eb8a08f8..7be4c4e628e02 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -58,6 +58,10 @@

/* USB 3 support needed to be complete */

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index c339771bb22e0..e58ed7e953460 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -58,6 +58,10 @@

/* USB 3 support needed to be complete */

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 1c72ec8288de4..716da62f57885 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -58,6 +58,10 @@

/* XHCI support needed to be complete */

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 96a021cebd97b..a49c2fd21f4a8 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -58,6 +58,10 @@

/* USB 3 and SLIC support needed to be complete */

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index b2c7f21d471e6..dd6dff6452b87 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -58,6 +58,10 @@

/* USB 3 and SLIC support needed to be complete */

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 536fb24f38bb7..a71371b4065ed 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -69,6 +69,10 @@
status = "okay";
};

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index 3fcca12d83c2d..7b84b54436edd 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -48,6 +48,10 @@
};
};

+&dma {
+ status = "okay";
+};
+
&amac0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
index f05e918412027..53a25fba34f69 100644
--- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
+++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
@@ -232,13 +232,6 @@
status = "okay";
};

-&wdog1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_wdog>;
- fsl,ext-reset-output;
- status = "okay";
-};
-
&iomuxc {
pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>;

@@ -409,10 +402,4 @@
MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
>;
};
-
- pinctrl_wdog: wdoggrp {
- fsl,pins = <
- MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x30b0
- >;
- };
};
diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
index a17af4d9bfdf9..61ba21a605a81 100644
--- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
+++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
@@ -57,6 +57,13 @@
status = "okay";
};

+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reset_out>;
@@ -106,4 +113,10 @@
MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x1b0b0
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x18b0
+ >;
+ };
};
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index 8047e8cdb3af0..4548d87534e37 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -139,7 +139,7 @@
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio2>;
- interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */

phy-mode = "mii";

diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 6aa938b949db2..1df0ee01ee02b 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -53,6 +53,7 @@ config ARCH_BCM_NSP
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select ARM_ERRATA_764369 if SMP
+ select ARM_TIMER_SP804
select THERMAL
select THERMAL_OF
help
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index f057df813f83a..e9962b48e30cb 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
- goto put_node;
+ goto put_device;
}

ocram_base = gen_pool_alloc(ocram_pool, size);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
- goto put_node;
+ goto put_device;
}

phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
if (virt_out)
*virt_out = virt;

+put_device:
+ put_device(&pdev->dev);
put_node:
of_node_put(node);

diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 82706af307dee..c630457bb228e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3489,7 +3489,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
};

static const struct omap_hwmod_reset omap_reset_quirks[] = {
- { .match = "dss", .len = 3, .reset = omap_dss_reset, },
+ { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
{ .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
{ .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
{ .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
index 951e14a3de0e5..22aed2806fda8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -196,7 +196,7 @@

ldo1_reg: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <3000000>;
+ regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
@@ -204,7 +204,7 @@

ldo2_reg: LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 2497eebb57393..fe49dbc535e1f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -101,7 +101,7 @@

ldo1_reg: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <3000000>;
+ regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
@@ -109,7 +109,7 @@

ldo2_reg: LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <900000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 94289d1269933..c12186f8ab7ab 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
return sve_vl_from_vq(__bit_to_vq(bit));
}

-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void)
return 0;
}

-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 0bbac612146ea..666b225aeb3ad 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;

/*
- * Compat (i.e. 32 bit) mode:
- * - PC has been set in the pt_regs struct in kernel_entry,
- * - Handle SP and LR here.
+ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+ * we're stuck with it for ABI compatability reasons.
+ *
+ * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+ * These correspond directly to a prefix of the registers saved in our
+ * 'struct pt_regs', with the exception of the PC, so we copy that down
+ * (x15 corresponds to SP_hyp in the architecture).
+ *
+ * So far, so good.
+ *
+ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+ * PC registers would normally live. The initial idea was to allow a
+ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+ * how well that works in practice, somebody might be relying on it.
+ *
+ * At the time we make a sample, we don't know whether the consumer is
+ * 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
+ if (idx == 15)
+ return regs->pc;
}

if ((u32)idx == PERF_REG_ARM64_SP)
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4a75f2d9bf0e0..bce0e5349978f 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
+#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/kdump.h>
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index d969bab4a26b5..262e5bbb27760 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -179,7 +179,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -224,7 +224,7 @@
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -270,7 +270,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
@@ -316,7 +316,7 @@
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
+ : "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f3619f59d85cc..12f8a7fce78b1 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
{
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
+
+ if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+ if (unlikely(!(prot & PROT_READ)))
+ return -EINVAL;
+
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 3bcfdeb013951..0cd085cdeb4f2 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -36,6 +36,7 @@ struct vdso_data {
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
__u32 ts_dir; /* TOD steering direction 0x64 */
__u64 ts_end; /* TOD steering end 0x68 */
+ __u32 hrtimer_res; /* hrtimer resolution 0x70 */
};

struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e80f0e6f59722..46f84cb0d5528 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
OFFSET(__VDSO_TS_END, vdso_data, ts_end);
+ OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
@@ -86,7 +87,6 @@ int main(void)
DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
- DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3ae64914bd144..9584e743102b7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -368,9 +368,9 @@ ENTRY(system_call)
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
+ sth %r1,__PT_INT_CODE+2(%r11)
cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
- sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,3
.Lsysc_nr_ok:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 58faa12542a1b..e007224b65bb2 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child,
child->thread.per_user.end = data;
}

+static void fixup_int_code(struct task_struct *child, addr_t data)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ int ilc = regs->int_code >> 16;
+ u16 insn;
+
+ if (ilc > 6)
+ return;
+
+ if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
+ &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
+ return;
+
+ /* double check that tracee stopped on svc instruction */
+ if ((insn >> 8) != 0xa)
+ return;
+
+ regs->int_code = 0x20000 | (data & 0xffff);
+}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
@@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
struct user *dummy = NULL;
addr_t offset;

+
if (addr < (addr_t) &dummy->regs.acrs) {
+ struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
@@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/* Invalid addressing mode bits */
return -EINVAL;
}
- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
+
+ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+ addr == offsetof(struct user, regs.gprs[2]))
+ fixup_int_code(child, data);
+ *(addr_t *)((addr_t) &regs->psw + addr) = data;

} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
@@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else {
+
+ if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+ addr == offsetof(struct compat_user, regs.gprs[2]))
+ fixup_int_code(child, data);
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
@@ -838,40 +867,66 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
unsigned long mask = -1UL;
+ long ret = -1;
+
+ if (is_compat_task())
+ mask = 0xffffffff;

/*
* The sysc_tracesys code in entry.S stored the system
* call number to gprs[2].
*/
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- (tracehook_report_syscall_entry(regs) ||
- regs->gprs[2] >= NR_syscalls)) {
+ tracehook_report_syscall_entry(regs)) {
/*
- * Tracing decided this syscall should not happen or the
- * debugger stored an invalid system call number. Skip
+ * Tracing decided this syscall should not happen. Skip
* the system call and the system call restart handling.
*/
- clear_pt_regs_flag(regs, PIF_SYSCALL);
- return -1;
+ goto skip;
}

+#ifdef CONFIG_SECCOMP
/* Do the secure computing check after ptrace. */
- if (secure_computing()) {
- /* seccomp failures shouldn't expose any additional code. */
- return -1;
+ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+ struct seccomp_data sd;
+
+ if (is_compat_task()) {
+ sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
+ sd.arch = AUDIT_ARCH_S390;
+ } else {
+ sd.instruction_pointer = regs->psw.addr;
+ sd.arch = AUDIT_ARCH_S390X;
+ }
+
+ sd.nr = regs->int_code & 0xffff;
+ sd.args[0] = regs->orig_gpr2 & mask;
+ sd.args[1] = regs->gprs[3] & mask;
+ sd.args[2] = regs->gprs[4] & mask;
+ sd.args[3] = regs->gprs[5] & mask;
+ sd.args[4] = regs->gprs[6] & mask;
+ sd.args[5] = regs->gprs[7] & mask;
+
+ if (__secure_computing(&sd) == -1)
+ goto skip;
}
+#endif /* CONFIG_SECCOMP */

if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->gprs[2]);
+ trace_sys_enter(regs, regs->int_code & 0xffff);

- if (is_compat_task())
- mask = 0xffffffff;

- audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
+ audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
regs->gprs[3] &mask, regs->gprs[4] &mask,
regs->gprs[5] &mask);

+ if ((signed long)regs->gprs[2] >= NR_syscalls) {
+ regs->gprs[2] = -ENOSYS;
+ ret = -ENOSYS;
+ }
return regs->gprs[2];
+skip:
+ clear_pt_regs_flag(regs, PIF_SYSCALL);
+ return ret;
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f9d070d016e35..b1113b5194325 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -301,6 +301,7 @@ void update_vsyscall(struct timekeeper *tk)

vdso_data->tk_mult = tk->tkr_mono.mult;
vdso_data->tk_shift = tk->tkr_mono.shift;
+ vdso_data->hrtimer_res = hrtimer_resolution;
smp_wmb();
++vdso_data->tb_update_count;
}
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index bec19e7e6e1cf..4a66a1cb919b1 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s

KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
-KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- -Wl,--hash-style=both
+ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+ --hash-style=both --build-id -T

$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
@@ -37,8 +37,8 @@ KASAN_SANITIZE := n
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so

# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
- $(call if_changed,vdso64ld)
+$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE
+ $(call if_changed,ld)

# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)

# actual build commands
-quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<

diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 081435398e0a1..0c79caa32b592 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -17,12 +17,14 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
CFI_STARTPROC
- larl %r1,4f
+ larl %r1,3f
+ lg %r0,0(%r1)
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
- larl %r1,3f
+ larl %r1,_vdso_data
+ llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
@@ -36,7 +38,6 @@ __kernel_clock_getres:
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
- lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_usec */
1: lghi %r2,0
@@ -45,6 +46,5 @@ __kernel_clock_getres:
svc 0
br %r14
CFI_ENDPROC
-3: .quad __CLOCK_REALTIME_RES
-4: .quad __CLOCK_COARSE_RES
+3: .quad __CLOCK_COARSE_RES
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 60f7205ebe40d..646dd58169ecb 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -168,12 +168,17 @@ static int genregs32_set(struct task_struct *target,
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs->y,
+ &regs->npc,
34 * sizeof(u32), 35 * sizeof(u32));
if (ret || !count)
return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->y,
+ 35 * sizeof(u32), 36 * sizeof(u32));
+ if (ret || !count)
+ return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 35 * sizeof(u32), 38 * sizeof(u32));
+ 36 * sizeof(u32), 38 * sizeof(u32));
}

static int fpregs32_get(struct task_struct *target,
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 76d1d64d51e38..41f7922086220 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
* We place all of the values on our mini stack so lret can
* used to perform that far jump.
*/
- pushl $__KERNEL_CS
leal startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
movl efi32_boot_args(%ebp), %edi
@@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
cmpl $0, %edx
jnz 1f
+ /*
+ * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+ * shadow space on the stack even if all arguments are passed in
+ * registers. We also need an additional 8 bytes for the space that
+ * would be occupied by the return address, and this also results in
+ * the correct stack alignment for entry.
+ */
+ subl $40, %esp
leal efi_pe_entry(%ebp), %eax
movl %edi, %ecx // MS calling convention
movl %esi, %edx
1:
#endif
+ pushl $__KERNEL_CS
pushl %eax

/* Enter paged protected Mode, activating Long Mode */
@@ -776,6 +784,7 @@ SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
.fill BOOT_STACK_SIZE, 1, 0
+ .balign 16
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)

/*
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index dd17c2da1af5f..da78ccbd493b7 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
return false;
}
#endif
+#ifdef CONFIG_IA32_FEAT_CTL
+void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
+#else
+static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+#endif
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0a6b35353fc79..86e2e0272c576 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1195,7 +1195,7 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index b809f117f3f46..9d5252c9685c6 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -23,8 +23,6 @@
#define MWAITX_MAX_LOOPS ((u32)-1)
#define MWAITX_DISABLE_CSTATES 0xf0

-u32 get_umwait_control_msr(void);
-
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3bcf27caf6c9f..c4e8fd709cf67 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -113,9 +113,10 @@ struct cpuinfo_x86 {
/* in KB - valid for CPUS which support this call: */
unsigned int x86_cache_size;
int x86_cache_alignment; /* In bytes */
- /* Cache QoS architectural values: */
+ /* Cache QoS architectural values, valid only on the BSP: */
int x86_cache_max_rmid; /* max index */
int x86_cache_occ_scale; /* scale to bytes */
+ int x86_cache_mbm_width_offset;
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index f6b7fe2833cc7..c8a27cbbdae29 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -84,9 +84,12 @@ static inline void resctrl_sched_in(void)
__resctrl_sched_in();
}

+void resctrl_cpu_detect(struct cpuinfo_x86 *c);
+
#else

static inline void resctrl_sched_in(void) {}
+static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 426792565d864..c5cf336e50776 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -3,6 +3,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>

+#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8293ee5149758..c669a5756bdf6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
+#include <asm/resctrl_sched.h>

#include "cpu.h"

@@ -347,6 +348,9 @@ out:
cr4_clear_bits(X86_CR4_UMIP);
}

+/* These bits should not change their value after CPU init is finished. */
+static const unsigned long cr4_pinned_mask =
+ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

@@ -371,20 +375,20 @@ EXPORT_SYMBOL(native_write_cr0);

void native_write_cr4(unsigned long val)
{
- unsigned long bits_missing = 0;
+ unsigned long bits_changed = 0;

set_register:
asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));

if (static_branch_likely(&cr_pinning)) {
- if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
- bits_missing = ~val & cr4_pinned_bits;
- val |= bits_missing;
+ if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+ bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+ val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
goto set_register;
}
- /* Warn after we've set the missing bits. */
- WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
- bits_missing);
+ /* Warn after we've corrected the changed bits. */
+ WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
+ bits_changed);
}
}
EXPORT_SYMBOL(native_write_cr4);
@@ -396,7 +400,7 @@ void cr4_init(void)
if (boot_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
if (static_branch_likely(&cr_pinning))
- cr4 |= cr4_pinned_bits;
+ cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

__write_cr4(cr4);

@@ -411,10 +415,7 @@ void cr4_init(void)
*/
static void __init setup_cr_pinning(void)
{
- unsigned long mask;
-
- mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
- cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+ cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
static_key_enable(&cr_pinning.key);
}

@@ -854,30 +855,6 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
}
}

-static void init_cqm(struct cpuinfo_x86 *c)
-{
- if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
- c->x86_cache_max_rmid = -1;
- c->x86_cache_occ_scale = -1;
- return;
- }
-
- /* will be overridden if occupancy monitoring exists */
- c->x86_cache_max_rmid = cpuid_ebx(0xf);
-
- if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
- cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
- cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
- u32 eax, ebx, ecx, edx;
-
- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
- cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
-
- c->x86_cache_max_rmid = ecx;
- c->x86_cache_occ_scale = ebx;
- }
-}
-
void get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
@@ -945,7 +922,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)

init_scattered_cpuid_features(c);
init_speculation_control(c);
- init_cqm(c);
+ resctrl_cpu_detect(c);

/*
* Clear/Set all flags overridden by options, after probe.
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index fb538fccd24c0..9d033693519aa 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -81,8 +81,4 @@ extern void update_srbds_msr(void);

extern u64 x86_read_arch_cap_msr(void);

-#ifdef CONFIG_IA32_FEAT_CTL
-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
-#endif
-
#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index d8cc5223b7ce8..c1551541c7a53 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -958,6 +958,35 @@ static __init void rdt_init_res_defs(void)

static enum cpuhp_state rdt_online;

+void resctrl_cpu_detect(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+ c->x86_cache_max_rmid = -1;
+ c->x86_cache_occ_scale = -1;
+ c->x86_cache_mbm_width_offset = -1;
+ return;
+ }
+
+ /* will be overridden if occupancy monitoring exists */
+ c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+ u32 eax, ebx, ecx, edx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+ c->x86_cache_max_rmid = ecx;
+ c->x86_cache_occ_scale = ebx;
+ c->x86_cache_mbm_width_offset = eax & 0xff;
+
+ if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+ c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
+ }
+}
+
static int __init resctrl_late_init(void)
{
struct rdt_resource *r;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 3dd13f3a8b231..0963864757143 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -37,6 +37,7 @@
#define MBA_IS_LINEAR 0x4
#define MBA_MAX_MBPS U32_MAX
#define MAX_MBA_BW_AMD 0x800
+#define MBM_CNTR_WIDTH_OFFSET_AMD 20

#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 5a359d9fcc055..29a3878ab3c01 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
+ _d_cdp = NULL;
ret = -EINVAL;
}

diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
index 300e3fd5ade3e..ec8064c0ae035 100644
--- a/arch/x86/kernel/cpu/umwait.c
+++ b/arch/x86/kernel/cpu/umwait.c
@@ -18,12 +18,6 @@
*/
static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);

-u32 get_umwait_control_msr(void)
-{
- return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
/*
* Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
* hardware or BIOS before kernel boot.
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index df1358ba622bd..05fa4ef634902 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>

+#include <asm/cpu.h>
#include <asm/cpufeature.h>

#include "cpu.h"
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9af25c97612a7..8967e320a9784 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2512,6 +2512,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
}
memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

+ apic->vcpu->kvm->arch.apic_map_dirty = true;
kvm_recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 8a3b1bce722a4..d0e3b1b6845b5 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 92d0569541943..eb27ab47d6073 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1746,10 +1746,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
{
if (kvm_x86_ops.write_log_dirty)
- return kvm_x86_ops.write_log_dirty(vcpu);
+ return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);

return 0;
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 9bdf9b7d9a962..7098f843eabdd 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
- int write_fault)
+ gpa_t addr, int write_fault)
{
unsigned level, index;
pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_arch_write_log_dirty(vcpu))
+ if (kvm_arch_write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
@@ -457,7 +457,8 @@ retry_walk:
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

if (unlikely(!accessed_dirty)) {
- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+ addr, write_fault);
if (unlikely(ret < 0))
goto error;
else if (ret)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d7aa0dfab8bbd..390ec34e4b4f1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6467,23 +6467,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host, false);
}

-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
- u32 host_umwait_control;
-
- if (!vmx_has_waitpkg(vmx))
- return;
-
- host_umwait_control = get_umwait_control_msr();
-
- if (vmx->msr_ia32_umwait_control != host_umwait_control)
- add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
- vmx->msr_ia32_umwait_control,
- host_umwait_control, false);
- else
- clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6575,9 +6558,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)

pt_guest_enter(vmx);

- if (vcpu_to_pmu(vcpu)->version)
- atomic_switch_perf_msrs(vmx);
- atomic_switch_umwait_control_msr(vmx);
+ atomic_switch_perf_msrs(vmx);

if (enable_preemption_timer)
vmx_update_hv_timer(vcpu);
@@ -7334,11 +7315,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}

-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa, dst;
+ gpa_t dst;

if (is_guest_mode(vcpu)) {
WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7357,7 +7338,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
return 1;
}

- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ gpa &= ~0xFFFull;
dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 97c5a92146f9d..5f08eeac16c87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2784,7 +2784,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3112,7 +3112,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_APICBASE:
msr_info->data = kvm_get_apic_base(vcpu);
break;
- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
case MSR_IA32_TSCDEADLINE:
msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index fff28c6f73a21..b0dfac3d3df71 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
+ " .align 16\n"
"0: movq $0,(%[dst])\n"
" addq $8,%[dst]\n"
" decl %%ecx ; jnz 0b\n"
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index aaff9ed7ff45c..b0d3c5ca6d807 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -193,6 +193,8 @@ static void fix_processor_context(void)
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
+ struct cpuinfo_x86 *c;
+
if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
@@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
mtrr_bp_restore();
perf_restore_debug_store();
msr_restore_context(ctxt);
+
+ c = &cpu_data(smp_processor_id());
+ if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+ init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index bf62c25cde8f4..ae07dd78e9518 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -278,7 +278,6 @@ bool bio_integrity_prep(struct bio *bio)

if (ret == 0) {
printk(KERN_ERR "could not attach integrity payload\n");
- kfree(buf);
status = BLK_STS_RESOURCE;
goto err_end_io;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 98a702761e2cc..8f580e66691b9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3328,7 +3328,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,

if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
nr_hw_queues = nr_cpu_ids;
- if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
+ if (nr_hw_queues < 1)
+ return;
+ if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
return;

list_for_each_entry(q, &set->tag_list, tag_set_list)
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index ece8c1a921cc1..88c8af455ea3f 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/acpi.h>
+#include <linux/security.h>

#include "acpica/accommon.h"
#include "acpica/actables.h"
@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
{
const struct acpi_table_header *header = data;
struct acpi_table *table;
- int ret;
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;

table = container_of(cfg, struct acpi_table, cfg);

diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 3a89909b50a6c..76c668c05fa03 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
}

static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);

static ssize_t hotplug_enabled_show(struct kobject *kobj,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e47c8a4c83db5..f50c5f182bb52 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)

static void binder_free_proc(struct binder_proc *proc)
{
+ struct binder_device *device;
+
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ device = container_of(proc->context, struct binder_device, context);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(proc->context->name);
+ kfree(device);
+ }
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
binder_stats_deleted(BINDER_STAT_PROC);
@@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_context *context = proc->context;
- struct binder_device *device;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

@@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc)
context->binder_context_mgr_node = NULL;
}
mutex_unlock(&context->context_mgr_node_lock);
- device = container_of(proc->context, struct binder_device, context);
- if (refcount_dec_and_test(&device->ref)) {
- kfree(context->name);
- kfree(device);
- }
- proc->context = NULL;
binder_inner_proc_lock(proc);
/*
* Make sure proc stays alive after we
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 36e588d88b956..c10deb87015ba 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3692,12 +3692,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
- const u8 *p;
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len;
u16 fp = (u16)-1;
u8 bp = 0xff;
+ u8 buffer[64];
+ const u8 *p = buffer;

VPRINTK("ENTER\n");

@@ -3731,12 +3732,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
goto invalid_param_len;

- p = page_address(sg_page(scsi_sglist(scmd)));
-
/* Move past header and block descriptors. */
if (len < hdr_len)
goto invalid_param_len;

+ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
+ buffer, sizeof(buffer)))
+ goto invalid_param_len;
+
if (six_byte)
bd_len = p[3];
else
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 980aacdbcf3b4..141ac600b64c8 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -907,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto err_pm_disable;
+ goto err_pm_put;

host = ata_host_alloc(dev, 1);
if (!host) {
@@ -937,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev)

err_pm_put:
pm_runtime_put(dev);
-err_pm_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -991,8 +990,10 @@ static int sata_rcar_resume(struct device *dev)
int ret;

ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }

if (priv->type == RCAR_GEN3_SATA) {
sata_rcar_init_module(priv);
@@ -1017,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev)
int ret;

ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }

sata_rcar_setup_port(host);

diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 59f911e577192..508bbd6ea4396 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1356,6 +1356,7 @@ void regmap_exit(struct regmap *map)
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index da693e6a834e5..418bb4621255a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1289,7 +1289,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);
}

/* I/O need to be drained during transfer transition */
@@ -1320,7 +1320,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
- /* kill_bdev should have truncated all the pages */
+ /* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
@@ -1565,11 +1565,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
return 0;

sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);

blk_mq_freeze_queue(lo->lo_queue);

- /* kill_bdev should have truncated all the pages */
+ /* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e5f5f48d69d2f..db9541f385055 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -221,6 +221,35 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}

+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ u32 sysc_mask, syss_done, rstval;
+ int syss_offset, error = 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (syss_offset >= 0) {
+ error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
+ (rstval & ddata->cfg.syss_mask) ==
+ syss_done,
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
+ !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ }
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -925,18 +954,47 @@ static int sysc_enable_module(struct device *dev)
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
+ int error;

ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+ "Optional clocks failed for enable: %i\n",
+ error);
+ return error;
+ }
+ }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;

regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

- /* Set CLOCKACTIVITY, we only use it for ick */
+ /*
+ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
if (regbits->clkact_shift >= 0 &&
- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;

/* Set SIDLE mode */
@@ -991,6 +1049,9 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}

+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);

@@ -1071,6 +1132,9 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);

+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
return 0;
}

@@ -1488,7 +1552,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
int manager_count;
- bool framedonetv_irq;
+ bool framedonetv_irq = true;
u32 val, irq_mask = 0;

switch (sysc_soc->soc) {
@@ -1505,6 +1569,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
break;
case SOC_AM4:
manager_count = 1;
+ framedonetv_irq = false;
break;
case SOC_UNKNOWN:
default:
@@ -1822,11 +1887,10 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
- u32 sysc_mask, syss_done;
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;

sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];

if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
@@ -1835,11 +1899,6 @@ static int sysc_reset(struct sysc *ddata)

sysc_mask = BIT(ddata->cap->regbits->srst_shift);

- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
- syss_done = 0;
- else
- syss_done = ddata->cfg.syss_mask;
-
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);

@@ -1856,18 +1915,9 @@ static int sysc_reset(struct sysc *ddata)
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);

- /* Poll on reset status */
- if (syss_offset >= 0) {
- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
- (rstval & ddata->cfg.syss_mask) ==
- syss_done,
- 100, MAX_MODULE_SOFTRESET_WAIT);
-
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
- !(rstval & sysc_mask),
- 100, MAX_MODULE_SOFTRESET_WAIT);
- }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");

if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index e2330e757f1ff..001617033d6a2 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -244,6 +244,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index 6282ee2f361cd..a8901f90a61ac 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
struct __prci_data *pd;
int r;

- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ pd = devm_kzalloc(dev,
+ struct_size(pd, hw_clks.hws,
+ ARRAY_SIZE(__prci_init_clocks)),
+ GFP_KERNEL);
if (!pd)
return -ENOMEM;

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e9994de0b900..0d89c3e473bdc 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -272,6 +272,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)

if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
} else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
}
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index e3d6926965834..d5915272141fd 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
"entry%d", entry_num);
if (rc) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return rc;
}
}
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index ea66b1f16a79d..f1c4faf58c764 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -104,12 +104,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
if (!found)
return 0;

+ /* Skip any leading slashes */
+ while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+ i++;
+
while (--result_len > 0 && i < cmdline_len) {
- if (cmdline[i] == L'\0' ||
- cmdline[i] == L'\n' ||
- cmdline[i] == L' ')
+ efi_char16_t c = cmdline[i++];
+
+ if (c == L'\0' || c == L'\n' || c == L' ')
break;
- *result++ = cmdline[i++];
+ else if (c == L'/')
+ /* Replace UNIX dir separators with EFI standard ones */
+ *result++ = L'\\';
+ else
+ *result++ = c;
}
*result = L'\0';
return i;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d2840c2f62865..1dc57079933ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1261,8 +1261,12 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;

- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.instance[i].fw != NULL)
+ release_firmware(adev->sdma.instance[i].fw);
+
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ }

return 0;
}
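
The sdma_v5_0 fix above releases the firmware blobs taken at init time in the teardown path. The request_firmware()/release_firmware() pairing, sketched with an illustrative firmware name:

#include <linux/device.h>
#include <linux/firmware.h>

static int my_load_fw(struct device *dev, const struct firmware **fw)
{
	/* "my_device_fw.bin" is a placeholder, not a real blob name. */
	return request_firmware(fw, "my_device_fw.bin", dev);
}

static void my_unload_fw(const struct firmware *fw)
{
	/* release_firmware(NULL) is a no-op, so unconditional calls in
	 * the teardown path are safe. */
	release_firmware(fw);
}
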
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fe0cd49d4ea7c..d8c74aa4e5650 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -396,6 +396,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
if (ret) {
pr_warn("Creating procfs pid directory failed");
+ kobject_put(process->kobj);
goto out;
}

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0461fecd68db3..11491ae1effc2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1017,7 +1017,6 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
@@ -1090,6 +1089,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);

+ debugfs_create_file("output_bpc", 0644, dir, connector,
+ &output_bpc_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index dcf84a61de37f..949d10ef83040 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin

srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);

- if (!srm)
- return -EINVAL;
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }

if (pos >= srm_size)
ret = 0;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e89694eb90b4c..700f0039df7bd 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1777,7 +1777,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,

kfree(rgb_regamma);
rgb_regamma_alloc_fail:
- kvfree(rgb_user);
+ kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a9771de4d17e6..c7be39a00d437 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);

-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force)
{
bool do_delayed;
int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;

mutex_lock(&fb_helper->lock);
- ret = drm_client_modeset_commit(&fb_helper->client);
+ if (force) {
+ /*
+ * Yes this is the _locked version which expects the master lock
+ * to be held. But for forced restores we're intentionally
+ * racing here, see drm_fb_helper_set_par().
+ */
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ } else {
+ ret = drm_client_modeset_commit(&fb_helper->client);
+ }

do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)

return ret;
}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);

#ifdef CONFIG_MAGIC_SYSRQ
@@ -1310,6 +1326,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ bool force;

if (oops_in_progress)
return -EBUSY;
@@ -1319,7 +1336,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}

- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ /*
+ * Normally we want to make sure that a kms master takes precedence over
+ * fbdev, to avoid fbdev flickering and occasionally stealing the
+ * display status. But Xorg first sets the vt back to text mode using
+ * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+ * status when exiting.
+ *
+ * In the past this was caught by drm_fb_helper_lastclose(), but on
+ * modern systems where logind always keeps a drm fd open to orchestrate
+ * the vt switching, this doesn't work.
+ *
+ * To not break the userspace ABI we have this special case here, which
+ * is only used for the above case. Everything else uses the normal
+ * commit function, which ensures that we never steal the display from
+ * an active drm master.
+ */
+ force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+ __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);

return 0;
}
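
The drm_fb_helper change above keeps the exported function's behaviour unchanged and adds an internal variant with a force flag for the KD_TEXT case described in the comment. The wrapper pattern in general, with illustrative names rather than the DRM API:

#include <linux/types.h>

struct my_ctx;
int my_commit(struct my_ctx *ctx);		/* checks for an active master */
int my_commit_locked(struct my_ctx *ctx);	/* skips that check */

static int __my_restore(struct my_ctx *ctx, bool force)
{
	/* Forced restores intentionally bypass the master check. */
	if (force)
		return my_commit_locked(ctx);
	return my_commit(ctx);
}

/* Exported entry point keeps its original semantics. */
int my_restore(struct my_ctx *ctx)
{
	return __my_restore(ctx, false);
}
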
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3ad828eaefe1c..db91b3c031a13 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2297,6 +2297,7 @@ static const struct panel_desc logicpd_type_28 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};

static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2465,6 +2466,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};

static const struct display_timing nlt_nl192108ac18_02d_timing = {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index b57c37ddd164c..c7fbb7932f379 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;

- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;

if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 0919f1f159a49..f65d1489dc509 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
+ select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index e0c256922d4f1..977d6f524649c 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -98,7 +98,7 @@
#define I2C_STAT_DAT_REQ BIT(25)
#define I2C_STAT_CMD_COMP BIT(24)
#define I2C_STAT_STOP_ERR BIT(23)
-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT GENMASK(22, 16)
#define I2C_STAT_ANY_INT BIT(15)
#define I2C_STAT_SCL_IN BIT(11)
#define I2C_STAT_SDA_IN BIT(10)
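
The i2c-fsi fix above widens the port field: GENMASK(h, l) builds a mask covering bits l through h inclusive, so GENMASK(22, 16) describes a 7-bit field where GENMASK(19, 16) only covered 4 bits. Extracting such a field, with a hypothetical register name:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_STAT_MAX_PORT	GENMASK(22, 16)	/* bits 16..22 */

static unsigned int my_max_port(u32 status)
{
	/* FIELD_GET() masks the field and shifts it down to bit 0. */
	return FIELD_GET(MY_STAT_MAX_PORT, status);
}
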
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4c4d17ddc96b9..7c88611c732c4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1769,14 +1769,9 @@ static int tegra_i2c_remove(struct platform_device *pdev)
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- int err;

i2c_mark_adapter_suspended(&i2c_dev->adapter);

- err = pm_runtime_force_suspend(dev);
- if (err < 0)
- return err;
-
return 0;
}

@@ -1797,10 +1792,6 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
if (err)
return err;

- err = pm_runtime_force_resume(dev);
- if (err < 0)
- return err;
-
i2c_mark_adapter_resumed(&i2c_dev->adapter);

return 0;
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index b34d2ff069318..bbb70a8a411e3 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
+ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev,
+ "Invalid block size returned: %d\n",
+ msg[1].buf[0]);
+ status = -EPROTO;
+ goto cleanup;
+ }
for (i = 0; i < msg[1].buf[0] + 1; i++)
data->block[i] = msg[1].buf[i];
break;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26e6f7df247b6..12ada58c96a90 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1619,6 +1619,8 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;

+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);

@@ -1665,6 +1667,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
}

+ mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
@@ -1706,6 +1709,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
+ mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
@@ -2481,6 +2485,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;

+ lockdep_assert_held(&lock);
+
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;

@@ -3308,6 +3314,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;

+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);

@@ -3336,6 +3344,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list;
int ret;

+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
@@ -3362,6 +3372,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);

+ lockdep_assert_held(&lock);
+
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
@@ -3401,6 +3413,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;

+ lockdep_assert_held(&lock);
+
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
@@ -3448,6 +3462,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;

+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
@@ -3478,6 +3494,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
unsigned short snum;
int ret;

+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
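
The cma.c change above sprinkles lockdep_assert_held() over the helpers that touch the port bind lists, turning an implicit "caller must hold the lock" rule into something lockdep can verify. The idiom in isolation, with an illustrative lock and entry type:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct my_entry;

static DEFINE_MUTEX(my_lock);

static void my_table_insert(struct my_entry *e)
{
	/* A no-op unless lockdep is enabled; otherwise it splats if a
	 * caller forgot to take my_lock. */
	lockdep_assert_held(&my_lock);
	/* ... modify the shared table ... */
}

static void my_add(struct my_entry *e)
{
	mutex_lock(&my_lock);
	my_table_insert(e);
	mutex_unlock(&my_lock);
}
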
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index c54db13fa9b0a..049c9cdc10dec 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);

deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);

ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

@@ -2941,6 +2941,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
+ kfree(mad_priv);
ret = -ENOMEM;
break;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index e0a5e897e4b1d..75bcbc625616e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -459,40 +459,46 @@ static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+ const struct uverbs_obj_fd_type *fd_type;
int new_fd;
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj, *ret;
struct file *filp;

+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
- fd_type->fops->release != &uverbs_async_event_release))
- return ERR_PTR(-EINVAL);
+ fd_type->fops->release != &uverbs_async_event_release)) {
+ ret = ERR_PTR(-EINVAL);
+ goto err_fd;
+ }

new_fd = get_unused_fd_flags(O_CLOEXEC);
- if (new_fd < 0)
- return ERR_PTR(new_fd);
-
- uobj = alloc_uobj(attrs, obj);
- if (IS_ERR(uobj))
+ if (new_fd < 0) {
+ ret = ERR_PTR(new_fd);
goto err_fd;
+ }

/* Note that uverbs_uobject_fd_release() is called during abort */
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags);
if (IS_ERR(filp)) {
- uverbs_uobject_put(uobj);
- uobj = ERR_CAST(filp);
- goto err_fd;
+ ret = ERR_CAST(filp);
+ goto err_getfile;
}
uobj->object = filp;

uobj->id = new_fd;
return uobj;

-err_fd:
+err_getfile:
put_unused_fd(new_fd);
- return uobj;
+err_fd:
+ uverbs_uobject_put(uobj);
+ return ret;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 5c57098a4aee5..3420c77424861 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -209,6 +209,7 @@ int efa_query_device(struct ib_device *ibdev,
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;

if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 4633a0ce1a8c0..2ced236e1553c 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;

ppd = private2ppd(fp);

- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}

static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);

release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);

return 0;
}
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;

ppd = private2ppd(fp);

- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}

static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);

release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);

return 0;
}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 792eecd206b61..97fc7dd353b04 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply carry valid private data;
+ * for the other events it may be left over from connection
+ * establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request().
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}

if (ep->cm_id)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 500a7ee04c44e..ca29954a54ac3 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1196,7 +1196,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) {
ret = (ERR_PTR(err));
- goto bail_driver_priv;
+ goto bail_rq_rvt;
}

err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
@@ -1300,9 +1300,11 @@ bail_qpn:
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
- rvt_free_rq(&qp->r_rq);
free_ud_wq_attr(qp);

+bail_rq_rvt:
+ rvt_free_rq(&qp->r_rq);
+
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);

diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 650520244ed0c..7271d705f4b06 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
break;

bytes = min(bytes, len);
- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+ bytes) {
copied += bytes;
offset += bytes;
len -= bytes;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index f77dae7ba7d40..7df5621bba8de 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
if (!ret)
ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
&validate_drhd_cb);
- if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ if (!ret && !no_iommu && !iommu_detected &&
+ (!dmar_disabled || dmar_platform_optin())) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index fde7aba49b746..34b2ed91cf4d9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -634,6 +634,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
return g_iommus[iommu_id];
}

+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+ return sm_supported(iommu) ?
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
@@ -645,7 +651,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)

for_each_domain_iommu(i, domain) {
found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
+ if (!iommu_paging_structure_coherency(g_iommus[i])) {
domain->iommu_coherency = 0;
break;
}
@@ -656,7 +662,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
/* No hardware attached; use lowest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
+ if (!iommu_paging_structure_coherency(iommu)) {
domain->iommu_coherency = 0;
break;
}
@@ -943,7 +949,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain_use_first_level(domain))
- pteval |= DMA_FL_PTE_XD;
+ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -2034,7 +2040,6 @@ static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
- context->hi |= (1 << 20);
}

/*
@@ -2178,7 +2183,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,

context_set_fault_enable(context);
context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));

/*
* It's a non-present to present mapping. If hardware doesn't cache
@@ -2326,7 +2332,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,

attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
if (domain_use_first_level(domain))
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;

if (!sg) {
sg_res = nr_pages;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d8bf731b118c..a2e5a0fcd7d5c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -819,7 +819,8 @@ static void bcache_device_free(struct bcache_device *d)
}

static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors, make_request_fn make_request_fn)
+ sector_t sectors, make_request_fn make_request_fn,
+ struct block_device *cached_bdev)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
@@ -885,6 +886,21 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
q->limits.physical_block_size = block_size;
+
+ if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
+ /*
+ * This should only happen with BCACHE_SB_VERSION_BDEV.
+ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+ */
+ pr_info("%s: sb/logical block size (%u) greater than page size "
+ "(%lu) falling back to device logical block size (%u)",
+ d->disk->disk_name, q->limits.logical_block_size,
+ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+ /* This also adjusts physical block size/min io size if needed */
+ blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
+ }
+
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
@@ -1342,7 +1358,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)

ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
- cached_dev_make_request);
+ cached_dev_make_request, dc->bdev);
if (ret)
return ret;

@@ -1455,7 +1471,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);

if (bcache_device_init(d, block_bytes(c), u->sectors,
- flash_dev_make_request))
+ flash_dev_make_request, NULL))
goto err;

bcache_device_attach(d, c, u - c->uuids);
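
The bcache fix above caps the stacked device's logical block size at PAGE_SIZE by falling back to the backing device's value; blk_queue_logical_block_size() raises the physical block size and io_min along with it. A compressed sketch of that fallback, queue and backing device assumed to exist:

#include <linux/blkdev.h>

static void my_clamp_block_size(struct request_queue *q,
				struct block_device *backing_bdev)
{
	if (queue_logical_block_size(q) > PAGE_SIZE && backing_bdev)
		/* Also adjusts physical block size and io_min. */
		blk_queue_logical_block_size(q,
				bdev_logical_block_size(backing_bdev));
}
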
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 613c171b1b6d2..5cc94f57421ca 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -286,6 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn);
pfn.val++;
+ if (!(i & 15))
+ cond_resched();
}
} while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
@@ -857,6 +859,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_wait_for_ios(wc, WRITE);
discarded_something = true;
}
+ if (!writecache_entry_is_committed(wc, e))
+ wc->uncommitted_blocks--;
writecache_free_entry(wc, e);
}
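
The persistent_memory_claim() hunk above drops a cond_resched() into what can be a very long pfn-to-page loop so it cannot hog the CPU; masking the counter keeps the check cheap. The idiom by itself:

#include <linux/sched.h>

static void my_long_loop(unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		/* ... cheap per-item work ... */
		if (!(i & 15))
			cond_resched();	/* offer to reschedule every 16 items */
	}
}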

diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9392934e3a060..7becfc768bbcc 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -94,6 +94,7 @@
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */

#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */

#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
@@ -107,6 +108,8 @@
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
+# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
+# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
#define PCI_CFG_HFS_6 0x6C
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f620442addf52..7649710a2ab9e 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm

-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
@@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
return (reg & 0xf0000) == 0xf0000;
}

-#define MEI_CFG_FW_SPS \
+#define MEI_CFG_FW_SPS_4 \
+ .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_sku_sps() - check for sps sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in pci function 0
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 fw_type;
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+ fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+ dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+ return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+}
+
+#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps

#define MEI_CFG_FW_VER_SUPP \
@@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = {
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_FW_SPS_4,
};

/* Cannon Lake and newer devices */
@@ -1465,8 +1501,18 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};

-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support
+ */
+static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
@@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
MEI_CFG_TRC,
};

+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+ MEI_CFG_FW_SPS,
+};
+
/*
* mei_cfg_list - A list of platform platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
- [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+ [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+ [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};

const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index b6b94e2114645..6a8973649c490 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/

@@ -76,14 +76,20 @@ struct mei_me_hw {
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0
+ * servers platforms with quirk for
+ * SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
+ * servers platforms with quirk for
+ * SPS firmware exclusion.
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -94,10 +100,13 @@ enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
- MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
+ MEI_ME_PCH12_SPS_NODMA_CFG,
MEI_ME_PCH15_CFG,
+ MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
};

diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a1ed375fed374..81e759674c1b5 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {

{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},

diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 5d3c691a1c668..3dd46cd551145 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -572,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
if (data[IFLA_BAREUDP_SRCPORT_MIN])
conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);

+ if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
+ conf->multi_proto_mode = true;
+
return 0;
}

diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c7ac63f419184..946e41f020a56 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);

+ /* Balance of_node_put() done by of_find_node_by_name() */
+ of_node_get(dn);
ports = of_find_node_by_name(dn, "ports");
if (ports) {
bcm_sf2_identify_ports(priv, ports);
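
The bcm_sf2 fix above takes an extra reference on the parent node because of_find_node_by_name() drops a reference on its "from" argument; without the of_node_get() the caller's reference on dn would be consumed. Sketch of the balanced pattern:

#include <linux/of.h>

static void my_scan_ports(struct device_node *dn)
{
	struct device_node *ports;

	of_node_get(dn);	/* of_find_node_by_name() puts 'dn' */
	ports = of_find_node_by_name(dn, "ports");
	if (ports) {
		/* ... walk the port children ... */
		of_node_put(ports);
	}
}
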
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b9b4edb913c13..9b7f1af5f5747 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1249,8 +1249,12 @@ out_disable_adv_intr:

static void __alx_stop(struct alx_priv *alx)
{
- alx_halt(alx);
alx_free_irq(alx);
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ alx_halt(alx);
alx_free_rings(alx);
alx_free_napis(alx);
}
@@ -1855,9 +1859,6 @@ static void alx_remove(struct pci_dev *pdev)
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;

- cancel_work_sync(&alx->link_check_wk);
- cancel_work_sync(&alx->reset_wk);
-
/* restore permanent mac address */
alx_set_macaddr(hw, hw->perm_addr);

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 19c4a0a5727a4..b6fb5a1709c01 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6293,6 +6293,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)

static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
+ struct hwrm_stat_ctx_clr_stats_input req0 = {0};
struct hwrm_stat_ctx_free_input req = {0};
int i;

@@ -6302,6 +6303,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return;

+ bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

mutex_lock(&bp->hwrm_cmd_lock);
@@ -6311,7 +6313,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)

if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+ if (BNXT_FW_MAJ(bp) <= 20) {
+ req0.stat_ctx_id = req.stat_ctx_id;
+ _hwrm_send_message(bp, &req0, sizeof(req0),
+ HWRM_CMD_TIMEOUT);
+ }
_hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);

@@ -6953,7 +6959,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

bp->tx_push_thresh = 0;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+ if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+ BNXT_FW_MAJ(bp) > 217)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7217,8 +7224,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc;
+ int rc, len;

bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
@@ -7250,9 +7258,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b);

- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
- resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+ len = FW_VER_STR_LEN;
+ } else {
+ fw_maj = resp->hwrm_fw_maj_8b;
+ fw_min = resp->hwrm_fw_min_8b;
+ fw_bld = resp->hwrm_fw_bld_8b;
+ fw_rsv = resp->hwrm_fw_rsvd_8b;
+ len = BC_HWRM_STR_LEN;
+ }
+ bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+ fw_rsv);

if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
@@ -11863,7 +11884,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);

- bnxt_vpd_read_info(bp);
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);

rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3d39638521d6c..23ee433db8644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1729,6 +1729,11 @@ struct bnxt {
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
char hwrm_ver_supp[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
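
The new bnxt macros above pack the four firmware version components into one u64, 16 bits each, so complete versions compare as plain integers and the major number is simply the top 16 bits; that is what the BNXT_FW_MAJ(bp) <= 20 and > 217 checks earlier in the patch rely on. A self-contained restatement with illustrative names and values:

#include <linux/types.h>

#define MY_FW_VER_CODE(maj, min, bld, rsv)	\
	((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define MY_FW_MAJ(code)		((u64)(code) >> 48)

static inline bool my_fw_supports_tx_push(u64 fw_ver_code)
{
	/* e.g. MY_FW_MAJ(MY_FW_VER_CODE(218, 1, 169, 0)) == 218 */
	return MY_FW_MAJ(fw_ver_code) > 217;
}
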
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 38bdfd4b46f0f..dde1c23c8e399 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1520,11 +1520,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}

- if (skb_padto(skb, ETH_ZLEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
@@ -1571,6 +1566,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff98a82b7bc47..d71ce7634ac19 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18170,8 +18170,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,

rtnl_lock();

- /* We probably don't have netdev yet */
- if (!netdev || !netif_running(netdev))
+ /* Could be second call or maybe we don't have netdev yet */
+ if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
goto done;

/* We needn't recover from permanent error */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 67933079aeea5..52582e8ed90e5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2558,7 +2558,7 @@ static int macb_open(struct net_device *dev)

err = macb_phylink_connect(bp);
if (err)
- goto napi_exit;
+ goto reset_hw;

netif_tx_start_all_queues(dev);

@@ -2567,9 +2567,11 @@ static int macb_open(struct net_device *dev)

return 0;

-napi_exit:
+reset_hw:
+ macb_reset_hw(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
+ macb_free_consistent(bp);
pm_exit:
pm_runtime_put_sync(&bp->pdev->dev);
return err;
@@ -3760,15 +3762,9 @@ static int macb_init(struct platform_device *pdev)

static struct sifive_fu540_macb_mgmt *mgmt;

-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
{
- struct macb *lp = netdev_priv(dev);
struct macb_queue *q = &lp->queues[0];
- struct macb_dma_desc *desc;
- dma_addr_t addr;
- u32 ctl;
- int i;

q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
@@ -3790,6 +3786,43 @@ static int at91ether_start(struct net_device *dev)
return -ENOMEM;
}

+ return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+
+ if (q->rx_ring) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ macb_dma_desc_get_size(lp),
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
+ }
+
+ if (q->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
+ }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ u32 ctl;
+ int i, ret;
+
+ ret = at91ether_alloc_coherent(lp);
+ if (ret)
+ return ret;
+
addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
desc = macb_rx_desc(q, i);
@@ -3811,9 +3844,39 @@ static int at91ether_start(struct net_device *dev)
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
return 0;
}

+static void at91ether_stop(struct macb *lp)
+{
+ u32 ctl;
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Free resources. */
+ at91ether_free_coherent(lp);
+}
+
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
@@ -3833,63 +3896,36 @@ static int at91ether_open(struct net_device *dev)

macb_set_hwaddr(lp);

- ret = at91ether_start(dev);
+ ret = at91ether_start(lp);
if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
+ goto pm_exit;

ret = macb_phylink_connect(lp);
if (ret)
- return ret;
+ goto stop;

netif_start_queue(dev);

return 0;
+
+stop:
+ at91ether_stop(lp);
+pm_exit:
+ pm_runtime_put_sync(&lp->pdev->dev);
+ return ret;
}

/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- struct macb_queue *q = &lp->queues[0];
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));

netif_stop_queue(dev);

phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);

- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR *
- macb_dma_desc_get_size(lp),
- q->rx_ring, q->rx_ring_dma);
- q->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- q->rx_buffers, q->rx_buffers_dma);
- q->rx_buffers = NULL;
+ at91ether_stop(lp);

return pm_runtime_put(&lp->pdev->dev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 72b37a66c7d88..0ed20a9cca144 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -502,41 +502,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

-/*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head. If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
- spin_unlock(&e->lock);
- if (cb->arp_err_handler)
- cb->arp_err_handler(cb->handle, skb);
- else
- t4_ofld_send(adap, skb);
- spin_lock(&e->lock);
- }
-}
-
/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
- struct l2t_entry *e;
- struct sk_buff_head *arpq = NULL;
- struct l2t_data *d = adap->l2t;
unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(d, addr, addr_len, ifidx);
+ int hash, ifidx = neigh->dev->ifindex;
+ struct sk_buff_head *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;

+ hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
write_l2e(adap, e, 0);
}

- if (arpq)
- handle_failed_resolution(adap, e);
+ if (arpq) {
+ struct sk_buff *skb;
+
+ /* Called when address resolution fails for an L2T
+ * entry to handle packets on the arpq head. If a
+ * packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ spin_unlock(&e->lock);
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ spin_lock(&e->lock);
+ }
+ }
spin_unlock_bh(&e->lock);
}

diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6516c45864b35..db8106d9d6edf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1425,12 +1425,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)

qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
- spin_lock(&adap->ptp_lock);
if (!(adap->ptp_tx_skb)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
adap->ptp_tx_skb = skb_get(skb);
} else {
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
q = &adap->sge.ptptxq;
@@ -1444,11 +1442,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)

#ifdef CONFIG_CHELSIO_T4_FCOE
ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(ret == -ENOTSUPP)) {
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
+ if (unlikely(ret == -EOPNOTSUPP))
goto out_free;
- }
#endif /* CONFIG_CHELSIO_T4_FCOE */

chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1456,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}

@@ -1481,8 +1474,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
goto out_free;
}

@@ -1630,8 +1621,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq_advance(&q->q, ndesc);

cxgb4_ring_tx_db(adap, &q->q, ndesc);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;

out_free:
@@ -2365,6 +2354,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(qid >= pi->nqsets))
return cxgb4_ethofld_xmit(skb, dev);

+ if (is_ptp_enabled(skb, dev)) {
+ struct adapter *adap = netdev2adap(dev);
+ netdev_tx_t ret;
+
+ spin_lock(&adap->ptp_lock);
+ ret = cxgb4_eth_xmit(skb, dev);
+ spin_unlock(&adap->ptp_lock);
+ return ret;
+ }
+
return cxgb4_eth_xmit(skb, dev);
}

diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index ccf2611f4a203..4486a0db8ef0c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
/* disable interrupts */
enetc_wr_reg(v->rbier, 0);

- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);

napi_schedule_irqoff(&v->napi);
@@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
/* enable interrupts */
enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);

- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);

diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 96d36ae5049e1..c5c732601e35e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}

netdev->min_mtu = IBMVETH_MIN_MTU;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;

memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b4d04e4474bb..2baf7b3ff4cbf 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -842,12 +842,13 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
+ int retries = 10;
bool retry;
int rc;

do {
retry = false;
- if (retry_count > IBMVNIC_MAX_QUEUES) {
+ if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
}
@@ -862,11 +863,23 @@ static int ibmvnic_login(struct net_device *netdev)

if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- netdev_warn(netdev, "Login timed out\n");
- return -1;
+ netdev_warn(netdev, "Login timed out, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ continue;
}

- if (adapter->init_done_rc == PARTIALSUCCESS) {
+ if (adapter->init_done_rc == ABORTED) {
+ netdev_warn(netdev, "Login aborted, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ /* FW or device may be busy, so
+ * wait a bit before retrying login
+ */
+ msleep(500);
+ } else if (adapter->init_done_rc == PARTIALSUCCESS) {
retry_count++;
release_sub_crqs(adapter, 1);

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b7b553602ea9f..24f4d8e0da989 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1544,7 +1544,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
for (q = 0; q < port->ntxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
- MVPP22_CTRS_TX_CTR(port->id, i),
+ MVPP22_CTRS_TX_CTR(port->id, q),
mvpp2_ethtool_txq_regs[i].offset);

/* Rxqs are numbered from 0 from the user standpoint, but not from the
@@ -1553,7 +1553,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
for (q = 0; q < port->nrxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
- port->first_rxq + i,
+ port->first_rxq + q,
mvpp2_ethtool_rxq_regs[i].offset);
}

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3e4199246a18d..d9a2267aeaea2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -990,10 +990,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,

lossy = !(pfc || pause_en);
thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &thres_cells);
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
pfc, pause_en);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &delay_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
total_cells = thres_cells + delay_cells;

taken_headroom_cells += total_cells;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index e28ecb84b8164..6b2e4e730b189 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -395,17 +395,15 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}

-static inline void
+static inline u32
mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 *p_size)
+ u32 size_cells)
{
/* Ports with eight lanes use two headroom buffers between which the
* configured headroom size is split. Therefore, multiply the calculated
* headroom size by two.
*/
- if (mlxsw_sp_port->mapping.width != 8)
- return;
- *p_size *= 2;
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}

enum mlxsw_sp_flood_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 19bf0768ed788..2fb2cbd4f229e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -312,7 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)

if (i == MLXSW_SP_PB_UNUSED)
continue;
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, &size);
+ size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 7c5032f9c8fff..76242c70d41ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -776,7 +776,7 @@ mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
speed = 0;

buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
- mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, (u16 *) &buffsize);
+ buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
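
The three mlxsw hunks above change mlxsw_sp_port_headroom_8x_adjust() from writing through a u16 pointer to taking and returning a u32; the span buffer size fed to it here is a u32, and the old `(u16 *) &buffsize` cast only ever touched half of it. A minimal, hypothetical user-space sketch of that hazard (function and variable names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Old style: writes through a u16 pointer, so a u32 argument must be cast
 * and only 16 of its bits are actually updated (which half depends on
 * endianness). */
static void adjust_in_place(uint16_t *p_size)
{
        *p_size *= 2;
}

/* New style: value in, value out -- correct for any caller-side width. */
static uint32_t adjust_by_value(uint32_t size)
{
        return size * 2;
}

int main(void)
{
        uint32_t buffsize = 0x18000;            /* needs more than 16 bits */

        adjust_in_place((uint16_t *)&buffsize); /* truncating, endian-dependent */
        printf("in-place:  0x%x\n", buffsize);

        buffsize = 0x18000;
        buffsize = adjust_by_value(buffsize);   /* 0x30000 as expected */
        printf("by value:  0x%x\n", buffsize);
        return 0;
}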
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7aa037c3fe020..790d4854b8ef5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1653,6 +1653,14 @@ int ionic_open(struct net_device *netdev)
if (err)
goto err_out;

+ err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
+ err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
/* don't start the queues until we have link */
if (netif_carrier_ok(netdev)) {
err = ionic_start_queues(lif);
@@ -1674,8 +1682,8 @@ static void ionic_stop_queues(struct ionic_lif *lif)
if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
return;

- ionic_txrx_disable(lif);
netif_tx_disable(lif->netdev);
+ ionic_txrx_disable(lif);
}

int ionic_stop(struct net_device *netdev)
@@ -1941,18 +1949,19 @@ int ionic_reset_queues(struct ionic_lif *lif)
bool running;
int err = 0;

- /* Put off the next watchdog timeout */
- netif_trans_update(lif->netdev);
-
err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;

running = netif_running(lif->netdev);
- if (running)
+ if (running) {
+ netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
- if (!err && running)
+ }
+ if (!err && running) {
ionic_open(lif->netdev);
+ netif_device_attach(lif->netdev);
+ }

clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);

diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 1a636bad717dc..aeed8939f410a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -270,7 +270,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}

- iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;

DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -442,6 +442,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
return p_blk;
}

+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 cli_idx, blk_idx;
+
+ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+ }
+}
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -461,6 +475,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)

p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

+ /* Reset all ILT blocks at the beginning of ILT computing in order
+ * to prevent memory allocation for irrelevant blocks afterwards.
+ */
+ qed_cxt_ilt_blk_reset(p_hwfn);
+
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"hwfn [%d] - Set context manager starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f4eebaabb6d0d..3e56b6056b477 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {

/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
"The filter/trigger constraint dword offsets are not enabled for recording",
-
+ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+ "No matching framing mode",

/* DBG_STATUS_VFC_READ_ERROR */
"Error reading from VFC",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 38a65b984e476..9b00988fb77e1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)

void qed_resc_free(struct qed_dev *cdev)
{
+ struct qed_rdma_info *rdma_info;
+ struct qed_hwfn *p_hwfn;
int i;

if (IS_VF(cdev)) {
@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
qed_llh_free(cdev);

for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ p_hwfn = cdev->hwfns + i;
+ rdma_info = p_hwfn->p_rdma_info;

qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}

- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
+ }

qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index d2fe61a5cf567..5409a2da6106d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
if (rc)
return rc;

- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
return qed_iwarp_ll2_stop(p_hwfn);
}

diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 37e70562a9645..f15c26ef88709 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
break;
}
}
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 856051f50eb75..adc2c8f3d48ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

+#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
+#define QED_VF_CHANNEL_USLEEP_DELAY 100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
+#define QED_VF_CHANNEL_MSLEEP_DELAY 25
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = 0, time = 100;
+ int iter, rc = 0;

zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

/* When PF would be done with the response, it would write back to the
- * `done' address. Poll until then.
+ * `done' address from a coherent DMA zone. Poll until then.
*/
- while ((!*done) && time) {
- msleep(25);
- time--;
+
+ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+ dma_rmb();
+ }
+
+ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+ dma_rmb();
}

if (!*done) {
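
The reworked VF-to-PF polling above follows a common two-phase pattern: a short busy-wait for the fast case, then sleeping polls, re-reading the DMA-coherent completion flag after a dma_rmb() on every iteration. A stripped-down sketch of that pattern (the constants and the 'done' flag layout are placeholders, not the driver's real definitions):

#include <linux/types.h>
#include <linux/delay.h>
#include <asm/barrier.h>

#define FAST_POLL_ITERS         90
#define FAST_POLL_DELAY_US      100
#define SLOW_POLL_ITERS         10
#define SLOW_POLL_DELAY_MS      25

/* 'done' points into a DMA-coherent response buffer written by the PF. */
static bool wait_for_pf_response(u8 *done)
{
        int iter;

        /* Phase 1: short busy-wait, cheap when the PF answers quickly. */
        iter = FAST_POLL_ITERS;
        while (!*done && iter--) {
                udelay(FAST_POLL_DELAY_US);
                dma_rmb();      /* order the flag read against the response body */
        }

        /* Phase 2: fall back to sleeping polls for a slow PF. */
        iter = SLOW_POLL_ITERS;
        while (!*done && iter--) {
                msleep(SLOW_POLL_DELAY_MS);
                dma_rmb();
        }

        return *done;
}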
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 1a83d1fd8ccd6..26eb58e7e0765 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1158,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,

/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+ qede_ptp_enable(edev);

edev->ops->register_ops(cdev, &qede_ll_ops, edev);

@@ -1247,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
+ edev->cdev = NULL;

/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 4c7f7a7fc1515..cd5841a9415ef 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
}

/* Disable PTP in HW */
@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
edev->ptp = NULL;
}

-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);

- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
+ /* Init cyclecounter and timecounter */
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;

- return rc;
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ return 0;
}

-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)

edev->ptp = ptp;

- rc = qede_ptp_init(edev, init_tc);
+ rc = qede_ptp_init(edev);
if (rc)
goto err1;

diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 691a14c4b2c5a..89c7f3cf3ee28 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);

static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2d873ae8a234d..668ccc9d49f83 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)

qede_rdma_cleanup_event(edev);
destroy_workqueue(edev->rdma_info.rdma_wq);
+ edev->rdma_info.rdma_wq = NULL;
}

int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
if (edev->rdma_info.exp_recovery)
return;

- if (!edev->rdma_info.qedr_dev)
+ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
return;

/* We don't want the cleanup flow to start while we're allocating and
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index c51b48dc36397..7bda2671bd5b6 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2192,8 +2192,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
- if (tp->rtl_fw)
+ if (tp->rtl_fw) {
rtl_fw_write_firmware(tp, tp->rtl_fw);
+ /* At least one firmware doesn't reset tp->ocp_base. */
+ tp->ocp_base = OCP_STD_PHY_BASE;
+ }
}

static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 7585cd2270ba4..fc99e7118e494 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
err_dma_event_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
+ rocker_dma_cmd_ring_waits_free(rocker);
+err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
PCI_DMA_BIDIRECTIONAL);
-err_dma_cmd_ring_waits_alloc:
- rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a5a0fb60193ab..5a70c49bf454f 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1038,8 +1038,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
- if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result) {
+ if (skb)
+ napi_gro_receive(&priv->napi, skb);
+ if (skb || xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
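
The netsec change above (like the wil6210 and wireguard hunks later in this patch) drops GRO_DROP handling, since napi_gro_receive() no longer reports drops back to its caller; any frame the core frees is accounted there. The resulting caller pattern is simply the following (a hedged sketch, not the driver's code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_one(struct napi_struct *napi, struct net_device *ndev,
                   struct sk_buff *skb)
{
        unsigned int len = skb->len;    /* sample before GRO may consume skb */

        /* Hand the frame to GRO and count it unconditionally; the return
         * value is no longer a drop indication. */
        napi_gro_receive(napi, skb);
        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += len;
}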
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 75266580b586d..4661ef865807f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
geneve->ttl_inherit = ttl_inherit;
+ geneve->df = df;
geneve_unquiesce(geneve, gs4, gs6);

return 0;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3fa33d27eebaf..d140e3c93fe34 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -461,8 +461,7 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
depends on MACSEC || MACSEC=n
- select CRYPTO_AES
- select CRYPTO_ECB
+ select CRYPTO_LIB_AES if MACSEC
---help---
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs

diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b4d3dc4068e27..d53ca884b5c9e 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -10,7 +10,7 @@
#include <linux/phy.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>

-#include <crypto/skcipher.h>
+#include <crypto/aes.h>

#include <net/macsec.h>

@@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
u16 key_len, u8 hkey[16])
{
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- struct skcipher_request *req = NULL;
- struct scatterlist src, dst;
- DECLARE_CRYPTO_WAIT(wait);
- u32 input[4] = {0};
+ const u8 input[AES_BLOCK_SIZE] = {0};
+ struct crypto_aes_ctx ctx;
int ret;

- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
- &wait);
- ret = crypto_skcipher_setkey(tfm, key, key_len);
- if (ret < 0)
- goto out;
-
- sg_init_one(&src, input, 16);
- sg_init_one(&dst, hkey, 16);
- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ ret = aes_expandkey(&ctx, key, key_len);
+ if (ret)
+ return ret;

-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return ret;
+ aes_encrypt(&ctx, hkey, input);
+ memzero_explicit(&ctx, sizeof(ctx));
+ return 0;
}

static int vsc8584_macsec_transformation(struct phy_device *phydev,
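
For context on the simplification above: the derived hash key follows the standard AES-GCM GHASH-subkey construction, i.e. the block cipher applied to an all-zero block,

        H = AES_K(0^128)

so a one-shot library encryption (aes_expandkey() followed by aes_encrypt(), with the expanded key wiped afterwards) is sufficient and the asynchronous skcipher machinery is unnecessary.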
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 697c74deb222b..0881b4b923632 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -798,8 +798,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,

/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
- if (phy_reg < 0)
- return -EIO;
+ if (phy_reg < 0) {
+ /* returning -ENODEV doesn't stop bus scanning */
+ return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+ }

*phy_id |= phy_reg;

diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 34ca12aec61b3..ac38bead1cd25 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1480,6 +1480,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
struct ethtool_pauseparam *pause)
{
struct phylink_link_state *config = &pl->link_config;
+ bool manual_changed;
+ int pause_state;

ASSERT_RTNL();

@@ -1494,15 +1496,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
!pause->autoneg && pause->rx_pause != pause->tx_pause)
return -EINVAL;

- mutex_lock(&pl->state_mutex);
- config->pause = 0;
+ pause_state = 0;
if (pause->autoneg)
- config->pause |= MLO_PAUSE_AN;
+ pause_state |= MLO_PAUSE_AN;
if (pause->rx_pause)
- config->pause |= MLO_PAUSE_RX;
+ pause_state |= MLO_PAUSE_RX;
if (pause->tx_pause)
- config->pause |= MLO_PAUSE_TX;
+ pause_state |= MLO_PAUSE_TX;

+ mutex_lock(&pl->state_mutex);
/*
* See the comments for linkmode_set_pause(), wrt the deficiencies
* with the current implementation. A solution to this issue would
@@ -1519,18 +1521,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
linkmode_set_pause(config->advertising, pause->tx_pause,
pause->rx_pause);

- /* If we have a PHY, phylib will call our link state function if the
- * mode has changed, which will trigger a resolve and update the MAC
- * configuration.
+ manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
+ (!(pause_state & MLO_PAUSE_AN) &&
+ (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
+
+ config->pause = pause_state;
+
+ if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+ phylink_pcs_config(pl, true, &pl->link_config);
+
+ mutex_unlock(&pl->state_mutex);
+
+ /* If we have a PHY, a change of the pause frame advertisement will
+ * cause phylib to renegotiate (if AN is enabled) which will in turn
+ * call our phylink_phy_change() and trigger a resolve. Note that
+ * we can't hold our state mutex while calling phy_set_asym_pause().
*/
- if (pl->phydev) {
+ if (pl->phydev)
phy_set_asym_pause(pl->phydev, pause->rx_pause,
pause->tx_pause);
- } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
- &pl->phylink_disable_state)) {
- phylink_pcs_config(pl, true, &pl->link_config);
+
+ /* If the manual pause settings changed, make sure we trigger a
+ * resolve to update their state; we can not guarantee that the
+ * link will cycle.
+ */
+ if (manual_changed) {
+ pl->mac_link_dropped = true;
+ phylink_run_resolve(pl);
}
- mutex_unlock(&pl->state_mutex);

return 0;
}
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 93da7d3d0954c..74568ae161253 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev)
if (rc < 0)
return rc;

- /* Wait max 640 ms to detect energy */
- phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
- rc & MII_LAN83C185_ENERGYON, 10000,
- 640000, true);
+ /* Wait max 640 ms to detect energy and the timeout is not
+ * an actual error.
+ */
+ read_poll_timeout(phy_read, rc,
+ rc & MII_LAN83C185_ENERGYON || rc < 0,
+ 10000, 640000, true, phydev,
+ MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;

diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 93044cf1417a5..1fe4cc28d154d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1414,10 +1414,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}

if (pkt_cnt == 0) {
- /* Skip IP alignment psudo header */
- skb_pull(skb, 2);
skb->len = pkt_len;
- skb_set_tail_pointer(skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+ skb_set_tail_pointer(skb, skb->len);
skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
@@ -1426,8 +1426,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
ax_skb->len = pkt_len;
- ax_skb->data = skb->data + 2;
- skb_set_tail_pointer(ax_skb, pkt_len);
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+ skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
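
The ax88179 fix above swaps direct writes to skb->data for skb_pull(), which keeps data, len and tail consistent while stripping the 2-byte alignment padding. As a reminder of the helper's behaviour (illustrative only, not the driver's exact flow):

#include <linux/skbuff.h>

static void strip_rx_padding(struct sk_buff *skb, unsigned int pkt_len)
{
        skb->len = pkt_len;                     /* frame incl. 2-byte pad */
        skb_pull(skb, 2);                       /* advances data, decrements len */
        skb_set_tail_pointer(skb, skb->len);    /* tail now matches len */
}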
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 3ac3f8570ca1b..a8f151b1b5fab 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev)
if (dev_v6)
dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

+ mutex_lock(&wg->device_update_lock);
ret = wg_socket_init(wg, wg->incoming_port);
if (ret < 0)
- return ret;
- mutex_lock(&wg->device_update_lock);
+ goto out;
list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_packet_send_staged_packets(peer);
if (peer->persistent_keepalive_interval)
wg_packet_send_keepalive(peer);
}
+out:
mutex_unlock(&wg->device_update_lock);
- return 0;
+ return ret;
}

#ifdef CONFIG_PM_SLEEP
@@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev)
list_del(&wg->device_list);
rtnl_unlock();
mutex_lock(&wg->device_update_lock);
+ rcu_assign_pointer(wg->creating_net, NULL);
wg->incoming_port = 0;
wg_socket_reinit(wg, NULL, NULL);
/* The final references are cleared in the below calls to destroy_workqueue. */
@@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev)
skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
free_percpu(wg->incoming_handshakes_worker);
- if (wg->have_creating_net_ref)
- put_net(wg->creating_net);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);

- pr_debug("%s: Interface deleted\n", dev->name);
+ pr_debug("%s: Interface destroyed\n", dev->name);
free_netdev(dev);
}

@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;

- wg->creating_net = src_net;
+ rcu_assign_pointer(wg->creating_net, src_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __read_mostly = {
.newlink = wg_newlink,
};

-static int wg_netdevice_notification(struct notifier_block *nb,
- unsigned long action, void *data)
+static void wg_netns_pre_exit(struct net *net)
{
- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
- struct wg_device *wg = netdev_priv(dev);
-
- ASSERT_RTNL();
-
- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
- return 0;
+ struct wg_device *wg;

- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
- put_net(wg->creating_net);
- wg->have_creating_net_ref = false;
- } else if (dev_net(dev) != wg->creating_net &&
- !wg->have_creating_net_ref) {
- wg->have_creating_net_ref = true;
- get_net(wg->creating_net);
+ rtnl_lock();
+ list_for_each_entry(wg, &device_list, device_list) {
+ if (rcu_access_pointer(wg->creating_net) == net) {
+ pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
+ netif_carrier_off(wg->dev);
+ mutex_lock(&wg->device_update_lock);
+ rcu_assign_pointer(wg->creating_net, NULL);
+ wg_socket_reinit(wg, NULL, NULL);
+ mutex_unlock(&wg->device_update_lock);
+ }
}
- return 0;
+ rtnl_unlock();
}

-static struct notifier_block netdevice_notifier = {
- .notifier_call = wg_netdevice_notification
+static struct pernet_operations pernet_ops = {
+ .pre_exit = wg_netns_pre_exit
};

int __init wg_device_init(void)
@@ -429,18 +425,18 @@ int __init wg_device_init(void)
return ret;
#endif

- ret = register_netdevice_notifier(&netdevice_notifier);
+ ret = register_pernet_device(&pernet_ops);
if (ret)
goto error_pm;

ret = rtnl_link_register(&link_ops);
if (ret)
- goto error_netdevice;
+ goto error_pernet;

return 0;

-error_netdevice:
- unregister_netdevice_notifier(&netdevice_notifier);
+error_pernet:
+ unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
@@ -451,7 +447,7 @@ error_pm:
void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
- unregister_netdevice_notifier(&netdevice_notifier);
+ unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&pm_notifier);
#endif
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index b15a8be9d8169..4d0144e169478 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -40,7 +40,7 @@ struct wg_device {
struct net_device *dev;
struct crypt_queue encrypt_queue, decrypt_queue;
struct sock __rcu *sock4, *sock6;
- struct net *creating_net;
+ struct net __rcu *creating_net;
struct noise_static_identity static_identity;
struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
struct workqueue_struct *packet_crypt_wq;
@@ -56,7 +56,6 @@ struct wg_device {
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;
- bool have_creating_net_ref;
};

int wg_device_init(void);
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 802099c8828a6..20a4f3c0a0a19 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
if (flags & ~__WGDEVICE_F_ALL)
goto out;

- ret = -EPERM;
- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
- info->attrs[WGDEVICE_A_FWMARK]) &&
- !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
- goto out;
+ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
+ struct net *net;
+ rcu_read_lock();
+ net = rcu_dereference(wg->creating_net);
+ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
+ rcu_read_unlock();
+ if (ret)
+ goto out;
+ }

++wg->device_update_gen;

diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 91438144e4f7a..9b2ab6fc91cdd 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
if (unlikely(routed_peer != peer))
goto dishonest_packet_peer;

- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
- ++dev->stats.rx_dropped;
- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
- dev->name, peer->internal_id,
- &peer->endpoint.addr);
- } else {
- update_rx_stats(peer, message_data_len(len_before_trim));
- }
+ napi_gro_receive(&peer->napi, skb);
+ update_rx_stats(peer, message_data_len(len_before_trim));
return;

dishonest_packet_peer:
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index f9018027fc133..c33e2c81635fa 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock)

int wg_socket_init(struct wg_device *wg, u16 port)
{
+ struct net *net;
int ret;
struct udp_tunnel_sock_cfg cfg = {
.sk_user_data = wg,
@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port)
};
#endif

+ rcu_read_lock();
+ net = rcu_dereference(wg->creating_net);
+ net = net ? maybe_get_net(net) : NULL;
+ rcu_read_unlock();
+ if (unlikely(!net))
+ return -ENONET;
+
#if IS_ENABLED(CONFIG_IPV6)
retry:
#endif

- ret = udp_sock_create(wg->creating_net, &port4, &new4);
+ ret = udp_sock_create(net, &port4, &new4);
if (ret < 0) {
pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
- return ret;
+ goto out;
}
set_sock_opts(new4);
- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+ setup_udp_tunnel_sock(net, new4, &cfg);

#if IS_ENABLED(CONFIG_IPV6)
if (ipv6_mod_enabled()) {
port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
- ret = udp_sock_create(wg->creating_net, &port6, &new6);
+ ret = udp_sock_create(net, &port6, &new6);
if (ret < 0) {
udp_tunnel_sock_release(new4);
if (ret == -EADDRINUSE && !port && retries++ < 100)
goto retry;
pr_err("%s: Could not create IPv6 socket\n",
wg->dev->name);
- return ret;
+ goto out;
}
set_sock_opts(new6);
- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+ setup_udp_tunnel_sock(net, new6, &cfg);
}
#endif

wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
- return 0;
+ ret = 0;
+out:
+ put_net(net);
+ return ret;
}

void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
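
The wireguard socket change above is an instance of a general pattern for a device that must not pin its creating namespace: keep the struct net pointer RCU-protected, and take a temporary reference with maybe_get_net() only for the duration of the work. A condensed sketch of that access pattern (the struct and function names are illustrative, wireguard-specific details omitted):

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

struct my_dev {
        struct net __rcu *creating_net;
};

static int do_work_in_creating_net(struct my_dev *dev)
{
        struct net *net;

        rcu_read_lock();
        net = rcu_dereference(dev->creating_net);
        net = net ? maybe_get_net(net) : NULL;  /* NULL if the netns is dying */
        rcu_read_unlock();
        if (!net)
                return -ENONET;

        /* ... create sockets / do work against 'net' ... */

        put_net(net);
        return 0;
}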
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index bc8c15fb609dc..080e5aa60bea4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
struct wil_net_stats *stats, bool gro)
{
- gro_result_t rc = GRO_NORMAL;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
*/
int mcast = is_multicast_ether_addr(da);
struct sk_buff *xmit_skb = NULL;
- static const char * const gro_res_str[] = {
- [GRO_MERGED] = "GRO_MERGED",
- [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
- [GRO_HELD] = "GRO_HELD",
- [GRO_NORMAL] = "GRO_NORMAL",
- [GRO_DROP] = "GRO_DROP",
- [GRO_CONSUMED] = "GRO_CONSUMED",
- };

if (wdev->iftype == NL80211_IFTYPE_STATION) {
sa = wil_skb_get_sa(skb);
if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
- rc = GRO_DROP;
dev_kfree_skb(skb);
- goto stats;
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ return;
}
} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
wil_rx_handle_eapol(vif, skb);

if (gro)
- rc = napi_gro_receive(&wil->napi_rx, skb);
+ napi_gro_receive(&wil->napi_rx, skb);
else
netif_rx_ni(skb);
- wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
- len, gro_res_str[rc]);
- }
-stats:
- /* statistics. rc set to GRO_NORMAL for AP bridging */
- if (unlikely(rc == GRO_DROP)) {
- ndev->stats.rx_dropped++;
- stats->rx_dropped++;
- wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
- } else {
- ndev->stats.rx_packets++;
- stats->rx_packets++;
- ndev->stats.rx_bytes += len;
- stats->rx_bytes += len;
- if (mcast)
- ndev->stats.multicast++;
}
+ ndev->stats.rx_packets++;
+ stats->rx_packets++;
+ ndev->stats.rx_bytes += len;
+ stats->rx_bytes += len;
+ if (mcast)
+ ndev->stats.multicast++;
}

void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ccbb5b43b8b2c..4502f9c4708d0 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -679,18 +679,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}

- if (a == &dev_attr_align.attr) {
- int i;
-
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
- if (test_bit(NDD_LABELING, &nvdimm->flags))
- return a->mode;
- }
- return 0;
- }
+ if (a == &dev_attr_align.attr)
+ return a->mode;

if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 54603bd3e02de..17f172cf456ad 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -409,11 +409,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;

- lockdep_assert_held(&ns->head->lock);
-
if (!head->disk)
return;

+ mutex_lock(&head->lock);
if (!(head->disk->flags & GENHD_FL_UP))
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
@@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ mutex_unlock(&head->lock);

- synchronize_srcu(&ns->head->srcu);
- kblockd_schedule_work(&ns->head->requeue_work);
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}

static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
@@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- mutex_lock(&ns->head->lock);
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}

static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -661,10 +659,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
mutex_unlock(&ns->ctrl->ana_lock);
} else {
- mutex_lock(&ns->head->lock);
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
}

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index aa5ca222c6f59..96deaf3484662 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -129,30 +129,41 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
- struct nvmet_async_event *aen;
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_req *req;

- while (1) {
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, status);
mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- break;
- }
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen;
+ struct nvmet_req *req;

+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- if (status == 0)
- nvmet_set_result(req, nvmet_async_event_result(aen));
+ nvmet_set_result(req, nvmet_async_event_result(aen));

list_del(&aen->entry);
kfree(aen);

mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, 0);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
@@ -172,7 +183,7 @@ static void nvmet_async_event_work(struct work_struct *work)
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, async_event_work);

- nvmet_async_events_process(ctrl, 0);
+ nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -755,7 +766,6 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_ctrl *ctrl = sq->ctrl;

/*
@@ -763,7 +773,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
* queue doesn't have outstanding requests on it.
*/
if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
- nvmet_async_events_process(ctrl, status);
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9f982c0627a0d..95a3bb2e5eabf 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -303,10 +303,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
child, addr);

if (of_mdiobus_child_is_phy(child)) {
+ /* -ENODEV is the return code that PHYLIB has
+ * standardized on to indicate that bus
+ * scanning should continue.
+ */
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc && rc != -ENODEV)
+ if (!rc)
+ break;
+ if (rc != -ENODEV)
goto unregister;
- break;
}
}
}
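
Both the phylib and of_mdio hunks above rely on the same convention: a PHY probe returning -ENODEV means "nothing usable at this address, keep scanning", while any other error aborts bus registration. A minimal sketch of a scan loop honouring that convention (probe_one_phy() is a hypothetical helper, not a kernel API):

#include <linux/errno.h>

static int probe_one_phy(int addr);     /* hypothetical per-address probe */

static int scan_bus(int naddrs)
{
        int addr, rc;

        for (addr = 0; addr < naddrs; addr++) {
                rc = probe_one_phy(addr);
                if (!rc)
                        continue;       /* found and registered */
                if (rc == -ENODEV)
                        continue;       /* empty address: keep scanning */
                return rc;              /* real error: abort the scan */
        }
        return 0;
}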
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index fe0be8a6ebb7b..092a48e4dff57 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -170,6 +170,7 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
+ struct irq_chip irq;
};

static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
return 0;
}

-static struct irq_chip pmic_gpio_irq_chip = {
- .name = "spmi-gpio",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_wake = irq_chip_set_wake_parent,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
static int pmic_gpio_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;

+ state->irq.name = "spmi-gpio",
+ state->irq.irq_ack = irq_chip_ack_parent,
+ state->irq.irq_mask = irq_chip_mask_parent,
+ state->irq.irq_unmask = irq_chip_unmask_parent,
+ state->irq.irq_set_type = irq_chip_set_type_parent,
+ state->irq.irq_set_wake = irq_chip_set_wake_parent,
+ state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
+
girq = &state->chip.irq;
- girq->chip = &pmic_gpio_irq_chip;
+ girq->chip = &state->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 21661f6490d68..195cfe557511b 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -731,8 +731,8 @@ static int tegra_pinctrl_resume(struct device *dev)
}

const struct dev_pm_ops tegra_pinctrl_pm = {
- .suspend = &tegra_pinctrl_suspend,
- .resume = &tegra_pinctrl_resume
+ .suspend_noirq = &tegra_pinctrl_suspend,
+ .resume_noirq = &tegra_pinctrl_resume
};

static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index e1d6c8f6d40bb..fe65b5acaf280 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
},
{
DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
},
{
DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 689537927f6f7..4c8e8b4722872 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {

};

+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
[_chip ## _ ## _name] = { \
.desc = { \
@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}

-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
- .desc = { \
- .name = #_name,\
- .n_voltages = ((max) - (min)) / (step) + 1, \
- .ops = &pfuze100_sw_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = _chip ## _ ## _name, \
- .owner = THIS_MODULE, \
- .min_uV = (min), \
- .uV_step = (step), \
- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
- .vsel_mask = 0x7, \
- }, \
- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
- .stby_mask = 0x7, \
-}
+/* No linar case for the some switches of PFUZE3000 */
+/* No linear case for some switches of PFUZE3000 */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pfuze3000_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .volt_table = voltages, \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = (mask), \
+ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
+ .enable_mask = 0xf, \
+ .enable_val = 0x8, \
+ .enable_time = 500, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = (mask), \
+ .sw_reg = true, \
+ }

#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
.desc = { \
@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
};

static struct pfuze_regulator pfuze3000_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
};

static struct pfuze_regulator pfuze3001_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 569966bdc5138..60d675fefac7d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4265,9 +4265,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
int fallback = *(int *)reply->param;

QETH_CARD_TEXT(card, 4, "setaccb");
- if (cmd->hdr.return_code)
- return -EIO;
- qeth_setadpparms_inspect_rc(cmd);

access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
QETH_CARD_TEXT_(card, 2, "rc=%d",
@@ -4277,7 +4274,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
access_ctrl_req->subcmd_code, CARD_DEVID(card),
cmd->data.setadapterparms.hdr.return_code);
- switch (cmd->data.setadapterparms.hdr.return_code) {
+ switch (qeth_setadpparms_inspect_rc(cmd)) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
dev_info(&card->gdev->dev,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3d0bc000f5005..c621e8f0897f2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -576,7 +576,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
ZFCP_STATUS_ERP_TIMEDOUT)) {
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_run("erscf_1", act);
- req->erp_action = NULL;
+ /* lock-free concurrent access with
+ * zfcp_erp_timeout_handler()
+ */
+ WRITE_ONCE(req->erp_action, NULL);
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_run("erscf_2", act);
@@ -612,8 +615,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
void zfcp_erp_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
- struct zfcp_erp_action *act = fsf_req->erp_action;
+ struct zfcp_erp_action *act;

+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ return;
+ /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+ act = READ_ONCE(fsf_req->erp_action);
+ if (!act)
+ return;
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
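
The zfcp hunks above pair a WRITE_ONCE() in the ERP strategy path with a READ_ONCE() in the timer handler so the erp_action pointer can be cleared and tested without sharing a lock. The general shape of that pairing (names shortened, purely illustrative):

#include <linux/compiler.h>

struct act;                     /* opaque here; stands in for the ERP action */

struct req {
        struct act *act;        /* shared by the strategy and timer paths */
};

/* Strategy side: detach the action; concurrent readers must observe NULL. */
static void detach_action(struct req *r)
{
        WRITE_ONCE(r->act, NULL);
}

/* Timer side: sample the pointer once, then only use the local copy. */
static void timeout_handler(struct req *r)
{
        struct act *a = READ_ONCE(r->act);

        if (!a)
                return;         /* already detached, nothing to notify */
        /* ... notify 'a' of the timeout ... */
}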

diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4104bdcdbb6fd..70be1f5de8730 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11895,7 +11895,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
lpfc_sli4_xri_exchange_busy_wait(phba);

/* per-phba callback de-registration for hotplug event */
- lpfc_cpuhp_remove(phba);
+ if (phba->pport)
+ lpfc_cpuhp_remove(phba);

/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 42c3ad27f1cbc..df670fba2ab8a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
} else if (fcport->d_id.b24 != rp->id.b24 ||
- fcport->scan_needed) {
+ (fcport->scan_needed &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_NVME_INITIATOR)) {
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 719e1f189ebf2..ada0d8804d84c 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -22,6 +22,8 @@
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420

+#define IMX8MP_OCOTP_UID_OFFSET 0x10
+
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800

@@ -88,6 +90,8 @@ static void __init imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
+ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ IMX8MP_OCOTP_UID_OFFSET : 0;

np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
@@ -96,9 +100,9 @@ static void __init imx8mm_soc_uid(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);

- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);

iounmap(ocotp_base);
of_node_put(np);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 2e9f9adc59003..88176eaca448f 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -584,14 +584,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
return;

if (dma->chan_tx) {
- dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
- dma_bufsize, DMA_TO_DEVICE);
+ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys);
dma_release_channel(dma->chan_tx);
}

if (dma->chan_rx) {
- dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
- dma_bufsize, DMA_FROM_DEVICE);
+ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys);
dma_release_channel(dma->chan_rx);
}
}
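
The spi-fsl-dspi fix above is the classic pairing rule: buffers obtained with dma_alloc_coherent() must be released with dma_free_coherent(); dma_unmap_single() is only the counterpart of dma_map_single(). A hedged sketch of the matched alloc/free (the structure and sizes are placeholders):

#include <linux/errno.h>
#include <linux/dma-mapping.h>

struct dma_bufs {
        void *tx_buf;
        dma_addr_t tx_phys;
};

static int alloc_dma_bufs(struct device *dev, struct dma_bufs *b, size_t sz)
{
        b->tx_buf = dma_alloc_coherent(dev, sz, &b->tx_phys, GFP_KERNEL);
        return b->tx_buf ? 0 : -ENOMEM;
}

static void free_dma_bufs(struct device *dev, struct dma_bufs *b, size_t sz)
{
        /* Must mirror dma_alloc_coherent(); dma_unmap_single() would be wrong. */
        dma_free_coherent(dev, sz, b->tx_buf, b->tx_phys);
}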
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 110338dbe3728..cc60f6a28d700 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1830,12 +1830,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
if (!pIE)
return _FAIL;
+ if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+ return _FAIL;

memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
supportRateNum = ie_len;

pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE)
+ if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);

return _SUCCESS;
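
The rtl8723bs change above adds the usual guard before copying IE data into a fixed-size rate array: reject or skip anything that would overrun the destination, including the second copy appended at an offset. A generic sketch of that clamp-before-memcpy pattern (the array size is illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define MAX_RATES 16

static int append_rates(u8 rates[MAX_RATES], size_t used,
                        const u8 *ie, size_t ie_len)
{
        if (ie_len > MAX_RATES - used)  /* would overflow the fixed buffer */
                return -EINVAL;
        memcpy(rates + used, ie, ie_len);
        return used + ie_len;           /* new number of rates stored */
}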
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index f8e43a6faea9b..cdcc64ea2554f 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -75,8 +75,6 @@ static LIST_HEAD(hvc_structs);
*/
static DEFINE_MUTEX(hvc_structs_mutex);

-/* Mutex to serialize hvc_open */
-static DEFINE_MUTEX(hvc_open_mutex);
/*
* This value is used to assign a tty->index value to a hvc_struct based
* upon order of exposure via hvc_probe(), when we can not match it to
@@ -348,24 +346,16 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
*/
static int hvc_open(struct tty_struct *tty, struct file * filp)
{
- struct hvc_struct *hp;
+ struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
int rc = 0;

- mutex_lock(&hvc_open_mutex);
-
- hp = tty->driver_data;
- if (!hp) {
- rc = -EIO;
- goto out;
- }
-
spin_lock_irqsave(&hp->port.lock, flags);
/* Check and then increment for fast path open. */
if (hp->port.count++ > 0) {
spin_unlock_irqrestore(&hp->port.lock, flags);
hvc_kick();
- goto out;
+ return 0;
} /* else count == 0 */
spin_unlock_irqrestore(&hp->port.lock, flags);

@@ -393,8 +383,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
/* Force wakeup of the polling thread */
hvc_kick();

-out:
- mutex_unlock(&hvc_open_mutex);
return rc;
}

diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index e71240b386b49..da4c5eb03d7ee 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
if (!set || (tmode & 0xff) != 0)
return -EINVAL;

- switch (tmode >> 8) {
+ tmode >>= 8;
+ switch (tmode) {
case TEST_J:
case TEST_K:
case TEST_SE0_NAK:
@@ -711,15 +712,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
int ret = 0;
u8 zlp = 0;

+ spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);

/* cancel the request if controller receive new SETUP packet. */
- if (cdns3_check_new_setup(priv_dev))
+ if (cdns3_check_new_setup(priv_dev)) {
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
return -ECONNRESET;
+ }

/* send STATUS stage. Should be called only for SET_CONFIGURATION */
if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
- spin_lock_irqsave(&priv_dev->lock, flags);
cdns3_select_ep(priv_dev, 0x00);

erdy_sent = !priv_dev->hw_configured_flag;
@@ -744,7 +747,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
return 0;
}

- spin_lock_irqsave(&priv_dev->lock, flags);
if (!list_empty(&priv_ep->pending_req_list)) {
dev_err(priv_dev->dev,
"can't handle multiple requests for ep0\n");
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index 8d121e207fd8f..755c565822575 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
- __entry->ep_dir = priv_dev->ep0_data_dir;
+ __entry->ep_dir = priv_dev->selected_ep;
__entry->ep_sts = ep_sts;
),
TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f67088bb8218b..d5187b50fc828 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)

static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3e8efe759c3e6..e0b77674869ce 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },

- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },

/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 12b98b4662872..7faf5f8c056d4 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
epnum, 0);
}

- ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret) {
- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
- hsotg->ctrl_req);
- return ret;
- }
dwc2_hsotg_dump(hsotg);

return 0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 69972750e1612..5684c4781af9e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -536,6 +536,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);

+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Postponed adding a new gadget to the udc class driver list */
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
+ dwc2_hsotg_remove(hsotg);
+ goto error_init;
+ }
+ }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;

error_init:
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 48b68b6f0dc82..90bb022737da8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
.suspend_clk_idx = -1,
};

-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
- .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
- .num_clks = 2,
- .suspend_clk_idx = 1,
-};
-
static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
.clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
.num_clks = 4,
@@ -184,9 +178,6 @@ static const struct of_device_id exynos_dwc3_match[] = {
{
.compatible = "samsung,exynos5250-dwusb3",
.data = &exynos5250_drvdata,
- }, {
- .compatible = "samsung,exynos5420-dwusb3",
- .data = &exynos5420_drvdata,
}, {
.compatible = "samsung,exynos5433-dwusb3",
.data = &exynos5433_drvdata,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index cafde053788bb..80a1b52c656e0 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
return 0;

err_create_workqueue:
- destroy_workqueue(udc->qwork);
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
err_destroy_dma:
dma_pool_destroy(udc->dtd_pool);
err_free_dma:
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index a4e9abcbdc4fc..1a9b7572e17fe 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);

irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}

diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 1a48ab1bd3b2a..7ff2cbdcd0b22 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
+ case PCI_VENDOR_ID_HUAWEI:
+ /* Synopsys HC bug */
+ if (pdev->device == 0xa239) {
+ ehci_info(ehci, "applying Synopsys HC workaround\n");
+ ehci->has_synopsys_hc_bug = 1;
+ }
+ break;
}

/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index cff9652403270..b91d50da6127f 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
struct resource *mem;

usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index bfbdb3ceed291..4311d4c9b68de 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;

+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
device_init_wakeup(&dev->dev, false);
@@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
xhci_mtk_sch_exit(mtk);
xhci_mtk_clks_disable(mtk);
xhci_mtk_ldos_disable(mtk);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);

return 0;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index bee5deccc83d8..ed468eed299c5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
xhci->devs[slot_id]->out_ctx, ep_index);

ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

@@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
int hird, exit_latency;
int ret;

+ if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+ return -EPERM;
+
if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
@@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);

- if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+ if (enable) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
@@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
mutex_lock(hcd->bandwidth_mutex);
xhci_change_max_exit_latency(xhci, udev, 0);
mutex_unlock(hcd->bandwidth_mutex);
+ readl_poll_timeout(ports[port_num]->addr, pm_val,
+ (pm_val & PORT_PLS_MASK) == XDEV_U0,
+ 100, 10000);
return 0;
}
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 86cfefdd6632b..c80710e474769 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
* 4 - TRB error
* 5-7 - reserved
*/
-#define EP_STATE_MASK (0xf)
+#define EP_STATE_MASK (0x7)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 01c6a48c41bc9..ac9a81ae82164 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
return info->dma_map_ctrl(chan->device->dev, pkt, map);
}

-static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result);
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ dma_cookie_t cookie;

fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)
@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
if (!desc)
return;

- desc->callback = usbhsf_dma_complete;
- desc->callback_param = pipe;
+ desc->callback_result = usbhsf_dma_complete;
+ desc->callback_param = pkt;

- pkt->cookie = dmaengine_submit(desc);
- if (pkt->cookie < 0) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return;
}
@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
struct dma_chan *chan, int dtln)
{
struct usbhs_pipe *pipe = pkt->pipe;
- struct dma_tx_state state;
size_t received_size;
int maxp = usbhs_pipe_get_maxpacket(pipe);

- dmaengine_tx_status(chan, pkt->cookie, &state);
- received_size = pkt->length - state.residue;
+ received_size = pkt->length - pkt->dma_result->residue;

if (dtln) {
received_size -= USBHS_USB_DMAC_XFER_SIZE;
@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
return 0;
}

-static void usbhsf_dma_complete(void *arg)
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result)
{
- struct usbhs_pipe *pipe = arg;
+ struct usbhs_pkt *pkt = arg;
+ struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;

+ pkt->dma_result = result;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
if (ret < 0)
dev_err(dev, "dma_complete run_error %d : %d\n",
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index c3d3cc35cee0f..4a7dc23ce3d35 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -50,7 +50,7 @@ struct usbhs_pkt {
struct usbhs_pkt *pkt);
struct work_struct work;
dma_addr_t dma;
- dma_cookie_t cookie;
+ const struct dmaengine_result *dma_result;
void *buf;
int length;
int trans;
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index c22e5c4bbf1a9..e35508f5e1287 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -129,7 +129,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;

- msg[1] = PMC_USB_DP_HPD_IRQ;
+ if (data->status & DP_STATUS_IRQ_HPD)
+ msg[1] = PMC_USB_DP_HPD_IRQ;

if (data->status & DP_STATUS_HPD_STATE)
msg[1] |= PMC_USB_DP_HPD_LVL;
@@ -142,6 +143,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
{
struct typec_displayport_data *data = state->data;
struct altmode_req req = { };
+ int ret;

if (data->status & DP_STATUS_IRQ_HPD)
return pmc_usb_mux_dp_hpd(port, state);
@@ -161,7 +163,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
if (data->status & DP_STATUS_HPD_STATE)
req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;

- return pmc_usb_command(port, (void *)&req, sizeof(req));
+ ret = pmc_usb_command(port, (void *)&req, sizeof(req));
+ if (ret)
+ return ret;
+
+ if (data->status & DP_STATUS_HPD_STATE)
+ return pmc_usb_mux_dp_hpd(port, state);
+
+ return 0;
}

static int
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 017389021b96a..b56a0880a0441 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -179,26 +179,6 @@ out:
return tcpci_irq(chip->tcpci);
}

-static int rt1711h_init_alert(struct rt1711h_chip *chip,
- struct i2c_client *client)
-{
- int ret;
-
- /* Disable chip interrupts before requesting irq */
- ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
- if (ret < 0)
- return ret;
-
- ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
- rt1711h_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- dev_name(chip->dev), chip);
- if (ret < 0)
- return ret;
- enable_irq_wake(client->irq);
- return 0;
-}
-
static int rt1711h_sw_reset(struct rt1711h_chip *chip)
{
int ret;
@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
if (ret < 0)
return ret;

- ret = rt1711h_init_alert(chip, client);
+ /* Disable chip interrupts before requesting irq */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
if (ret < 0)
return ret;

@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
if (IS_ERR_OR_NULL(chip->tcpci))
return PTR_ERR(chip->tcpci);

+ ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+ rt1711h_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(chip->dev), chip);
+ if (ret < 0)
+ return ret;
+ enable_irq_wake(client->irq);
+
return 0;
}

diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 9d28a8e3328fb..e2a490c5ae08f 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
ops->graphics = 1;

if (!blank) {
- var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+ FB_ACTIVATE_KD_TEXT;
fb_set_var(info, &var);
ops->graphics = 0;
ops->var = info->var;
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 78ba5f9322879..296b489861a9a 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
return ERR_PTR(-ENOMEM);
}

+ cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+ if (!cell->name) {
+ kfree(cell);
+ return ERR_PTR(-ENOMEM);
+ }
+
cell->net = net;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
+ cell->name[i] = 0;

atomic_set(&cell->usage, 2);
INIT_WORK(&cell->manager, afs_manage_cell);
@@ -203,6 +210,7 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
+ kfree(cell->name);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
@@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)

afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
key_put(cell->anonymous_key);
+ kfree(cell->name);
kfree(cell);

_leave(" [destroyed]");
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 98e0cebd5e5e9..c67a9767397d0 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -397,7 +397,7 @@ struct afs_cell {
struct afs_vlserver_list __rcu *vl_servers;

u8 name_len; /* Length of name */
- char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */
+ char *name; /* Cell name, case-flattened and NUL-padded */
};

/*
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 233c5663f2332..0c17f18b47940 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -916,7 +916,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
- goto out_put_group;
+ goto out;
}

/*
@@ -954,7 +954,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret) {
btrfs_add_delayed_iput(inode);
- goto out_put_group;
+ goto out;
}
clear_nlink(inode);
/* One for the block groups ref */
@@ -977,13 +977,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,

ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
if (ret < 0)
- goto out_put_group;
+ goto out;
if (ret > 0)
btrfs_release_path(path);
if (ret == 0) {
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
- goto out_put_group;
+ goto out;
btrfs_release_path(path);
}

@@ -992,6 +992,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
&fs_info->block_group_cache_tree);
RB_CLEAR_NODE(&block_group->cache_node);

+ /* Once for the block groups rbtree */
+ btrfs_put_block_group(block_group);
+
if (fs_info->first_logical_byte == block_group->start)
fs_info->first_logical_byte = (u64)-1;
spin_unlock(&fs_info->block_group_cache_lock);
@@ -1102,10 +1105,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,

ret = remove_block_group_free_space(trans, block_group);
if (ret)
- goto out_put_group;
-
- /* Once for the block groups rbtree */
- btrfs_put_block_group(block_group);
+ goto out;

ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
@@ -1128,10 +1128,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
free_extent_map(em);
}

-out_put_group:
+out:
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
-out:
if (remove_rsv)
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_free_path(path);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 196d4511f8126..09e6dff8a8f85 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -988,6 +988,8 @@ enum {
BTRFS_ROOT_DEAD_RELOC_TREE,
/* Mark dead root stored on device whose cleanup needs to be resumed */
BTRFS_ROOT_DEAD_TREE,
+ /* The root has a log tree. Used only for subvolume roots. */
+ BTRFS_ROOT_HAS_LOG_TREE,
};

/*
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 719e68ab552c5..52d565ff66e2d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1912,13 +1912,26 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
pos = iocb->ki_pos;
count = iov_iter_count(from);
if (iocb->ki_flags & IOCB_NOWAIT) {
+ size_t nocow_bytes = count;
+
/*
* We will allocate space in case nodatacow is not set,
* so bail
*/
if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
BTRFS_INODE_PREALLOC)) ||
- check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+ check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes) <= 0) {
+ inode_unlock(inode);
+ return -EAGAIN;
+ }
+ /* check_can_nocow() locks the snapshot lock on success */
+ btrfs_drew_write_unlock(&root->snapshot_lock);
+ /*
+ * There are holes in the range or parts of the range that must
+ * be COWed (shared extents, RO block groups, etc), so just bail
+ * out.
+ */
+ if (nocow_bytes < count) {
inode_unlock(inode);
return -EAGAIN;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 66dd919fc7235..6aa200e373c8f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
u64 num_bytes;
unsigned long ram_size;
u64 cur_alloc_size = 0;
+ u64 min_alloc_size;
u64 blocksize = fs_info->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
btrfs_drop_extent_cache(BTRFS_I(inode), start,
start + num_bytes - 1, 0);

+ /*
+ * Relocation relies on the relocated extents to have exactly the same
+ * size as the original extents. Normally writeback for relocation data
+ * extents follows a NOCOW path because relocation preallocates the
+ * extents. However, due to an operation such as scrub turning a block
+ * group to RO mode, it may fallback to COW mode, so we must make sure
+ * an extent allocated during COW has exactly the requested size and can
+ * not be split into smaller extents, otherwise relocation breaks and
+ * fails during the stage where it updates the bytenr of file extent
+ * items.
+ */
+ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ min_alloc_size = num_bytes;
+ else
+ min_alloc_size = fs_info->sectorsize;
+
while (num_bytes > 0) {
cur_alloc_size = num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
- fs_info->sectorsize, 0, alloc_hint,
+ min_alloc_size, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
int *page_started, unsigned long *nr_written)
{
const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+ const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+ BTRFS_DATA_RELOC_TREE_OBJECTID);
const u64 range_bytes = end + 1 - start;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
u64 range_start = start;
@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
* data space info, which we incremented in the step above.
*
* If we need to fallback to cow and the inode corresponds to a free
- * space cache inode, we must also increment bytes_may_use of the data
- * space_info for the same reason. Space caches always get a prealloc
+ * space cache inode or an inode of the data relocation tree, we must
+ * also increment bytes_may_use of the data space_info for the same
+ * reason. Space caches and relocated data extents always get a prealloc
* extent for them, however scrub or balance may have set the block
- * group that contains that extent to RO mode.
+ * group that contains that extent to RO mode and therefore force COW
+ * when starting writeback.
*/
count = count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0);
- if (count > 0 || is_space_ino) {
- const u64 bytes = is_space_ino ? range_bytes : count;
+ if (count > 0 || is_space_ino || is_reloc_ino) {
+ u64 bytes = count;
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct btrfs_space_info *sinfo = fs_info->data_sinfo;

+ if (is_space_ino || is_reloc_ino)
+ bytes = range_bytes;
+
spin_lock(&sinfo->lock);
btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
spin_unlock(&sinfo->lock);
@@ -8238,9 +8262,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
dio_data.overwrite = 1;
inode_unlock(inode);
relock = true;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
- goto out;
}
ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
offset, count);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ea72b9d54ec86..bdfc421494481 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
if (ret)
goto out;

+ set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
root->log_start_pid = current->pid;
}
@@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root)
{
int ret = -ENOENT;

+ if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
+ return ret;
+
mutex_lock(&root->log_mutex);
if (root->log_root) {
ret = 0;
@@ -3312,6 +3316,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
if (root->log_root) {
free_log_tree(trans, root->log_root);
root->log_root = NULL;
+ clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
}
return 0;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f829f4165d38c..6fc69c3b2749d 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -759,6 +759,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
/* close extra handle outside of crit sec */
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+ rc = 0;
goto oshr_free;
}

@@ -3144,6 +3145,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len);

+ /*
+ * We zero the range through ioctl, so we need remove the page caches
+ * first, otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);

/* if file not oplocked can't be sure whether asking to extend size */
if (!CIFS_CACHE_READ(cifsi))
@@ -3210,6 +3216,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
return rc;
}

+ /*
+ * We implement the punch hole through ioctl, so we need remove the page
+ * caches first, otherwise the data may be inconsistent with the server.
+ */
+ truncate_pagecache_range(inode, offset, offset + len - 1);
+
cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

fsctl_buf.FileOffset = cpu_to_le64(offset);
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 7824f5563a552..9b66c28b3ae9d 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
static inline void z_erofs_onlinepage_fixup(struct page *page,
uintptr_t index, bool down)
{
- unsigned long *p, o, v, id;
-repeat:
- p = &page_private(page);
- o = READ_ONCE(*p);
+ union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+ int orig, orig_index, val;

- id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
- if (id) {
+repeat:
+ orig = atomic_read(u.o);
+ orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (orig_index) {
if (!index)
return;

- DBG_BUGON(id != index);
+ DBG_BUGON(orig_index != index);
}

- v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
- ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
- if (cmpxchg(p, o, v) != o)
+ val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+ if (atomic_cmpxchg(u.o, orig, val) != orig)
goto repeat;
}

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1829be7f63a35..4ab1728de247c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1942,10 +1942,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)

WRITE_ONCE(req->result, res);
/* order with io_poll_complete() checking ->result */
- if (res != -EAGAIN) {
- smp_wmb();
- WRITE_ONCE(req->iopoll_completed, 1);
- }
+ smp_wmb();
+ WRITE_ONCE(req->iopoll_completed, 1);
}

/*
@@ -5425,9 +5423,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
const bool in_async = io_wq_current_is_worker();

- if (req->result == -EAGAIN)
- return -EAGAIN;
-
/* workqueue context doesn't hold uring_lock, grab it now */
if (in_async)
mutex_lock(&ctx->uring_lock);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d49b1d1979084..f0c3f0123131e 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -267,8 +267,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
struct inode *inode = dreq->inode;

- inode_dio_end(inode);
-
if (dreq->iocb) {
long res = (long) dreq->error;
if (dreq->count != 0) {
@@ -280,7 +278,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)

complete(&dreq->completion);

+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
@@ -410,8 +411,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_end(inode);
+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
return result < 0 ? result : -EIO;
}

@@ -864,8 +867,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_end(inode);
+ igrab(inode);
nfs_direct_req_release(dreq);
+ inode_dio_end(inode);
+ iput(inode);
return result < 0 ? result : -EIO;
}

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index f96367a2463e3..ccd6c1637b270 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -83,6 +83,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
dprintk("NFS: release(%pD2)\n", filp);

nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
+ inode_dio_wait(inode);
nfs_file_clear_open_context(filp);
return 0;
}
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 7d399f72ebbbf..de03e440b7eef 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -907,9 +907,8 @@ retry:
goto out_mds;

/* Use a direct mapping of ds_idx to pgio mirror_idx */
- if (WARN_ON_ONCE(pgio->pg_mirror_count !=
- FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
- goto out_mds;
+ if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
+ goto out_eagain;

for (i = 0; i < pgio->pg_mirror_count; i++) {
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -931,7 +930,10 @@ retry:
(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
pgio->pg_maxretrans = io_maxretrans;
return;
-
+out_eagain:
+ pnfs_generic_pg_cleanup(pgio);
+ pgio->pg_error = -EAGAIN;
+ return;
out_mds:
trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
0, NFS4_MAX_UINT64, IOMODE_RW,
@@ -941,6 +943,7 @@ out_mds:
pgio->pg_lseg = NULL;
pgio->pg_maxretrans = 0;
nfs_pageio_reset_write_mds(pgio);
+ pgio->pg_error = -EAGAIN;
}

static unsigned int
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 152a0fc4e9051..751bc4dc74663 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
&ocfs2_nfs_sync_lops, osb);
}

+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+ ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ init_rwsem(&osb->nfs_sync_rwlock);
+}
+
void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
@@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
if (ocfs2_is_hard_readonly(osb))
return -EROFS;

+ if (ex)
+ down_write(&osb->nfs_sync_rwlock);
+ else
+ down_read(&osb->nfs_sync_rwlock);
+
if (ocfs2_mount_local(osb))
return 0;

@@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres,
ex ? LKM_EXMODE : LKM_PRMODE);
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
}

int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
@@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
- ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ ocfs2_nfs_sync_lock_init(osb);
ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);

osb->cconn = conn;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9150cfa4df7dc..9461bd3e1c0c8 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -394,6 +394,7 @@ struct ocfs2_super
struct ocfs2_lock_res osb_super_lockres;
struct ocfs2_lock_res osb_rename_lockres;
struct ocfs2_lock_res osb_nfs_sync_lockres;
+ struct rw_semaphore nfs_sync_rwlock;
struct ocfs2_lock_res osb_trim_fs_lockres;
struct mutex obs_trim_fs_mutex;
struct ocfs2_dlm_debug *osb_dlm_debug;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 0dd8c41bafd4c..19137c6d087bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -290,7 +290,7 @@
#define OCFS2_MAX_SLOTS 255

/* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT -1
+#define OCFS2_INVALID_SLOT ((u16)-1)

#define OCFS2_VOL_UUID_LEN 16
#define OCFS2_MAX_VOL_LABEL_LEN 64
@@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
enum {
BAD_BLOCK_SYSTEM_INODE = 0,
GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
HEARTBEAT_SYSTEM_INODE,
GLOBAL_BITMAP_SYSTEM_INODE,
USER_QUOTA_SYSTEM_INODE,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4836becb7578a..45745cc3408a5 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
goto bail;
}

- inode_alloc_inode =
- ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
- suballoc_slot);
+ if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+ else
+ inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+ INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
if (!inode_alloc_inode) {
/* the error code could be inaccurate, but we are not able to
* get the correct one. */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index de23fb95fe918..64a5335046b00 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -40,6 +40,7 @@
#define DMA_PTE_SNP BIT_ULL(11)

#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
#define DMA_FL_PTE_XD BIT_ULL(63)

#define CONTEXT_TT_MULTI_LEVEL 0
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 130a668049ab0..36c7ad24d54d9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3125,7 +3125,7 @@ static inline int dev_recursion_level(void)
return this_cpu_read(softnet_data.xmit.recursion);
}

-#define XMIT_RECURSION_LIMIT 10
+#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 733fad7dfbed9..6d15040c642cb 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)

static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u32 prod = p_chain->u.chain16.prod_idx;
+ u32 cons = p_chain->u.chain16.cons_idx;
u16 used;

- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
+
+ used = (u16)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ used -= prod / elem_per_page - cons / elem_per_page;

return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u64 prod = p_chain->u.chain32.prod_idx;
+ u64 cons = p_chain->u.chain32.cons_idx;
u32 used;

- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
+
+ used = (u32)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);

return p_chain->capacity - used;
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1815065d52f37..1b0c7813197b0 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1358,7 +1358,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,

extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);

-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+static inline long ksys_ftruncate(unsigned int fd, loff_t length)
{
return do_sys_ftruncate(fd, length, 1);
}
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index c253461b1c4e6..96d36b7a13440 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
u16 digest_size;
} __packed;

+#define TCG_SPECID_SIG "Spec ID Event03"
+
struct tcg_efi_specid_event_head {
u8 signature[16];
u32 platform_class;
@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
int i;
int j;
u32 count, event_type;
+ const u8 zero_digest[sizeof(event_header->digest)] = {0};

marker = event;
marker_start = marker;
@@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
count = READ_ONCE(event->count);
event_type = READ_ONCE(event->event_type);

+ /* Verify that it's the log header */
+ if (event_header->pcr_idx != 0 ||
+ event_header->event_type != NO_ACTION ||
+ memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
+ size = 0;
+ goto out;
+ }
+
efispecid = (struct tcg_efi_specid_event_head *)event_header->event;

/* Check if event is malformed. */
- if (count > efispecid->num_algs) {
+ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+ sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
size = 0;
goto out;
}
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 15b4d9aec7ff2..122d9e2d8dfde 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -353,11 +353,13 @@ enum {
ipv4_is_anycast_6to4(a))

/* Flags used for the bind address copy functions. */
-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
local sock family */
-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
peer */
-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
peer */

/* Reasons to retransmit. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 3e8c6d4b4b59f..46423e86dba50 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1846,7 +1846,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
- sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}

diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 8f71c111e65af..03024701c79f7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1013,6 +1013,7 @@ struct xfrm_offload {
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256

__u32 status;
#define CRYPTO_SUCCESS 1
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index b6aac7ee1f670..4c14e8be72677 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -205,6 +205,7 @@ struct fb_bitfield {
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */

#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */

diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index cb305e71e7deb..25aebd21c15b1 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1240,16 +1240,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
- if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
+ if (unlikely(max_optlen < 0))
return -EINVAL;

+ if (unlikely(max_optlen > PAGE_SIZE)) {
+ /* We don't expose optvals that are greater than PAGE_SIZE
+ * to the BPF program.
+ */
+ max_optlen = PAGE_SIZE;
+ }
+
ctx->optval = kzalloc(max_optlen, GFP_USER);
if (!ctx->optval)
return -ENOMEM;

ctx->optval_end = ctx->optval + max_optlen;

- return 0;
+ return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
@@ -1283,13 +1290,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
*/
max_optlen = max_t(int, 16, *optlen);

- ret = sockopt_alloc_buf(&ctx, max_optlen);
- if (ret)
- return ret;
+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ if (max_optlen < 0)
+ return max_optlen;

ctx.optlen = *optlen;

- if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
+ if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
@@ -1317,8 +1324,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
/* export any potential modifications */
*level = ctx.level;
*optname = ctx.optname;
- *optlen = ctx.optlen;
- *kernel_optval = ctx.optval;
+
+ /* optlen == 0 from BPF indicates that we should
+ * use original userspace data.
+ */
+ if (ctx.optlen != 0) {
+ *optlen = ctx.optlen;
+ *kernel_optval = ctx.optval;
+ }
}

out:
@@ -1350,12 +1363,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
return retval;

- ret = sockopt_alloc_buf(&ctx, max_optlen);
- if (ret)
- return ret;
-
ctx.optlen = max_optlen;

+ max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+ if (max_optlen < 0)
+ return max_optlen;
+
if (!retval) {
/* If kernel getsockopt finished successfully,
* copy whatever was returned to the user back
@@ -1369,10 +1382,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}

- if (ctx.optlen > max_optlen)
- ctx.optlen = max_optlen;
-
- if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
+ if (copy_from_user(ctx.optval, optval,
+ min(ctx.optlen, max_optlen)) != 0) {
ret = -EFAULT;
goto out;
}
@@ -1401,10 +1412,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
goto out;
}

- if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
- put_user(ctx.optlen, optlen)) {
- ret = -EFAULT;
- goto out;
+ if (ctx.optlen != 0) {
+ if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+ put_user(ctx.optlen, optlen)) {
+ ret = -EFAULT;
+ goto out;
+ }
}

ret = ctx.retval;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 58bdca5d978a8..badf382bbd365 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -85,12 +85,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list);
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

-static struct hlist_head *dev_map_create_hash(unsigned int entries)
+static struct hlist_head *dev_map_create_hash(unsigned int entries,
+ int numa_node)
{
int i;
struct hlist_head *hash;

- hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+ hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
if (hash != NULL)
for (i = 0; i < entries; i++)
INIT_HLIST_HEAD(&hash[i]);
@@ -138,7 +139,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
return -EINVAL;

if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+ dtab->map.numa_node);
if (!dtab->dev_index_head)
goto free_charge;

@@ -223,7 +225,7 @@ static void dev_map_free(struct bpf_map *map)
}
}

- kfree(dtab->dev_index_head);
+ bpf_map_area_free(dtab->dev_index_head);
} else {
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8f4bbdaf965eb..2270930f36f83 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -124,6 +124,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
{
struct page *page;
void *ret;
+ int err;

if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs) &&
@@ -160,6 +161,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_decrypted((unsigned long)ret,
+ 1 << get_order(size));
+ if (err)
+ goto out_free_pages;
+ }
memset(ret, 0, size);
goto done;
}
@@ -176,8 +183,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
}

ret = page_address(page);
- if (force_dma_unencrypted(dev))
- set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_decrypted((unsigned long)ret,
+ 1 << get_order(size));
+ if (err)
+ goto out_free_pages;
+ }

memset(ret, 0, size);

@@ -186,7 +197,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
- goto out_free_pages;
+ goto out_encrypt_pages;
}
done:
if (force_dma_unencrypted(dev))
@@ -194,6 +205,15 @@ done:
else
*dma_handle = phys_to_dma(dev, page_to_phys(page));
return ret;
+
+out_encrypt_pages:
+ if (force_dma_unencrypted(dev)) {
+ err = set_memory_encrypted((unsigned long)page_address(page),
+ 1 << get_order(size));
+ /* If memory cannot be re-encrypted, it must be leaked */
+ if (err)
+ return NULL;
+ }
out_free_pages:
dma_free_contiguous(dev, page, size);
return NULL;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 195ecb955fcc5..950a5cfd262ce 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr)
struct kprobe *p;

head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, head, hlist) {
+ hlist_for_each_entry_rcu(p, head, hlist,
+ lockdep_is_held(&kprobe_mutex)) {
if (p->addr == addr)
return p;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5eccfb816d23b..f2618ade80479 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4461,7 +4461,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
*/
if (dl_prio(prio)) {
if (!dl_prio(p->normal_prio) ||
- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ (pi_task && dl_prio(pi_task->prio) &&
+ dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.dl_boosted = 1;
queue_flag |= ENQUEUE_REPLENISH;
} else
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 504d2f51b0d63..f63f337c7147a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2692,6 +2692,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_bw = 0;
dl_se->dl_density = 0;

+ dl_se->dl_boosted = 0;
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2ae7e30ccb33c..5725199b32dcf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -807,7 +807,7 @@ void post_init_entity_util_avg(struct task_struct *p)
}
}

- sa->runnable_avg = cpu_scale;
+ sa->runnable_avg = sa->util_avg;

if (p->sched_class != &fair_sched_class) {
/*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 35610a4be4a9e..085fceca33774 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -3,6 +3,9 @@
* Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
@@ -494,6 +497,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
*/
strreplace(buts->name, '/', '_');

+ /*
+ * bdev can be NULL, as with scsi-generic, this is a helpful as
+ * we can be.
+ */
+ if (q->blk_trace) {
+ pr_warn("Concurrent blktraces are not allowed on %s\n",
+ buts->name);
+ return -EBUSY;
+ }
+
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
if (!bt)
return -ENOMEM;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b8e1ca48be50f..00867ff82412a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
if (unlikely(info->add_timestamp)) {
bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);

- event = rb_add_time_stamp(event, info->delta, abs);
+ event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
length -= RB_LEN_TIME_EXTEND;
delta = 0;
}
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 9de29bb45a27f..fdc5abc00bf84 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

ret = kprobe_event_gen_cmd_start(&cmd, event, val);
- if (ret)
+ if (ret) {
+ pr_err("Failed to generate probe: %s\n", buf);
break;
+ }

ret = kprobe_event_gen_cmd_end(&cmd);
- if (ret)
+ if (ret) {
pr_err("Failed to add probe: %s\n", buf);
+ break;
+ }
}

return ret;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 3a74736da363a..f725802160c0b 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)

int trigger_process_regex(struct trace_event_file *file, char *buff)
{
- char *command, *next = buff;
+ char *command, *next;
struct event_command *p;
int ret = -EINVAL;

+ next = buff = skip_spaces(buff);
command = strsep(&next, ": \t");
+ if (next) {
+ next = skip_spaces(next);
+ if (!*next)
+ next = NULL;
+ }
command = (command[0] != '!') ? command : command + 1;

mutex_lock(&trigger_cmd_mutex);
@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
int ret;

/* separate the trigger from the filter (t:n [if filter]) */
- if (param && isdigit(param[0]))
+ if (param && isdigit(param[0])) {
trigger = strsep(&param, " \t");
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }
+ }

trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
trigger = strsep(&param, " \t");
if (!trigger)
return -EINVAL;
+ if (param) {
+ param = skip_spaces(param);
+ if (!*param)
+ param = NULL;
+ }

system = strsep(&trigger, ":");
if (!trigger)
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index 72c1abfa154dc..da137939a4100 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -979,10 +979,10 @@ err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
- objagg_hints_put(hints);
- objagg_destroy(objagg2);
i = hints_case->key_ids_count;
+ objagg_destroy(objagg2);
err_check_expect_hints_stats:
+ objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
diff --git a/mm/compaction.c b/mm/compaction.c
index 46f0fcc93081e..65b568e195823 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2318,15 +2318,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
.page = NULL,
};

- current->capture_control = &capc;
+ /*
+ * Make sure the structs are really initialized before we expose the
+ * capture control, in case we are interrupted and the interrupt handler
+ * frees a page.
+ */
+ barrier();
+ WRITE_ONCE(current->capture_control, &capc);

ret = compact_zone(&cc, &capc);

VM_BUG_ON(!list_empty(&cc.freepages));
VM_BUG_ON(!list_empty(&cc.migratepages));

- *capture = capc.page;
- current->capture_control = NULL;
+ /*
+ * Make sure we hide capture control first before we read the captured
+ * page pointer, otherwise an interrupt could free and capture a page
+ * and we would leak it.
+ */
+ WRITE_ONCE(current->capture_control, NULL);
+ *capture = READ_ONCE(capc.page);

return ret;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a3b97f1039665..ef0e291a8cf46 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2790,8 +2790,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
return;

cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
- if (!cw)
+ if (!cw) {
+ css_put(&memcg->css);
return;
+ }

cw->memcg = memcg;
cw->cachep = cachep;
@@ -6349,11 +6351,16 @@ static unsigned long effective_protection(unsigned long usage,
* We're using unprotected memory for the weight so that if
* some cgroups DO claim explicit protection, we don't protect
* the same bytes twice.
+ *
+ * Check both usage and parent_usage against the respective
+ * protected values. One should imply the other, but they
+ * aren't read atomically - make sure the division is sane.
*/
if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
return ep;
-
- if (parent_effective > siblings_protected && usage > protected) {
+ if (parent_effective > siblings_protected &&
+ parent_usage > siblings_protected &&
+ usage > protected) {
unsigned long unclaimed;

unclaimed = parent_effective - siblings_protected;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fc0aad0bc1f54..744a3ea284b78 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -468,11 +468,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages)
{
+ const unsigned long end_pfn = start_pfn + nr_pages;
struct pglist_data *pgdat = zone->zone_pgdat;
- unsigned long flags;
+ unsigned long pfn, cur_nr_pages, flags;

/* Poison struct pages because they are now uninitialized again. */
- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+ for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+ cond_resched();
+
+ /* Select all remaining pages up to the next section boundary */
+ cur_nr_pages =
+ min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+ page_init_poison(pfn_to_page(pfn),
+ sizeof(struct page) * cur_nr_pages);
+ }

#ifdef CONFIG_ZONE_DEVICE
/*
diff --git a/mm/slab.h b/mm/slab.h
index 207c83ef6e06d..74f7e09a7cfd1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
- unsigned int nr_pages = 1 << order;
+ int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;
int ret;
@@ -388,7 +388,7 @@ out:
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
- unsigned int nr_pages = 1 << order;
+ int nr_pages = 1 << order;
struct mem_cgroup *memcg;
struct lruvec *lruvec;

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9e72ba2241750..37d48a56431d0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1726,7 +1726,7 @@ void kzfree(const void *p)
if (unlikely(ZERO_OR_NULL_PTR(mem)))
return;
ks = ksize(mem);
- memset(mem, 0, ks);
+ memzero_explicit(mem, ks);
kfree(mem);
}
EXPORT_SYMBOL(kzfree);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1f97703a52ffb..18430f79ac37e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -217,8 +217,8 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
+ unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
- unsigned char eth_addr[ETH_ALEN];
};

struct net_bridge_mdb_entry {
diff --git a/net/core/dev.c b/net/core/dev.c
index 93a279ab4e97c..c9ee5d80d5ea1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4109,10 +4109,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)

local_bh_disable();

+ dev_xmit_recursion_inc();
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq))
ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
+ dev_xmit_recursion_dec();

local_bh_enable();

@@ -9435,6 +9437,13 @@ int register_netdevice(struct net_device *dev)
rcu_barrier();

dev->reg_state = NETREG_UNREGISTERED;
+ /* We should put the kobject that hold in
+ * netdev_unregister_kobject(), otherwise
+ * the net device cannot be freed when
+ * driver calls free_netdev(), because the
+ * kobject is being hold.
+ */
+ kobject_put(&dev->dev.kobj);
}
/*
* Prevent userspace races by waiting until the network
diff --git a/net/core/sock.c b/net/core/sock.c
index b714162213aea..afe4a62adf8ff 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -707,7 +707,7 @@ bool sk_mc_loop(struct sock *sk)
return inet6_sk(sk)->mc_loop;
#endif
}
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return true;
}
EXPORT_SYMBOL(sk_mc_loop);
@@ -1678,6 +1678,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
+ sk_tx_queue_clear(sk);
}

return sk;
@@ -1901,6 +1902,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
+ sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);

if (newsk->sk_prot->sockets_allocated)
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 423e640e3876d..aaecfc916a4db 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
[NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
+ [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
[NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
[NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
[NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
+ [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list",

[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 89d0b1827aaff..d3eeeb26396cd 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2957,7 +2957,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
sizeof(match->mask.ipv6.dst));
}
if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
- memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
match->dissector.used_keys |=
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 55ca2e5218280..871c035be31f2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;

- if (table)
+ if (table && table != RT_TABLE_MAIN)
tbl = fib_get_table(net, table);

if (tbl)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index cd4b84310d929..a0b4dc54f8a60 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 remote, __be32 local,
__be32 key)
{
- unsigned int hash;
struct ip_tunnel *t, *cand = NULL;
struct hlist_head *head;
+ struct net_device *ndev;
+ unsigned int hash;

hash = ip_tunnel_hash(key, remote);
head = &itn->tunnels[hash];
@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (t && t->dev->flags & IFF_UP)
return t;

- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
- return netdev_priv(itn->fb_tunnel_dev);
+ ndev = READ_ONCE(itn->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);

return NULL;
}
@@ -1245,9 +1247,9 @@ void ip_tunnel_uninit(struct net_device *dev)
struct ip_tunnel_net *itn;

itn = net_generic(net, tunnel->ip_tnl_net_id);
- /* fb_tunnel_dev will be unregisted in net-exit call. */
- if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(itn, netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
+ if (itn->fb_tunnel_dev == dev)
+ WRITE_ONCE(itn->fb_tunnel_dev, NULL);

dst_cache_reset(&tunnel->dst_cache);
}
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 8f8eefd3a3ce1..c7bf5b26bf0c2 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)

if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
-
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 29c6fc8c77168..1fa009999f577 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}

@@ -3683,6 +3684,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_in_ack_event(sk, ack_ev_flags);
}

+ /* This is a deviation from RFC3168 since it states that:
+ * "When the TCP data sender is ready to set the CWR bit after reducing
+ * the congestion window, it SHOULD set the CWR bit only on the first
+ * new data packet that it transmits."
+ * We accept CWR on pure ACKs to be more robust
+ * with widely-deployed TCP implementations that do this.
+ */
+ tcp_ecn_accept_cwr(sk, skb);
+
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
@@ -4593,7 +4603,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
skb, &fragstolen)) {
coalesce_done:
- tcp_grow_window(sk, skb);
+ /* For non sack flows, do not grow window to force DUPACK
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
goto add_sack;
@@ -4677,7 +4691,11 @@ add_sack:
tcp_sack_new_ofo_skb(sk, seq, end_seq);
end:
if (skb) {
- tcp_grow_window(sk, skb);
+ /* For non sack flows, do not grow window to force DUPACK
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
skb_condense(skb);
skb_set_owner_r(skb, sk);
}
@@ -4780,8 +4798,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);

- tcp_ecn_accept_cwr(sk, skb);
-
tp->rx_opt.dsack = 0;

/* Queue data for delivery to the user.
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 781ca8c07a0da..6532bde82b40a 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
+ struct net_device *ndev;

for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
if (t && t->dev->flags & IFF_UP)
return t;

- dev = ign->fb_tunnel_dev;
- if (dev && dev->flags & IFF_UP)
- return netdev_priv(dev);
+ ndev = READ_ONCE(ign->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);

return NULL;
}
@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)

ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
+ if (ign->fb_tunnel_dev == dev)
+ WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index eaa4c2cc2fbb2..c875c9b6edbe9 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2618,6 +2618,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
idev->mc_list = i->next;

write_unlock_bh(&idev->lock);
+ ip6_mc_clear_src(i);
ma_put(i);
write_lock_bh(&idev->lock);
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 1c20dd14b2aa2..2430bbfa34059 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
*/
subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
if (subflow->request_mptcp) {
- pr_debug("local_key=%llu", subflow->local_key);
opts->suboptions = OPTION_MPTCP_MPC_SYN;
- opts->sndr_key = subflow->local_key;
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index e6feb05a93dc3..db3e4e74e7857 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1015,8 +1015,10 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
err = tcp_set_ulp(sf->sk, "mptcp");
release_sock(sf->sk);

- if (err)
+ if (err) {
+ sock_release(sf);
return err;
+ }

/* the newly created socket really belongs to the owning MPTCP master
* socket, even if for additional subflows the allocation is performed
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 340cb955af25c..56621d6bfd297 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
+ if (align < ip_set_extensions[id].align)
+ align = ip_set_extensions[id].align;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index bcbba0bef1c2a..9c1c27f3a0894 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -474,8 +474,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
struct netlink_ext_ack *extack,
const struct genl_ops *ops,
int hdrlen,
- enum genl_validate_flags no_strict_flag,
- bool parallel)
+ enum genl_validate_flags no_strict_flag)
{
enum netlink_validation validate = ops->validate & no_strict_flag ?
NL_VALIDATE_LIBERAL :
@@ -486,7 +485,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
if (!family->maxattr)
return NULL;

- if (parallel) {
+ if (family->parallel_ops) {
attrbuf = kmalloc_array(family->maxattr + 1,
sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf)
@@ -498,7 +497,7 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
family->policy, validate, extack);
if (err) {
- if (parallel)
+ if (family->parallel_ops)
kfree(attrbuf);
return ERR_PTR(err);
}
@@ -506,10 +505,9 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
}

static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
- struct nlattr **attrbuf,
- bool parallel)
+ struct nlattr **attrbuf)
{
- if (parallel)
+ if (family->parallel_ops)
kfree(attrbuf);
}

@@ -537,15 +535,14 @@ static int genl_start(struct netlink_callback *cb)

attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
ops, ctx->hdrlen,
- GENL_DONT_VALIDATE_DUMP_STRICT,
- true);
+ GENL_DONT_VALIDATE_DUMP_STRICT);
if (IS_ERR(attrs))
return PTR_ERR(attrs);

no_attrs:
info = genl_dumpit_info_alloc();
if (!info) {
- kfree(attrs);
+ genl_family_rcv_msg_attrs_free(ctx->family, attrs);
return -ENOMEM;
}
info->family = ctx->family;
@@ -562,7 +559,7 @@ no_attrs:
}

if (rc) {
- kfree(attrs);
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
}
@@ -591,7 +588,7 @@ static int genl_lock_done(struct netlink_callback *cb)
rc = ops->done(cb);
genl_unlock();
}
- genl_family_rcv_msg_attrs_free(info->family, info->attrs, false);
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
genl_dumpit_info_free(info);
return rc;
}
@@ -604,7 +601,7 @@ static int genl_parallel_done(struct netlink_callback *cb)

if (ops->done)
rc = ops->done(cb);
- genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs);
genl_dumpit_info_free(info);
return rc;
}
@@ -671,8 +668,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,

attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
ops, hdrlen,
- GENL_DONT_VALIDATE_STRICT,
- family->parallel_ops);
+ GENL_DONT_VALIDATE_STRICT);
if (IS_ERR(attrbuf))
return PTR_ERR(attrbuf);

@@ -698,7 +694,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
family->post_doit(ops, skb, &info);

out:
- genl_family_rcv_msg_attrs_free(family, attrbuf, family->parallel_ops);
+ genl_family_rcv_msg_attrs_free(family, attrbuf);

return err;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index fc0efd8833c84..2611657f40cac 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, bool last)
{
+ struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
const struct nlattr *actions, *cpl_arg;
+ int len, max_len, rem = nla_len(attr);
const struct check_pkt_len_arg *arg;
- int rem = nla_len(attr);
bool clone_flow_key;

/* The first netlink attribute in 'attr' is always
@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
cpl_arg = nla_data(attr);
arg = nla_data(cpl_arg);

- if (skb->len <= arg->pkt_len) {
+ len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+ max_len = arg->pkt_len;
+
+ if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+ len <= max_len) {
/* Second netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
*/
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index b7611cc159e51..032ed76c0166d 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -22,6 +22,11 @@
#include <net/ip.h>
#include "ar-internal.h"

+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID)
+{
+}
+
/*
* Preallocate a single service call, connection and peer and, if possible,
* give them a user ID and attach the user's side of the ID to them.
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
+ if (call->notify_rx)
+ call->notify_rx = rxrpc_dummy_notify;
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
rxrpc_call_completed(call);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 3be4177baf707..22dec6049e1bb 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));

+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (call->tx_winsize != rwind) {
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (rwind > call->tx_winsize)
wake = true;
- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
- ntohl(ackinfo->rwind), wake);
+ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
call->tx_winsize = rwind;
}

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 1496e87cd07bb..9475fa81ea7f6 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1514,32 +1514,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
return idx + (tin << 16);
}

-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
- int wlen = skb_network_offset(skb);
+ const int offset = skb_network_offset(skb);
+ u16 *buf, buf_;
u8 dscp;

switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;

- dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* ToS is in the second byte of iphdr */
+ dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct iphdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;

case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;

- dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* Traffic class is in the first and second bytes of ipv6hdr */
+ dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct ipv6hdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;

case htons(ETH_P_ARP):
@@ -1556,14 +1575,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin, mark;
+ bool wash;
u8 dscp;

/* Tin selection: Default to diffserv-based selection, allow overriding
- * using firewall marks or skb->priority.
+ * using firewall marks or skb->priority. Call DSCP parsing early if
+ * wash is enabled, otherwise defer to below to skip unneeded parsing.
*/
- dscp = cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH);
mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+ wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+ if (wash)
+ dscp = cake_handle_diffserv(skb, wash);

if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
@@ -1577,6 +1599,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];

else {
+ if (!wash)
+ dscp = cake_handle_diffserv(skb, wash);
tin = q->tin_index[dscp];

if (unlikely(tin >= q->tin_cnt))
@@ -2654,7 +2678,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
qdisc_watchdog_init(&q->watchdog, sch);

if (opt) {
- int err = cake_change(sch, opt, extack);
+ err = cake_change(sch, opt, extack);

if (err)
return err;
@@ -2971,7 +2995,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
PUT_STAT_S32(BLUE_TIMER_US,
ktime_to_us(
ktime_sub(now,
- flow->cvars.blue_timer)));
+ flow->cvars.blue_timer)));
}
if (flow->cvars.dropping) {
PUT_STAT_S32(DROP_NEXT_US,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 437079a4883d2..732bc9a45190b 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
enum sctp_scope scope, gfp_t gfp)
{
+ struct sock *sk = asoc->base.sk;
int flags;

/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 53bc61537f44f..701c5a4e441d9 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
* well as the remote peer.
*/
if ((((AF_INET == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR4_ALLOWED) &&
(flags & SCTP_ADDR4_PEERSUPP))) ||
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 092d1afdee0d2..cde29f3c7fb3c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
* sock as well as the remote peer.
*/
if (addr->a.sa.sa_family == AF_INET &&
- !(copy_flags & SCTP_ADDR4_PEERSUPP))
+ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
continue;
if (addr->a.sa.sa_family == AF_INET6 &&
(!(copy_flags & SCTP_ADDR6_ALLOWED) ||
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 39e14d5edaf13..e9d0953522f09 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
q.len = strlen(gssd_dummy_clnt_dir[0].name);
clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
if (!clnt_dentry) {
+ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
goto out;
}
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6f7d82fb1eb0a..be11d672b5b97 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->head[0].iov_len;
+ subbuf->head[0].iov_base = buf->head[0].iov_base;
subbuf->head[0].iov_len = 0;
}

@@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->page_len;
+ subbuf->pages = buf->pages;
+ subbuf->page_base = 0;
subbuf->page_len = 0;
}

@@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->tail[0].iov_len;
+ subbuf->tail[0].iov_base = buf->tail[0].iov_base;
subbuf->tail[0].iov_len = 0;
}

diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 3c627dc685cc8..57118e342c8eb 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
}

- r_xprt->rx_stats.bad_reply_count++;
- return -EREMOTEIO;
+ return -EIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
@@ -1387,13 +1386,11 @@ out:
spin_unlock(&xprt->queue_lock);
return;

-/* If the incoming reply terminated a pending RPC, the next
- * RPC call will post a replacement receive buffer as it is
- * being marshaled.
- */
out_badheader:
trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
+ rqst->rq_task->tk_status = status;
+ status = 0;
goto out;
}

diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index f50d1f97cf8ec..626096bd0d294 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;

- if (!xo)
+ if (!xo || (xo->flags & XFRM_XMIT))
return skb;

if (!(features & NETIF_F_HW_ESP))
@@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}

+ xo->flags |= XFRM_XMIT;
+
if (skb_is_gso(skb)) {
struct net_device *dev = skb->dev;

diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index dd558cbb23094..ef53b93db5732 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
void *array;
- size_t size;

- size = record_size * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, record_size);
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
int i;

/* Alloc main stats_record structure */
- rec = malloc(sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
+ rec = calloc(1, sizeof(*rec));
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 313a8fe6d125c..2baf8db1f7e70 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -15,7 +15,7 @@
#include <bpf/bpf_helpers.h>
#include "hash_func01.h"

-#define MAX_CPUS 64 /* WARNING - sync with _user.c */
+#define MAX_CPUS NR_CPUS

/* Special map type that can XDP_REDIRECT frames to another CPU */
struct {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 15bdf047a2221..e86fed5cdb92c 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -13,6 +13,7 @@ static const char *__doc__ =
#include <unistd.h>
#include <locale.h>
#include <sys/resource.h>
+#include <sys/sysinfo.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
@@ -24,8 +25,6 @@ static const char *__doc__ =
#include <arpa/inet.h>
#include <linux/if_link.h>

-#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
-
/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6

@@ -40,6 +39,7 @@ static char *ifname;
static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int n_cpus;
static int cpu_map_fd;
static int rx_cnt_map_fd;
static int redirect_err_cnt_map_fd;
@@ -170,7 +170,7 @@ struct stats_record {
struct record redir_err;
struct record kthread;
struct record exception;
- struct record enq[MAX_CPUS];
+ struct record enq[];
};

static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
@@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct datarec *array;
- size_t size;

- size = sizeof(struct datarec) * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, sizeof(struct datarec));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
@@ -225,19 +222,20 @@ static struct datarec *alloc_record_per_cpu(void)
static struct stats_record *alloc_stats_record(void)
{
struct stats_record *rec;
- int i;
+ int i, size;

- rec = malloc(sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
+ size = sizeof(*rec) + n_cpus * sizeof(struct record);
+ rec = malloc(size);
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
}
+ memset(rec, 0, size);
rec->rx_cnt.cpu = alloc_record_per_cpu();
rec->redir_err.cpu = alloc_record_per_cpu();
rec->kthread.cpu = alloc_record_per_cpu();
rec->exception.cpu = alloc_record_per_cpu();
- for (i = 0; i < MAX_CPUS; i++)
+ for (i = 0; i < n_cpus; i++)
rec->enq[i].cpu = alloc_record_per_cpu();

return rec;
@@ -247,7 +245,7 @@ static void free_stats_record(struct stats_record *r)
{
int i;

- for (i = 0; i < MAX_CPUS; i++)
+ for (i = 0; i < n_cpus; i++)
free(r->enq[i].cpu);
free(r->exception.cpu);
free(r->kthread.cpu);
@@ -350,7 +348,7 @@ static void stats_print(struct stats_record *stats_rec,
}

/* cpumap enqueue stats */
- for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
+ for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
char *errstr = "";
@@ -475,7 +473,7 @@ static void stats_collect(struct stats_record *rec)
map_collect_percpu(fd, 1, &rec->redir_err);

fd = cpumap_enqueue_cnt_map_fd;
- for (i = 0; i < MAX_CPUS; i++)
+ for (i = 0; i < n_cpus; i++)
map_collect_percpu(fd, i, &rec->enq[i]);

fd = cpumap_kthread_cnt_map_fd;
@@ -549,10 +547,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
*/
static void mark_cpus_unavailable(void)
{
- __u32 invalid_cpu = MAX_CPUS;
+ __u32 invalid_cpu = n_cpus;
int ret, i;

- for (i = 0; i < MAX_CPUS; i++) {
+ for (i = 0; i < n_cpus; i++) {
ret = bpf_map_update_elem(cpus_available_map_fd, &i,
&invalid_cpu, 0);
if (ret) {
@@ -688,6 +686,8 @@ int main(int argc, char **argv)
int prog_fd;
__u32 qsize;

+ n_cpus = get_nprocs_conf();
+
/* Notice: choosing he queue size is very important with the
* ixgbe driver, because it's driver page recycling trick is
* dependend on pages being returned quickly. The number of
@@ -757,7 +757,7 @@ int main(int argc, char **argv)
case 'c':
/* Add multiple CPUs */
add_cpu = strtoul(optarg, NULL, 0);
- if (add_cpu >= MAX_CPUS) {
+ if (add_cpu >= n_cpus) {
fprintf(stderr,
"--cpu nr too large for cpumap err(%d):%s\n",
errno, strerror(errno));
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index 4fe47502ebed4..caa4e7ffcfc7b 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
struct datarec *array;
- size_t size;

- size = sizeof(struct datarec) * nr_cpus;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_cpus, sizeof(struct datarec));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
exit(EXIT_FAIL_MEM);
@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
{
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
struct record *array;
- size_t size;

- size = sizeof(struct record) * nr_rxqs;
- array = malloc(size);
- memset(array, 0, size);
+ array = calloc(nr_rxqs, sizeof(struct record));
if (!array) {
fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
exit(EXIT_FAIL_MEM);
@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
struct stats_record *rec;
int i;

- rec = malloc(sizeof(*rec));
- memset(rec, 0, sizeof(*rec));
+ rec = calloc(1, sizeof(struct stats_record));
if (!rec) {
fprintf(stderr, "Mem alloc error\n");
exit(EXIT_FAIL_MEM);
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 6cabf20ce66a3..fe427f7fcfb31 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -86,20 +86,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \
$(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c))))

# output directory for tests below
-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
+TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$

# try-run
# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
# Exit code chooses option. "$$TMP" serves as a temporary file and is
# automatically cleaned up.
try-run = $(shell set -e; \
- TMP="$(TMPOUT).$$$$.tmp"; \
- TMPO="$(TMPOUT).$$$$.o"; \
+ TMP=$(TMPOUT)/tmp; \
+ TMPO=$(TMPOUT)/tmp.o; \
+ mkdir -p $(TMPOUT); \
+ trap "rm -rf $(TMPOUT)" EXIT; \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
- fi; \
- rm -f "$$TMP" "$$TMPO")
+ fi)

# as-option
# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 74eab03e31d4d..f9b19524da112 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -29,6 +29,11 @@
#undef has_rel_mcount
#undef tot_relsize
#undef get_mcountsym
+#undef find_symtab
+#undef get_shnum
+#undef set_shnum
+#undef get_shstrndx
+#undef get_symindex
#undef get_sym_str_and_relp
#undef do_func
#undef Elf_Addr
@@ -58,6 +63,11 @@
# define __has_rel_mcount __has64_rel_mcount
# define has_rel_mcount has64_rel_mcount
# define tot_relsize tot64_relsize
+# define find_symtab find_symtab64
+# define get_shnum get_shnum64
+# define set_shnum set_shnum64
+# define get_shstrndx get_shstrndx64
+# define get_symindex get_symindex64
# define get_sym_str_and_relp get_sym_str_and_relp_64
# define do_func do64
# define get_mcountsym get_mcountsym_64
@@ -91,6 +101,11 @@
# define __has_rel_mcount __has32_rel_mcount
# define has_rel_mcount has32_rel_mcount
# define tot_relsize tot32_relsize
+# define find_symtab find_symtab32
+# define get_shnum get_shnum32
+# define set_shnum set_shnum32
+# define get_shstrndx get_shstrndx32
+# define get_symindex get_symindex32
# define get_sym_str_and_relp get_sym_str_and_relp_32
# define do_func do32
# define get_mcountsym get_mcountsym_32
@@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
return is_fake;
}

+static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
+ Elf32_Word const *symtab_shndx)
+{
+ unsigned long offset;
+ int index;
+
+ if (sym->st_shndx != SHN_XINDEX)
+ return w2(sym->st_shndx);
+
+ offset = (unsigned long)sym - (unsigned long)symtab;
+ index = offset / sizeof(*sym);
+
+ return w(symtab_shndx[index]);
+}
+
+static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
+{
+ if (shdr0 && !ehdr->e_shnum)
+ return w(shdr0->sh_size);
+
+ return w2(ehdr->e_shnum);
+}
+
+static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum)
+{
+ if (new_shnum >= SHN_LORESERVE) {
+ ehdr->e_shnum = 0;
+ shdr0->sh_size = w(new_shnum);
+ } else
+ ehdr->e_shnum = w2(new_shnum);
+}
+
+static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
+{
+ if (ehdr->e_shstrndx != SHN_XINDEX)
+ return w2(ehdr->e_shstrndx);
+
+ return w(shdr0->sh_link);
+}
+
+static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0,
+ unsigned const nhdr, Elf32_Word **symtab,
+ Elf32_Word **symtab_shndx)
+{
+ Elf_Shdr const *relhdr;
+ unsigned k;
+
+ *symtab = NULL;
+ *symtab_shndx = NULL;
+
+ for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+ if (relhdr->sh_type == SHT_SYMTAB)
+ *symtab = (void *)ehdr + relhdr->sh_offset;
+ else if (relhdr->sh_type == SHT_SYMTAB_SHNDX)
+ *symtab_shndx = (void *)ehdr + relhdr->sh_offset;
+
+ if (*symtab && *symtab_shndx)
+ break;
+ }
+}
+
/* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
static int append_func(Elf_Ehdr *const ehdr,
Elf_Shdr *const shstr,
@@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr,
char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
? ".rela__mcount_loc"
: ".rel__mcount_loc";
- unsigned const old_shnum = w2(ehdr->e_shnum);
uint_t const old_shoff = _w(ehdr->e_shoff);
uint_t const old_shstr_sh_size = _w(shstr->sh_size);
uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
+ Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr);
+ unsigned int const old_shnum = get_shnum(ehdr, shdr0);
+ unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */
uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
uint_t new_e_shoff;

@@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr,
t += (_align & -t); /* word-byte align */
new_e_shoff = t;

+ set_shnum(ehdr, shdr0, new_shnum);
+
/* body for new shstrtab */
if (ulseek(sb.st_size, SEEK_SET) < 0)
return -1;
@@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr,
return -1;

ehdr->e_shoff = _w(new_e_shoff);
- ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */
if (ulseek(0, SEEK_SET) < 0)
return -1;
if (uwrite(ehdr, sizeof(*ehdr)) < 0)
@@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx,
uint_t *const recvalp,
unsigned int *sym_index,
Elf_Shdr const *const symhdr,
+ Elf32_Word const *symtab,
+ Elf32_Word const *symtab_shndx,
Elf_Ehdr const *const ehdr)
{
Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
@@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx,
for (symp = sym0, t = nsym; t; --t, ++symp) {
unsigned int const st_bind = ELF_ST_BIND(symp->st_info);

- if (txtndx == w2(symp->st_shndx)
+ if (txtndx == get_symindex(symp, symtab, symtab_shndx)
/* avoid STB_WEAK */
&& (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
/* function symbols on ARM have quirks, avoid them */
@@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0,
return totrelsz;
}

-
/* Overall supervision for Elf32 ET_REL file. */
static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
unsigned const reltype)
{
Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
+ (void *)ehdr);
- unsigned const nhdr = w2(ehdr->e_shnum);
- Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
+ unsigned const nhdr = get_shnum(ehdr, shdr0);
+ Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)];
char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
+ (void *)ehdr);

Elf_Shdr const *relhdr;
unsigned k;

+ Elf32_Word *symtab;
+ Elf32_Word *symtab_shndx;
+
/* Upper bound on space: assume all relevant relocs are for mcount. */
unsigned totrelsz;

@@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
return -1;
}

+ find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx);
+
for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
char const *const txtname = has_rel_mcount(relhdr, shdr0,
shstrtab, fname);
@@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
result = find_secsym_ndx(w(relhdr->sh_info), txtname,
&recval, &recsym,
&shdr0[symsec_sh_link],
+ symtab, symtab_shndx,
ehdr);
if (result)
goto out;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 93760a3564cfa..137d655fed8f8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -4145,6 +4145,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e057ecb5a9045..cb689878ba205 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2460,6 +2460,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
@@ -7435,6 +7436,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index bad89b0d129e7..1a2fa7f181423 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
struct regmap *regs = ssi->regs;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
- unsigned int slots = params_channels(hw_params);
- unsigned int slot_width = 32;
+ unsigned int channels = params_channels(hw_params);
+ unsigned int slot_width = params_width(hw_params);
+ unsigned int slots = 2;
u64 sub, savesub = 100000;
unsigned int freq;
bool baudclk_is_used;
@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
/* Override slots and slot_width if being specifically set... */
if (ssi->slots)
slots = ssi->slots;
- /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
- if (ssi->slot_width && slots != 2)
+ if (ssi->slot_width)
slot_width = ssi->slot_width;

+ /* ...but force 32 bits for stereo audio using I2S Master Mode */
+ if (channels == 2 &&
+ (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+ slot_width = 32;
+
/* Generate bit clock based on the slot number and slot width */
freq = slots * slot_width * params_rate(hw_params);

diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index 6c20bdd850f33..8ada4ecba8472 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -4,6 +4,7 @@

#include <linux/module.h>
#include "common.h"
+#include "qdsp6/q6afe.h"

int qcom_snd_parse_of(struct snd_soc_card *card)
{
@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
}
link->no_pcm = 1;
link->ignore_pmdown_time = 1;
+
+ if (q6afe_is_rx_port(link->id)) {
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 0;
+ } else {
+ link->dpcm_playback = 0;
+ link->dpcm_capture = 1;
+ }
+
} else {
dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
if (!dlc)
@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
link->codecs->dai_name = "snd-soc-dummy-dai";
link->codecs->name = "snd-soc-dummy";
link->dynamic = 1;
+ link->dpcm_playback = 1;
+ link->dpcm_capture = 1;
}

link->ignore_suspend = 1;
link->nonatomic = 1;
- link->dpcm_playback = 1;
- link->dpcm_capture = 1;
link->stream_name = link->name;
link++;

diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index e0945f7a58c81..0ce4eb60f9848 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
}
EXPORT_SYMBOL_GPL(q6afe_get_port_id);

+int q6afe_is_rx_port(int index)
+{
+ if (index < 0 || index >= AFE_PORT_MAX)
+ return -EINVAL;
+
+ return port_maps[index].is_rx;
+}
+EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
struct q6afe_port *port)
{
diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
index c7ed5422baffd..1a0f80a14afea 100644
--- a/sound/soc/qcom/qdsp6/q6afe.h
+++ b/sound/soc/qcom/qdsp6/q6afe.h
@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
int q6afe_port_stop(struct q6afe_port *port);
void q6afe_port_put(struct q6afe_port *port);
int q6afe_get_port_id(int index);
+int q6afe_is_rx_port(int index);
void q6afe_hdmi_port_prepare(struct q6afe_port *port,
struct q6afe_hdmi_cfg *cfg);
void q6afe_slim_port_prepare(struct q6afe_port *port,
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 0e0e8f7a460ab..ae4b2cabdf2d6 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -25,6 +25,7 @@
#define ASM_STREAM_CMD_FLUSH 0x00010BCE
#define ASM_SESSION_CMD_PAUSE 0x00010BD3
#define ASM_DATA_CMD_EOS 0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C
#define ASM_NULL_POPP_TOPOLOGY 0x00010C68
#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
@@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
case ASM_SESSION_CMD_SUSPEND:
client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
break;
- case ASM_DATA_CMD_EOS:
- client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
- break;
case ASM_STREAM_CMD_FLUSH:
client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
break;
@@ -727,6 +725,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
spin_unlock_irqrestore(&ac->lock, flags);
}

+ break;
+ case ASM_DATA_EVENT_RENDERED_EOS:
+ client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
break;
}

diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
index 7cd42fcfcf38a..1707414cfa921 100644
--- a/sound/soc/rockchip/rockchip_pdm.c
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
int ret;

ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }

ret = regcache_sync(pdm->regmap);

diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 39ce61c5b8744..fde097a7aad32 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2749,15 +2749,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
int count, paths;
int ret;

+ if (!fe->dai_link->dynamic)
+ return 0;
+
if (fe->num_cpus > 1) {
dev_err(fe->dev,
"%s doesn't support Multi CPU yet\n", __func__);
return -EINVAL;
}

- if (!fe->dai_link->dynamic)
- return 0;
-
/* only check active links */
if (!fe->cpu_dai->active)
return 0;
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 5ffb457cc88cf..1b28d01d1f4cd 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -394,8 +394,9 @@ skip_rate:
return nr_rates;
}

-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
- * call. Return a static table of known clock rates.
+/* Line6 Helix series and the Rode Rodecaster Pro don't support the
+ * UAC2_CS_RANGE usb function call. Return a static table of known
+ * clock rates.
*/
static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
struct audioformat *fp)
@@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+ case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
}

diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 15769f266790e..eab0fd4fd7c33 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
* if failed, give up and free the control instance.
*/

-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info)
{
struct usb_mixer_interface *mixer = list->mixer;
int err;
@@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
return err;
}
list->kctl = kctl;
+ list->is_std_info = is_std_info;
list->next_id_elem = mixer->id_elems[list->id];
mixer->id_elems[list->id] = list;
return 0;
@@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
unitid = delegate_notify(mixer, unitid, NULL, NULL);

for_each_mixer_elem(list, mixer, unitid) {
- struct usb_mixer_elem_info *info =
- mixer_elem_list_to_info(list);
+ struct usb_mixer_elem_info *info;
+
+ if (!list->is_std_info)
+ continue;
+ info = mixer_elem_list_to_info(list);
/* invalidate cache, so the value is read from the device */
info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
@@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,

if (!list->kctl)
continue;
+ if (!list->is_std_info)
+ continue;

info = mixer_elem_list_to_info(list);
if (count > 1 && info->control != control)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 41ec9dc4139bb..c29e27ac43a7a 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
struct snd_kcontrol *kctl;
unsigned int id;
+ bool is_std_info;
usb_mixer_elem_dump_func_t dump;
usb_mixer_elem_resume_func_t resume;
};
@@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
int request, int validx, int value_set);

-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
- struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+ struct snd_kcontrol *kctl,
+ bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+ snd_usb_mixer_add_list(list, kctl, true)

void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
struct usb_mixer_interface *mixer,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index aad2683ff7933..260607144f565 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
return -ENOMEM;
}
kctl->private_free = snd_usb_mixer_elem_free;
- return snd_usb_mixer_add_control(list, kctl);
+ /* don't use snd_usb_mixer_add_control() here, this is a special list element */
+ return snd_usb_mixer_add_list(list, kctl, false);
}

/*
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index d61c2f1095b5c..39aec83f8aca4 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ifnum = 0;
goto add_sync_ep_from_ifnum;
case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
@@ -1782,6 +1783,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
return 0;
case SNDRV_PCM_TRIGGER_STOP:
stop_endpoints(subs);
+ subs->data_endpoint->retire_data_urb = NULL;
subs->running = 0;
return 0;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d8a765be5dfe8..d7d900ebcf37f 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1505,6 +1505,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
static bool is_itf_usb_dsd_dac(unsigned int id)
{
switch (id) {
+ case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1646,6 +1647,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
usleep_range(1000, 2000);
+
+ /*
+ * Samsung USBC Headset (AKG) need a tiny delay after each
+ * class compliant request. (Model number: AAM625R or AAM627R)
+ */
+ if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ usleep_range(5000, 6000);
}

/*
@@ -1843,6 +1852,7 @@ struct registration_quirk {
static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
+ REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
{ 0 } /* terminator */
};

diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index 7897c8f4d363b..ef574087f1e1f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)

if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
-
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 383bac05ac324..ceaad78e96674 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -15,8 +15,9 @@
#include <inttypes.h>
#include <linux/net_tstamp.h>
#include <linux/errqueue.h>
+#include <linux/if_ether.h>
#include <linux/ipv6.h>
-#include <linux/tcp.h>
+#include <linux/udp.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
{
char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
- char data[sizeof(struct ipv6hdr) +
- sizeof(struct tcphdr) + 1];
+ char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+ sizeof(struct udphdr) + 1];
struct sock_extended_err *err;
struct msghdr msg = {0};
struct iovec iov = {0};
@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
msg.msg_controllen = sizeof(control);

while (1) {
+ const char *reason;
+
ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
if (ret == -1 && errno == EAGAIN)
break;
@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
err = (struct sock_extended_err *)CMSG_DATA(cm);
if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
- if (err->ee_code != ECANCELED)
- error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
+
+ switch (err->ee_errno) {
+ case ECANCELED:
+ if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
+ error(1, 0, "errqueue: unknown ECANCELED %u\n",
+ err->ee_code);
+ reason = "missed txtime";
+ break;
+ case EINVAL:
+ if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
+ error(1, 0, "errqueue: unknown EINVAL %u\n",
+ err->ee_code);
+ reason = "invalid txtime";
+ break;
+ default:
+ error(1, 0, "errqueue: errno %u code %u\n",
+ err->ee_errno, err->ee_code);
+ };

tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
tstamp -= (int64_t) glob_tstart;
tstamp /= 1000 * 1000;
- fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
- data[ret - 1], tstamp);
+ fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
+ data[ret - 1], tstamp, reason);

msg.msg_flags = 0;
msg.msg_controllen = sizeof(control);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index ca35dd8848b0a..af3df79d8163f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -7,7 +7,7 @@ noarg:
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64

-TMPOUT = $(OUTPUT)/
+TMPOUT = $(OUTPUT)/TMPDIR/
# Toolchains may build PIE by default which breaks the assembly
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 17a1f53ceba01..d77f4829f1e07 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -587,9 +587,20 @@ ip0 link set wg0 up
kill $ncat_pid
ip0 link del wg0

+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
declare -A objects
while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
done < /dev/kmsg
alldeleted=1