Subject: Re: Linux 5.10.51
diff --git a/Makefile b/Makefile
index 695f8e739a91..d0fad1e77493 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 50
+SUBLEVEL = 51
EXTRAVERSION =
NAME = Dare mighty things

diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 86cfb5c50a94..95ab6928cfd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -384,6 +384,11 @@ &usb20_otg {
status = "okay";
};

+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 93c734d8a46c..de1e5e8a0e88 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -984,6 +984,25 @@ usb_host0_ohci: usb@ff5d0000 {
status = "disabled";
};

+ usbdrd3: usb@ff600000 {
+ compatible = "rockchip,rk3328-dwc3", "snps,dwc3";
+ reg = <0x0 0xff600000 0x0 0x100000>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_USB3OTG_REF>, <&cru SCLK_USB3OTG_SUSPEND>,
+ <&cru ACLK_USB3OTG>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk";
+ dr_mode = "otg";
+ phy_type = "utmi_wide";
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis_enblslpm_quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ status = "disabled";
+ };
+
gic: interrupt-controller@ff811000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 61c97d3b58c7..c995d1f4594f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
+ /* The TTL field is only valid for the leaf entry. */
+ if (tlb->freed_tables)
+ return 0;
+
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
tlb->cleared_p4ds))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1917ccd39256..1a63f592034e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -418,6 +418,8 @@ config MACH_INGENIC_SOC
select MIPS_GENERIC
select MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER

config LANTIQ
bool "Lantiq based platforms"
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f2e216eef7da..8294eaa6f902 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -64,6 +64,8 @@
((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
#define __isa_range_or_flag(ge, lt, flag) \
(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
+#define __isa_range_and_ase(ge, lt, ase) \
+ (__isa_range(ge, lt) && __ase(ase))

/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
@@ -423,7 +425,7 @@
#endif

#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
#endif

#ifndef cpu_has_vp
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index 10e3be870df7..c2144409c0c4 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -46,7 +46,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
+ /*
+ * clear the huge pte entry firstly, so that the other smp threads will
+ * not get old pte entry after finishing flush_tlb_page and before
+ * setting new huge pte entry
+ */
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
}

#define __HAVE_ARCH_HUGE_PTE_NONE
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index a0e8ae5497b6..7a7467d3f7f0 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -2073,7 +2073,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2086,7 +2086,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2099,7 +2099,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
@@ -2111,7 +2111,7 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 8b18424b3120..d0cf997b4ba8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -59,11 +59,15 @@ do { \

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd;
+ pmd_t *pmd = NULL;
+ struct page *pg;

- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
- if (pmd)
+ pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
+ if (pg) {
+ pgtable_pmd_page_ctor(pg);
+ pmd = (pmd_t *)page_address(pg);
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ }
return pmd;
}

diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index e6ae2bcdbeda..067cb3eb1614 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1827,6 +1827,11 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+
+ /* FPU is not properly detected on JZ4760(B). */
+ if (c->processor_id == 0x2ed0024f)
+ c->options |= MIPS_CPU_FPU;
+
fallthrough;

/*
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index cf9459f79f9b..e4c461df3ee6 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -182,6 +182,9 @@ static void __init node_mem_init(unsigned int node)
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
+
+ /* Reserve pfn range 0~node[0]->node_start_pfn */
+ memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}

diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f53c42380832..8c14f84a8770 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif

+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 72e1b51beb10..55fdba8ba21c 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -198,9 +198,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
{
int is_exec = TRAP(regs) == 0x400;

- /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
- DSISR_PROTFAULT))) {
+ if (is_exec) {
pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
address >= TASK_SIZE ? "exec-protected" : "user",
address,
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 5f5fe63a3d1c..7ba0840fc3b5 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1093,9 +1093,9 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
/*
* Process closes window during exit. In the case of
* multithread application, the child thread can open
- * window and can exit without closing it. Expects parent
- * thread to use and close the window. So do not need
- * to take pid reference for parent thread.
+ * window and can exit without closing it. so takes tgid
+ * reference until window closed to make sure tgid is not
+ * reused.
*/
txwin->tgid = find_get_pid(task_tgid_vnr(current));
/*
@@ -1339,8 +1339,9 @@ int vas_win_close(struct vas_window *window)
/* if send window, drop reference to matching receive window */
if (window->tx_win) {
if (window->user_win) {
- /* Drop references to pid and mm */
+ /* Drop references to pid. tgid and mm */
put_pid(window->pid);
+ put_pid(window->tgid);
if (window->mm) {
mm_context_remove_vas_window(window->mm);
mmdrop(window->mm);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 656460636ad3..e83af7bc7591 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
return;

- prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
- has_sleeper = !wq_has_single_sleeper(&rqw->wait);
+ has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+ TASK_UNINTERRUPTIBLE);
do {
/* The memory barrier in set_task_state saves us here. */
if (data.got_token)
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index cb69b737cb49..56b695136977 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}

static const struct ata_port_info ahci_sunxi_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index eef637fd90b3..a59554e5b8b0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3279,7 +3279,7 @@ static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);

- del_timer(&ia_timer);
+ del_timer_sync(&ia_timer);
}

module_init(ia_module_init);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 09ad73361879..6eb4ed256a7e 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -297,7 +297,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");

- del_timer(&ns_timer);
+ del_timer_sync(&ns_timer);

pci_unregister_driver(&nicstar_driver);

@@ -525,6 +525,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
writel(0x00000000, card->membase + VPM);

+ card->intcnt = 0;
+ if (request_irq
+ (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+ pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+ error = 9;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
/* Initialize TSQ */
card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -751,15 +760,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)

card->efbie = 1;

- card->intcnt = 0;
- if (request_irq
- (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
- error = 9;
- ns_init_card_error(card, error);
- return error;
- }
-
/* Register device */
card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
-1, NULL);
@@ -837,10 +837,12 @@ static void ns_init_card_error(ns_dev *card, int error)
dev_kfree_skb_any(hb);
}
if (error >= 12) {
- kfree(card->rsq.org);
+ dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ card->rsq.org, card->rsq.dma);
}
if (error >= 11) {
- kfree(card->tsq.org);
+ dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ card->tsq.org, card->tsq.dma);
}
if (error >= 10) {
free_irq(card->pcidev->irq, card);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 06d44ae9701f..f0fa0c8e7ec6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1224,6 +1224,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
goto out_unlock;
}

+ if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
+ blk_queue_write_cache(lo->lo_queue, false, false);
+
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);

diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b1f0b13cc8bc..afd2b1f12d49 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -269,6 +269,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
@@ -1719,6 +1721,13 @@ static void btusb_work(struct work_struct *work)
* which work with WBS at all.
*/
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
+ /* Because mSBC frames do not need to be aligned to the
+ * SCO packet boundary. If support the Alt 3, use the
+ * Alt 3 for HCI payload >= 60 Bytes let air packet
+ * data satisfy 60 bytes.
+ */
+ if (new_alts == 1 && btusb_find_altsetting(data, 3))
+ new_alts = 3;
}

if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -2963,11 +2972,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_wmt_hdr *hdr;
int err;

- /* Submit control IN URB on demand to process the WMT event */
- err = btusb_mtk_submit_wmt_recv_urb(hdev);
- if (err < 0)
- return err;
-
/* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
@@ -2989,6 +2993,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
return err;
}

+ /* Submit control IN URB on demand to process the WMT event */
+ err = btusb_mtk_submit_wmt_recv_urb(hdev);
+ if (err < 0)
+ return err;
+
/* The vendor specific WMT commands are all answered by a vendor
* specific event and will have the Command Status or Command
* Complete as with usual HCI command flow control.
@@ -3549,6 +3558,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
sent += size;
count -= size;

+ /* ep2 need time to switch from function acl to function dfu,
+ * so we add 20ms delay here.
+ */
+ msleep(20);
+
while (count) {
size = min_t(size_t, count, QCA_DFU_PACKET_LEN);

diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f78156d93c3f..6384510c48d6 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -371,16 +371,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);

- if ((ipmi_version_major > 1)
- || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
- /* This is an IPMI 1.5-only feature. */
- data[0] |= WDOG_DONT_STOP_ON_SET;
- } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /*
- * In ipmi 1.0, setting the timer stops the watchdog, we
- * need to start it back up again.
- */
- hbnow = 1;
+ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+ if ((ipmi_version_major > 1) ||
+ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+ /* This is an IPMI 1.5-only feature. */
+ data[0] |= WDOG_DONT_STOP_ON_SET;
+ } else {
+ /*
+ * In ipmi 1.0, setting the timer stops the watchdog, we
+ * need to start it back up again.
+ */
+ hbnow = 1;
+ }
}

data[1] = 0;
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 5b4691117b47..026e2612c33c 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),

/* Core Clock Outputs */
+ DEF_FIXED("za2", R8A77995_CLK_ZA2, CLK_PLL0D3, 2, 1),
DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index d4c02986c34e..0ccc6e709a38 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -128,10 +128,8 @@ static int rcar_usb2_clock_sel_resume(struct device *dev)
static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usb2_clock_sel_priv *priv = platform_get_drvdata(pdev);

of_clk_del_provider(dev->of_node);
- clk_hw_unregister(&priv->hw);
pm_runtime_put(dev);
pm_runtime_disable(dev);

@@ -164,9 +162,6 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (IS_ERR(priv->rsts))
return PTR_ERR(priv->rsts);

- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
-
clk = devm_clk_get(dev, "usb_extal");
if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
priv->extal = !!clk_get_rate(clk);
@@ -183,6 +178,8 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
return -ENOENT;
}

+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
platform_set_drvdata(pdev, priv);
dev_set_drvdata(dev, priv);

@@ -193,11 +190,20 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
init.num_parents = 0;
priv->hw.init = &init;

- clk = clk_register(NULL, &priv->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(NULL, &priv->hw);
+ if (ret)
+ goto pm_put;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
+ if (ret)
+ goto pm_put;
+
+ return 0;

- return of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
+pm_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ return ret;
}

static const struct dev_pm_ops rcar_usb2_clock_sel_pm_ops = {
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index 4b31beefc9fc..dc3f92678407 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -48,18 +48,9 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
return state;
}

-static int clk_periph_enable(struct clk_hw *hw)
+static void clk_periph_enable_locked(struct clk_hw *hw)
{
struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
- unsigned long flags = 0;
-
- spin_lock_irqsave(&periph_ref_lock, flags);
-
- gate->enable_refcnt[gate->clk_num]++;
- if (gate->enable_refcnt[gate->clk_num] > 1) {
- spin_unlock_irqrestore(&periph_ref_lock, flags);
- return 0;
- }

write_enb_set(periph_clk_to_bit(gate), gate);
udelay(2);
@@ -78,6 +69,32 @@ static int clk_periph_enable(struct clk_hw *hw)
udelay(1);
writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
}
+}
+
+static void clk_periph_disable_locked(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+
+ /*
+ * If peripheral is in the APB bus then read the APB bus to
+ * flush the write operation in apb bus. This will avoid the
+ * peripheral access after disabling clock
+ */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_enb_clr(periph_clk_to_bit(gate), gate);
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ if (!gate->enable_refcnt[gate->clk_num]++)
+ clk_periph_enable_locked(hw);

spin_unlock_irqrestore(&periph_ref_lock, flags);

@@ -91,21 +108,28 @@ static void clk_periph_disable(struct clk_hw *hw)

spin_lock_irqsave(&periph_ref_lock, flags);

- gate->enable_refcnt[gate->clk_num]--;
- if (gate->enable_refcnt[gate->clk_num] > 0) {
- spin_unlock_irqrestore(&periph_ref_lock, flags);
- return;
- }
+ WARN_ON(!gate->enable_refcnt[gate->clk_num]);
+
+ if (--gate->enable_refcnt[gate->clk_num] == 0)
+ clk_periph_disable_locked(hw);
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+}
+
+static void clk_periph_disable_unused(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);

/*
- * If peripheral is in the APB bus then read the APB bus to
- * flush the write operation in apb bus. This will avoid the
- * peripheral access after disabling clock
+ * Some clocks are duplicated and some of them are marked as critical,
+ * like fuse and fuse_burn for example, thus the enable_refcnt will
+ * be non-zero here if the "unused" duplicate is disabled by CCF.
*/
- if (gate->flags & TEGRA_PERIPH_ON_APB)
- tegra_read_chipid();
-
- write_enb_clr(periph_clk_to_bit(gate), gate);
+ if (!gate->enable_refcnt[gate->clk_num])
+ clk_periph_disable_locked(hw);

spin_unlock_irqrestore(&periph_ref_lock, flags);
}
@@ -114,6 +138,7 @@ const struct clk_ops tegra_clk_periph_gate_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .disable_unused = clk_periph_disable_unused,
};

struct clk *tegra_clk_register_periph_gate(const char *name,
@@ -148,9 +173,6 @@ struct clk *tegra_clk_register_periph_gate(const char *name,
gate->enable_refcnt = enable_refcnt;
gate->regs = pregs;

- if (read_enb(gate) & periph_clk_to_bit(gate))
- enable_refcnt[clk_num]++;
-
/* Data in .init is copied by clk_register(), so stack variable OK */
gate->hw.init = &init;

diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 67620c7ecd9e..79ca3aa072b7 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -100,6 +100,15 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}

+static void clk_periph_disable_unused(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_ops->disable_unused(gate_hw);
+}
+
static void clk_periph_restore_context(struct clk_hw *hw)
{
struct tegra_clk_periph *periph = to_clk_periph(hw);
@@ -126,6 +135,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .disable_unused = clk_periph_disable_unused,
.restore_context = clk_periph_restore_context,
};

@@ -135,6 +145,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .disable_unused = clk_periph_disable_unused,
.restore_context = clk_periph_restore_context,
};

diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index c5cc0a2dac6f..d709ecb7d8d7 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1131,7 +1131,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);

- _clk_pll_enable(hw);
+ if (!clk_pll_is_enabled(hw))
+ _clk_pll_enable(hw);

ret = clk_pll_wait_for_lock(pll);
if (ret < 0)
@@ -1748,15 +1749,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
return -EINVAL;
}

- if (clk_pll_is_enabled(hw))
- return 0;
-
input_rate = clk_hw_get_rate(__clk_get_hw(osc));

if (pll->lock)
spin_lock_irqsave(pll->lock, flags);

- _clk_pll_enable(hw);
+ if (!clk_pll_is_enabled(hw))
+ _clk_pll_enable(hw);

ret = clk_pll_wait_for_lock(pll);
if (ret < 0)
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d0177824c518..f4881764bf8f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -352,7 +352,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
do { \
_val = read_sysreg(reg); \
_retries--; \
- } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+ } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
index f47016fb28a8..cd1a5f230077 100644
--- a/drivers/extcon/extcon-intel-mrfld.c
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct regmap *regmap = pmic->regmap;
struct mrfld_extcon_data *data;
+ unsigned int status;
unsigned int id;
int irq, ret;

@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
/* Get initial state */
mrfld_extcon_role_detect(data);

+ /*
+ * Cached status value is used for cable detection, see comments
+ * in mrfld_extcon_cable_detect(), we need to sync cached value
+ * with a real state of the hardware.
+ */
+ regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+ data->status = status;
+
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);

diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 0078260fbabe..172c751a4f6c 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -299,15 +299,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
return 0;
}

-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+ char *buf)
{
return sprintf(buf, "%u\n", fw_cfg_rev);
}

-static const struct {
- struct attribute attr;
- ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
-} fw_cfg_rev_attr = {
+static const struct kobj_attribute fw_cfg_rev_attr = {
.attr = { .name = "rev", .mode = S_IRUSR },
.show = fw_cfg_showrev,
};
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 657a70c5fc99..9e34bbbce26e 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -454,6 +454,7 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;

fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);

return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5da487b64a66..26f8a2138377 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -48,12 +48,6 @@ static struct {
spinlock_t mem_limit_lock;
} kfd_mem_limit;

-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
- uint32_t domain;
- bool wait;
-};
-
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
@@ -337,11 +331,9 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
return ret;
}

-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
- struct amdgpu_vm_parser *p = param;
-
- return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+ return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -355,20 +347,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
- struct amdgpu_vm_parser param;
int ret;

- param.domain = AMDGPU_GEM_DOMAIN_VRAM;
- param.wait = false;
-
- ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
- &param);
+ ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
}

- ret = amdgpu_amdkfd_validate(&param, pd);
+ ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
if (ret) {
pr_err("failed to validate PD\n");
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 87c7c45f1bb7..6948ab3c0d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2760,7 +2760,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};

- for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
int j;
struct amdgpu_ip_block *block;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 28f20f0b722f..163188ce02bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,7 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
- bool skip_preamble, need_ctx_switch;
+ bool need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -221,7 +221,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;

- skip_preamble = ring->current_ctx == fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
status |= job->preamble_status;
status |= job->preemption_status;
@@ -239,14 +238,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];

- /* drop preamble IBs if we don't have a context switch */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- skip_preamble &&
- !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
- !amdgpu_mcbp &&
- !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
- continue;
-
if (job && ring->funcs->emit_frame_cntl) {
if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
amdgpu_ring_emit_frame_cntl(ring, false, secure);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 183814493658..bda4438c3925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,6 +21,11 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__

+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * is the index of 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
/*
* (addr / 256) * 8192, the higher 26 bits in ErrorAddr
* is the index of 8KB block
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e82f49f62f6e..1f2e2460e121 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -143,7 +143,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

@@ -269,7 +269,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};

diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index 5665c77a9d58..afbbe9f05d5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -233,7 +233,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);

/* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 6ea8a4b6efde..352a32dc609b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -486,9 +486,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
if (retval == -ETIME)
qpd->reset_wavefronts = true;

-
- mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
if (qpd->reset_wavefronts) {
@@ -523,6 +520,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
+ struct mqd_manager *mqd_mgr =
+ dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];

/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
@@ -540,6 +539,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
pdd->sdma_past_activity_counter += sdma_val;
dqm_unlock(dqm);

+ mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
return retval;
}

@@ -1632,7 +1633,7 @@ static int set_trap_handler(struct device_queue_manager *dqm,
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct queue *q, *next;
+ struct queue *q;
struct device_process_node *cur, *next_dpn;
int retval = 0;
bool found = false;
@@ -1640,12 +1641,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
dqm_lock(dqm);

/* Clear all user mode queues */
- list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ while (!list_empty(&qpd->queues_list)) {
+ struct mqd_manager *mqd_mgr;
int ret;

+ q = list_first_entry(&qpd->queues_list, struct queue, list);
+ mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+ q->properties.type)];
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
if (ret)
retval = ret;
+ dqm_unlock(dqm);
+ mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ dqm_lock(dqm);
}

/* Unregister process */
@@ -1677,36 +1685,34 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *save_area_used_size)
{
struct mqd_manager *mqd_mgr;
- int r;

dqm_lock(dqm);

- if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.is_active || !q->device->cwsr_enabled) {
- r = -EINVAL;
- goto dqm_unlock;
- }
-
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

- if (!mqd_mgr->get_wave_state) {
- r = -EINVAL;
- goto dqm_unlock;
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.is_active || !q->device->cwsr_enabled ||
+ !mqd_mgr->get_wave_state) {
+ dqm_unlock(dqm);
+ return -EINVAL;
}

- r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
- ctl_stack_used_size, save_area_used_size);
-
-dqm_unlock:
dqm_unlock(dqm);
- return r;
+
+ /*
+ * get_wave_state is outside the dqm lock to prevent circular locking
+ * and the queue should be protected against destruction by the process
+ * lock.
+ */
+ return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+ ctl_stack_used_size, save_area_used_size);
}

static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct queue *q, *next;
+ struct queue *q;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
@@ -1763,24 +1769,26 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}

- dqm_unlock(dqm);
-
- /* Outside the DQM lock because under the DQM lock we can't do
- * reclaim or take other locks that others hold while reclaiming.
- */
- if (found)
- kfd_dec_compute_active(dqm->dev);
-
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
- list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ while (!list_empty(&qpd->queues_list)) {
+ q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
+ dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+ dqm_lock(dqm);
}
+ dqm_unlock(dqm);
+
+ /* Outside the DQM lock because under the DQM lock we can't do
+ * reclaim or take other locks that others hold while reclaiming.
+ */
+ if (found)
+ kfd_dec_compute_active(dqm->dev);

return retval;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index df26c07cb912..6eb308670f48 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3685,6 +3685,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;

+ /*
+ * For reasons we don't (yet) fully understand a non-zero
+ * src_y coordinate into an NV12 buffer can cause a
+ * system hang. To avoid hangs (and maybe be overly cautious)
+ * let's reject both non-zero src_x and src_y.
+ *
+ * We currently know of only one use-case to reproduce a
+ * scenario with non-zero src_x and src_y for NV12, which
+ * is to gesture the YouTube Android app into full screen
+ * on ChromeOS.
+ */
+ if (state->fb &&
+ state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 ||
+ scaling_info->src_rect.y != 0))
+ return -EINVAL;
+
/*
* For reasons we don't (yet) fully understand a non-zero
* src_y coordinate into an NV12 buffer can cause a
@@ -8291,7 +8308,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
BUG_ON(dm_new_crtc_state->stream == NULL);

/* Scaling or underscan settings */
- if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

@@ -8744,6 +8762,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;

+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret)
+ goto fail;
+
if (!new_crtc_state->enable)
continue;

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 1df7f1b18049..6c7235bb2f41 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -498,6 +498,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 5df05f0d18bc..179ff4b42f20 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -284,6 +284,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}

+/**
+ * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
+ * the expected size.
+ * Returns 0 on success.
+ */
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
+{
+ const struct drm_color_lut *lut = NULL;
+ uint32_t size = 0;
+
+ lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Degamma LUT size. Should be %u but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, size);
+ return -EINVAL;
+ }
+
+ lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
+ if (lut && size != MAX_COLOR_LUT_ENTRIES &&
+ size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
+ DRM_DEBUG_DRIVER(
+ "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
+ MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
+ size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
* @crtc: amdgpu_dm crtc state
@@ -317,14 +348,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
bool is_legacy;
int r;

- degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
- if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
- return -EINVAL;
+ r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+ if (r)
+ return r;

+ degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
- if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
- regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
- return -EINVAL;

has_degamma =
degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 32b73ea86673..a7f8caf1086b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1704,6 +1704,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
link->type = dc_connection_single;
link->local_sink = link->remote_sinks[0];
link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ dc_sink_retain(link->local_sink);
+ dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
} else if (mst_enable == true &&
link->type == dc_connection_single &&
link->remote_sinks[0] != NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index fce37c527a0b..8bb5912d837d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -482,10 +482,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
- enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;

- if (dpp->base.ctx->dc->debug.use_max_lb)
- return mem_cfg;
+ if (dpp->base.ctx->dc->debug.use_max_lb) {
+ if (scl_data->format == PIXEL_FORMAT_420BPP8
+ || scl_data->format == PIXEL_FORMAT_420BPP10)
+ return LB_MEMORY_CONFIG_3;
+ return LB_MEMORY_CONFIG_0;
+ }

dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index f1e9b3b06b92..9d3ccdd35582 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -243,7 +243,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);

/* This value is dependent on the hardware pipeline delay so set once per SOC */
- REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
+ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}

void dcn20_disable_vga(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 9e0ae18e71fa..2663f1b31842 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -64,6 +64,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
+#define DCN30_MAX_FMT_420_BUFFER_WIDTH 4096

static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -2052,7 +2053,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->DISPCLKWithoutRamping,
v->DISPCLKDPPCLKVCOSpeed);
v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
- v->soc.clock_limits[mode_lib->soc.num_states].dispclk_mhz,
+ v->soc.clock_limits[mode_lib->soc.num_states - 1].dispclk_mhz,
v->DISPCLKDPPCLKVCOSpeed);
if (v->DISPCLKWithoutRampingRoundedToDFSGranularity
> v->MaxDispclkRoundedToDFSGranularity) {
@@ -3957,20 +3958,20 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
- if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
- && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
- if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
- && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
- if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
- && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}

@@ -3987,19 +3988,30 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
- } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
- v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
- v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
- /*420 format workaround*/
- if (v->HActive[k] > 4096 && v->OutputFormat[k] == dm_420) {
+ }
+ if (v->DSCEnabled[k] && v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH
+ && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+ if (v->HActive[k] / 2 > DCN30_MAX_DSC_IMAGE_WIDTH) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ } else {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ }
+ }
+ if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN30_MAX_FMT_420_BUFFER_WIDTH
+ && v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
+ if (v->HActive[k] / 2 > DCN30_MAX_FMT_420_BUFFER_WIDTH) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ } else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
-
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 4;
@@ -4281,42 +4293,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}

- for (i = 0; i < v->soc.num_states; i++) {
- v->DSCCLKRequiredMoreThanSupported[i] = false;
- for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
- if (v->BlendingAndTiming[k] == k) {
- if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
- if (v->OutputFormat[k] == dm_420) {
- v->DSCFormatFactor = 2;
- } else if (v->OutputFormat[k] == dm_444) {
- v->DSCFormatFactor = 1;
- } else if (v->OutputFormat[k] == dm_n422) {
- v->DSCFormatFactor = 2;
- } else {
- v->DSCFormatFactor = 1;
- }
- if (v->RequiresDSC[i][k] == true) {
- if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
- if (v->PixelClockBackEnd[k] / 12.0 / v->DSCFormatFactor
- > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
- v->DSCCLKRequiredMoreThanSupported[i] = true;
- }
- } else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
- if (v->PixelClockBackEnd[k] / 6.0 / v->DSCFormatFactor
- > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
- v->DSCCLKRequiredMoreThanSupported[i] = true;
- }
- } else {
- if (v->PixelClockBackEnd[k] / 3.0 / v->DSCFormatFactor
- > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
- v->DSCCLKRequiredMoreThanSupported[i] = true;
- }
- }
- }
- }
- }
- }
- }
+ /* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
+
for (i = 0; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
@@ -5319,7 +5297,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
- && v->NotEnoughDSCUnits[i] == 0 && v->DSCCLKRequiredMoreThanSupported[i] == 0
+ && v->NotEnoughDSCUnits[i] == 0
&& v->DTBCLKRequiredMoreThanSupported[i] == 0
&& v->ROBSupport[i][j] == 1 && v->DISPCLK_DPPCLK_Support[i][j] == 1 && v->TotalAvailablePipesSupport[i][j] == 1
&& EnoughWritebackUnits == 1 && WritebackModeSupport == 1
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index d0ccd81ad5b4..ad3e5621a174 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -163,7 +163,7 @@ enum irq_type
};

#define DAL_VALID_IRQ_SRC_NUM(src) \
- ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)

/* Number of Page Flip IRQ Sources. */
#define DAL_PFLIP_IRQ_SRC_NUM \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 20e554e771d1..fa8aeec304ef 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -260,7 +260,6 @@ enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
struct mod_hdcp_output output;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

- memset(hdcp, 0, sizeof(struct mod_hdcp));
memset(&output, 0, sizeof(output));
hdcp->config = *config;
HDCP_TOP_INTERFACE_TRACE(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..53eab2b8e2c8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,8 +29,10 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ u8 bksv[sizeof(n)] = { };

- memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
+ memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+ n = *(uint64_t *)bksv;

while (n) {
count++;
diff --git a/drivers/gpu/drm/amd/include/navi10_enum.h b/drivers/gpu/drm/amd/include/navi10_enum.h
index d5ead9680c6e..84bcb96f76ea 100644
--- a/drivers/gpu/drm/amd/include/navi10_enum.h
+++ b/drivers/gpu/drm/amd/include/navi10_enum.h
@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
*/

typedef enum ENUM_NUM_SIMD_PER_CU {
-NUM_SIMD_PER_CU = 0x00000004,
+NUM_SIMD_PER_CU = 0x00000002,
} ENUM_NUM_SIMD_PER_CU;

/*
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 351a85088d0e..f1e8bc39b16d 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
.atomic_disable = malidp_de_plane_disable,
};

+static const uint64_t linear_only_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -985,8 +990,8 @@ int malidp_de_planes_init(struct drm_device *drm)
*/
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats, n,
- (id == DE_SMART) ? NULL : modifiers, plane_type,
- NULL);
+ (id == DE_SMART) ? linear_only_modifiers : modifiers,
+ plane_type, NULL);

if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 88121c0e0d05..cd93c44f2662 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -189,6 +189,9 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
u32 i, data;
u32 boot_address;

+ if (ast->config_mode != ast_use_p2a)
+ return false;
+
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
@@ -207,6 +210,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
u8 *fw_addr = NULL;
u8 jreg;

+ if (ast->config_mode != ast_use_p2a)
+ return false;
+
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {

@@ -271,25 +277,55 @@ u8 ast_get_dp501_max_clk(struct drm_device *dev)
struct ast_private *ast = to_ast_private(dev);
u32 boot_address, offset, data;
u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
+ u32 *plinkcap;

- boot_address = get_fw_base(ast);
-
- /* validate FW version */
- offset = 0xf000;
- data = ast_mindwm(ast, boot_address + offset);
- if ((data & 0xf0) != 0x10) /* version: 1x */
- return maxclk;
-
- /* Read Link Capability */
- offset = 0xf014;
- *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
- if (linkcap[2] == 0) {
- linkrate = linkcap[0];
- linklanes = linkcap[1];
- data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
- if (data > 0xff)
- data = 0xff;
- maxclk = (u8)data;
+ if (ast->config_mode == ast_use_p2a) {
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = AST_DP501_LINKRATE;
+ plinkcap = (u32 *)linkcap;
+ *plinkcap = ast_mindwm(ast, boot_address + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
+ } else {
+ if (!ast->dp501_fw_buf)
+ return AST_DP501_DEFAULT_DCLK; /* 1024x768 as default */
+
+ /* dummy read */
+ offset = 0x0000;
+ data = readl(ast->dp501_fw_buf + offset);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = readl(ast->dp501_fw_buf + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = AST_DP501_LINKRATE;
+ plinkcap = (u32 *)linkcap;
+ *plinkcap = readl(ast->dp501_fw_buf + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
}
return maxclk;
}
@@ -298,26 +334,57 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, boot_address, offset, data;
+ u32 *pEDIDidx;

- boot_address = get_fw_base(ast);
-
- /* validate FW version */
- offset = 0xf000;
- data = ast_mindwm(ast, boot_address + offset);
- if ((data & 0xf0) != 0x10)
- return false;
-
- /* validate PnP Monitor */
- offset = 0xf010;
- data = ast_mindwm(ast, boot_address + offset);
- if (!(data & 0x01))
- return false;
+ if (ast->config_mode == ast_use_p2a) {
+ boot_address = get_fw_base(ast);

- /* Read EDID */
- offset = 0xf020;
- for (i = 0; i < 128; i += 4) {
- data = ast_mindwm(ast, boot_address + offset + i);
- *(u32 *)(ediddata + i) = data;
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = AST_DP501_PNPMONITOR;
+ data = ast_mindwm(ast, boot_address + offset);
+ if (!(data & AST_DP501_PNP_CONNECTED))
+ return false;
+
+ /* Read EDID */
+ offset = AST_DP501_EDID_DATA;
+ for (i = 0; i < 128; i += 4) {
+ data = ast_mindwm(ast, boot_address + offset + i);
+ pEDIDidx = (u32 *)(ediddata + i);
+ *pEDIDidx = data;
+ }
+ } else {
+ if (!ast->dp501_fw_buf)
+ return false;
+
+ /* dummy read */
+ offset = 0x0000;
+ data = readl(ast->dp501_fw_buf + offset);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = readl(ast->dp501_fw_buf + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = AST_DP501_PNPMONITOR;
+ data = readl(ast->dp501_fw_buf + offset);
+ if (!(data & AST_DP501_PNP_CONNECTED))
+ return false;
+
+ /* Read EDID */
+ offset = AST_DP501_EDID_DATA;
+ for (i = 0; i < 128; i += 4) {
+ data = readl(ast->dp501_fw_buf + offset + i);
+ pEDIDidx = (u32 *)(ediddata + i);
+ *pEDIDidx = data;
+ }
}

return true;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 467049ca8430..b68b1ddfecb7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -120,6 +120,7 @@ struct ast_private {

void __iomem *regs;
void __iomem *ioregs;
+ void __iomem *dp501_fw_buf;

enum ast_chip chip;
bool vga2_clone;
@@ -298,6 +299,17 @@ int ast_mode_config_init(struct ast_private *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)

+#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4)
+#define AST_DP501_FW_VERSION_1 BIT(4)
+#define AST_DP501_PNP_CONNECTED BIT(1)
+
+#define AST_DP501_DEFAULT_DCLK 65
+
+#define AST_DP501_GBL_VERSION 0xf000
+#define AST_DP501_PNPMONITOR 0xf010
+#define AST_DP501_LINKRATE 0xf014
+#define AST_DP501_EDID_DATA 0xf020
+
int ast_mm_init(struct ast_private *ast);

/* ast post */
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index ee82b2ddf932..cf08215a9f21 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -98,7 +98,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/* Double check it's actually working */
data = ast_read32(ast, 0xf004);
- if (data != 0xFFFFFFFF) {
+ if ((data != 0xFFFFFFFF) && (data != 0x00)) {
/* P2A works, grab silicon revision */
ast->config_mode = ast_use_p2a;

@@ -406,7 +406,6 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
return ast;
dev = &ast->base;

- dev->pdev = pdev;
pci_set_drvdata(pdev, dev);

ast->regs = pcim_iomap(pdev, 1, 0);
@@ -446,6 +445,14 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);

+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index d0c65610ebb5..f56ff97c9899 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2369,9 +2369,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
clk_prepare_enable(clk);

pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
goto clk_disable;
}
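
For context, this hunk and several later ones (cdns-dsi, mtk_drm_crtc, vc4_hdmi) swap pm_runtime_get_sync() for pm_runtime_resume_and_get(). The difference only shows in the error path: pm_runtime_get_sync() raises the usage counter even when the resume fails, so the caller has to drop it explicitly, which the old error paths here did not do. pm_runtime_resume_and_get() drops the reference itself on failure. A minimal sketch of the two patterns, illustrative only and not taken from these drivers:

#include <linux/pm_runtime.h>

/* Old pattern: the usage count is bumped even on failure, so the error
 * path must drop it or the device can never runtime-suspend again. */
static int old_style_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}

/* New pattern: the helper already dropped the reference if resume failed. */
static int new_style_get(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	return ret < 0 ? ret : 0;
}
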
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 76373e31df92..b31281f76117 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1028,7 +1028,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
int ret, i, tx_len, rx_len;

- ret = pm_runtime_get_sync(host->dev);
+ ret = pm_runtime_resume_and_get(host->dev);
if (ret < 0)
return ret;

diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index d734d9402c35..c1926154eda8 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1209,6 +1209,7 @@ static struct i2c_device_id lt9611_id[] = {
{ "lontium,lt9611", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, lt9611_id);

static const struct of_device_id lt9611_match_table[] = {
{ .compatible = "lontium,lt9611" },
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 66b67402f1ac..c65ca860712d 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -21,6 +21,7 @@
#include <linux/sys_soc.h>
#include <linux/time64.h>

+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -742,7 +743,9 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}

-static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+static void
+nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -803,17 +806,6 @@ static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
return 0;
}

-static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* At least LCDIF + NWL needs active high sync */
- adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
-
- return true;
-}
-
static enum drm_mode_status
nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
@@ -831,6 +823,24 @@ nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}

+static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+
+ /* At least LCDIF + NWL needs active high sync */
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+ /* Do a full modeset if crtc_state->active is changed to be true. */
+ if (crtc_state->active_changed && crtc_state->active)
+ crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static void
nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -862,7 +872,9 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
drm_mode_debug_printmodeline(adjusted_mode);
}

-static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static void
+nwl_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -897,7 +909,9 @@ static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
}
}

-static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+static void
+nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -942,14 +956,17 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
}

static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
- .pre_enable = nwl_dsi_bridge_pre_enable,
- .enable = nwl_dsi_bridge_enable,
- .disable = nwl_dsi_bridge_disable,
- .mode_fixup = nwl_dsi_bridge_mode_fixup,
- .mode_set = nwl_dsi_bridge_mode_set,
- .mode_valid = nwl_dsi_bridge_mode_valid,
- .attach = nwl_dsi_bridge_attach,
- .detach = nwl_dsi_bridge_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = nwl_dsi_bridge_atomic_check,
+ .atomic_pre_enable = nwl_dsi_bridge_atomic_pre_enable,
+ .atomic_enable = nwl_dsi_bridge_atomic_enable,
+ .atomic_disable = nwl_dsi_bridge_atomic_disable,
+ .mode_set = nwl_dsi_bridge_mode_set,
+ .mode_valid = nwl_dsi_bridge_mode_valid,
+ .attach = nwl_dsi_bridge_attach,
+ .detach = nwl_dsi_bridge_detach,
};

static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index deeed73f4ed6..3c55753bab16 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -602,7 +602,14 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return 0;

+ /* Some branches advertise having 0 downstream ports, despite also advertising they have a
+ * downstream port present. The DP spec isn't clear on if this is allowed or not, but since
+ * some branches do it we need to handle it regardless.
+ */
len = drm_dp_downstream_port_count(dpcd);
+ if (!len)
+ return 0;
+
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;

diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index eb02ecb6e345..65d73eb5e155 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5080,7 +5080,7 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
if (size < sizeof(struct dp_sdp))
return -EINVAL;

- memset(vsc, 0, size);
+ memset(vsc, 0, sizeof(*vsc));

if (sdp->sdp_header.HB0 != 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index ac038572164d..dfd5ed15a7f4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -274,7 +274,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
drm_connector_list_iter_end(&conn_iter);
}

- ret = pm_runtime_get_sync(crtc->dev->dev);
+ ret = pm_runtime_resume_and_get(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index dbf8d429223e..2f75e3905202 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

- dev->mode_config.allow_fb_modifiers = true;
-
out:
pm_runtime_put_sync(dev->dev);

diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index da3cc1d8c331..ee1dbb2b84af 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
return mdp4_plane->pipe;
}

+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
@@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
- NULL, type, NULL);
+ supported_format_modifiers, type, NULL);
if (ret)
goto fail;

diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 0143d539f8f8..ee22cd25d3e3 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,6 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca..f2ad6f49fb72 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -700,7 +700,6 @@ nouveau_display_create(struct drm_device *dev)

dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.allow_fb_modifiers = true;

if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e0ae911ef427..71bdafac9210 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1334,6 +1334,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}

diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4cd30613fa1d..e9df5e8ef0a5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -416,13 +416,13 @@ radeon_pci_shutdown(struct pci_dev *pdev)
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);

-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
- * during kexec.
- * Make this power specific becauase it breaks
- * some non-power boards.
+ * during kexec, shutdown or reboot.
+ * Make this power and Loongson specific because
+ * it breaks some other boards.
*/
radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 75a76408cb29..d0c9610ad220 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -243,7 +243,6 @@ struct dw_mipi_dsi_rockchip {
struct dw_mipi_dsi *dmd;
const struct rockchip_dw_dsi_chip_data *cdata;
struct dw_mipi_dsi_plat_data pdata;
- int devcnt;
};

struct dphy_pll_parameter_map {
@@ -1141,9 +1140,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);

- if (dsi->devcnt == 0)
- component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
-
dw_mipi_dsi_remove(dsi->dmd);

return 0;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 80053d91a301..a6fe03c3748a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -349,8 +349,8 @@ static const struct vop_win_phy rk3066_win0_data = {
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -361,13 +361,12 @@ static const struct vop_win_phy rk3066_win0_data = {
};

static const struct vop_win_phy rk3066_win1_data = {
- .scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
@@ -382,8 +381,8 @@ static const struct vop_win_phy rk3066_win2_data = {
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
- .format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
- .rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
+ .format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
+ .rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
@@ -408,6 +407,9 @@ static const struct vop_common rk3066_common = {
.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
+ .dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
+ .dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
+ .data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
};

static const struct vop_win_data rk3066_vop_win_data[] = {
@@ -505,7 +507,10 @@ static const struct vop_common rk3188_common = {
.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
- .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
+ .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
+ .dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
+ .dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
+ .data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
};

static const struct vop_win_data rk3188_vop_win_data[] = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 146380118962..3f7f761df4cd 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -113,7 +113,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */

if (list_empty(&entity->list) ||
- spsc_queue_count(&entity->job_queue) == 0)
+ spsc_queue_count(&entity->job_queue) == 0 ||
+ entity->stopped)
return true;

return false;
@@ -218,11 +219,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
+ struct dma_fence *f;
int r;

while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
struct drm_sched_fence *s_fence = job->s_fence;

+ /* Wait for all dependencies to avoid data corruptions */
+ while ((f = job->sched->ops->dependency(job, entity)))
+ dma_fence_wait(f, false);
+
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7111e0f527b0..b6c2757c3d83 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -887,9 +887,33 @@ EXPORT_SYMBOL(drm_sched_init);
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_entity *s_entity;
+ int i;
+
if (sched->thread)
kthread_stop(sched->thread);

+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list)
+ /*
+ * Prevents reinsertion and marks job_queue as idle,
+ * it will be removed from rq in drm_sched_entity_fini
+ * eventually
+ */
+ s_entity->stopped = true;
+ spin_unlock(&rq->lock);
+
+ }
+
+ /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
+ wake_up_all(&sched->job_scheduled);
+
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);

diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3aa9a7406085..ceb86338c003 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -947,6 +947,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.atomic_disable = tegra_cursor_atomic_disable,
};

+static const uint64_t linear_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
@@ -975,7 +980,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,

err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL,
+ num_formats, linear_modifiers,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
@@ -1094,7 +1099,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,

err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL, type, NULL);
+ num_formats, linear_modifiers,
+ type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e4baf07992a4..2c6ebc328b24 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1127,8 +1127,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;

- drm->mode_config.allow_fb_modifiers = true;
-
drm->mode_config.normalize_zpos = true;

drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 1d2416d466a3..f4ccca922e44 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -1001,7 +1001,7 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
- [0] = VC4_ENCODER_TYPE_VEC,
+ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};

@@ -1042,6 +1042,9 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct vc4_encoder *vc4_encoder;
int i;

+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ continue;
+
vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index c5f2944d5bc6..9809c3a856c6 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -837,7 +837,7 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
- unsigned int *right, unsigned int *left,
+ unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 25a09aaf5883..c58b8840090a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -627,7 +627,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
unsigned long pixel_rate, hsm_rate;
int ret;

- ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
return;
@@ -1807,6 +1807,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);

+ if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
+ clk_prepare_enable(vc4_hdmi->pixel_clock);
+ clk_prepare_enable(vc4_hdmi->hsm_clock);
+ clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ }
+
pm_runtime_enable(dev);

drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 849dcafbfff1..d13502ae973d 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -503,7 +503,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;

encoder = &txp->connector.encoder;
- encoder->possible_crtcs |= drm_crtc_mask(crtc);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);

ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index eed57a931309..a28b01f92793 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -209,6 +209,7 @@ int virtio_gpu_init(struct drm_device *dev)
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
+ dev->dev_private = NULL;
kfree(vgdev);
return ret;
}
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
index 90ebaedc11fd..aa8594190b50 100644
--- a/drivers/gpu/drm/zte/Kconfig
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -3,7 +3,6 @@ config DRM_ZTE
tristate "DRM Support for ZTE SoCs"
depends on DRM && ARCH_ZX
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
select SND_SOC_HDMI_CODEC if SND_SOC
select VIDEOMODE_HELPERS
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index b2088d2d386a..424c296845db 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1347,7 +1347,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
}
}

- return 0;
+ return ret;
}

static int coresight_remove_match(struct device *dev, void *data)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 989d965f3d90..8978f3410bee 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -528,7 +528,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
*buf_ptr = *barrier;
barrier++;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 0c879e40bd18..34b94e525390 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2793,7 +2793,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,

cma_init_resolve_route_work(work, id_priv);

- route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+ if (!route->path_rec)
+ route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5df4bb52bb10..861e19fdfeb4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ ret = -EINVAL;
goto free_dma;
}

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d2ce852447c1..026285f7f36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -139,7 +139,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
- err = -EINVAL;
+ err = PTR_ERR(umem);
goto err1;
}

diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index e653c83f8a35..edea37da8a5b 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -35,10 +35,10 @@ static const struct kernel_param_ops sg_tablesize_ops = {
.get = param_get_int,
};

-static int isert_sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE;
+static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
- "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 256, max: 4096)");
+ "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6c5af13db4e0..ca8cfebe26ca 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -65,9 +65,6 @@
*/
#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)

-/* Default I/O size is 1MB */
-#define ISCSI_ISER_DEF_SG_TABLESIZE 256
-
/* Minimum I/O size is 512KB */
#define ISCSI_ISER_MIN_SG_TABLESIZE 128

diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 8caad0a2322b..51c60f542876 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -47,12 +47,15 @@ enum {
MAX_PATHS_NUM = 128,

/*
- * With the size of struct rtrs_permit allocated on the client, 4K
- * is the maximum number of rtrs_permits we can allocate. This number is
- * also used on the client to allocate the IU for the user connection
- * to receive the RDMA addresses from the server.
+ * Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
+ * and the minimum chunk size is 4096 (2^12).
+ * So the maximum sess_queue_depth is 65536 (2^16) in theory.
+ * But mempool_create, create_qp and ib_post_send fail with
+ * "cannot allocate memory" error if sess_queue_depth is too big.
+ * Therefore the practical max value of sess_queue_depth is
+ * somewhere between 1 and 65536 and it depends on the system.
*/
- MAX_SESS_QUEUE_DEPTH = 4096,
+ MAX_SESS_QUEUE_DEPTH = 65536,

RTRS_HB_INTERVAL_MS = 5000,
RTRS_HB_MISSED_MAX = 5,
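
The new limit follows directly from the figures in the comment above: with 2^28 bytes addressable by the immediate field and a minimum chunk of 2^12 bytes,

	2^28 / 2^12 = 2^16 = 65536 chunks,

hence MAX_SESS_QUEUE_DEPTH = 65536 as the theoretical ceiling, with the practical value lower and system-dependent.
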
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index ec71063fff76..e1822e87ec3d 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -596,8 +596,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,

out_err_bus_register:
tpci200_uninstall(tpci200);
+ /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
+ tpci200->info->cfg_regs = NULL;
out_err_install:
- iounmap(tpci200->info->cfg_regs);
+ if (tpci200->info->cfg_regs)
+ iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 56bd2e9db6ed..e501cb03f211 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2342,7 +2342,7 @@ static void __exit
HFC_cleanup(void)
{
if (timer_pending(&hfc_tl))
- del_timer(&hfc_tl);
+ del_timer_sync(&hfc_tl);

pci_unregister_driver(&hfc_driver);
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 8628c4aa2e85..9d6ae3e64285 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -532,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)

region.bdev = wc->ssd_dev->bdev;
region.sector = 0;
- region.count = PAGE_SIZE >> SECTOR_SHIFT;
+ region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

if (unlikely(region.sector + region.count > wc->metadata_sectors))
region.count = wc->metadata_sectors - region.sector;
@@ -1301,8 +1301,12 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
writecache_flush(wc);
if (writecache_has_error(wc))
goto unlock_error;
+ if (unlikely(wc->cleaner))
+ goto unlock_remap_origin;
goto unlock_submit;
} else {
+ if (dm_bio_get_target_bio_nr(bio))
+ goto unlock_remap_origin;
writecache_offload_bio(wc, bio);
goto unlock_return;
}
@@ -1360,14 +1364,18 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
} else {
do {
bool found_entry = false;
+ bool search_used = false;
if (writecache_has_error(wc))
goto unlock_error;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
if (e) {
- if (!writecache_entry_is_committed(wc, e))
+ if (!writecache_entry_is_committed(wc, e)) {
+ search_used = true;
goto bio_copy;
+ }
if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
wc->overwrote_committed = true;
+ search_used = true;
goto bio_copy;
}
found_entry = true;
@@ -1377,7 +1385,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
}
e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
- if (!found_entry) {
+ if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
if (e) {
@@ -1404,13 +1412,31 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

while (bio_size < bio->bi_iter.bi_size) {
- struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
- if (!f)
- break;
- write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
- (bio_size >> SECTOR_SHIFT), wc->seq_count);
- writecache_insert_entry(wc, f);
- wc->uncommitted_blocks++;
+ if (!search_used) {
+ struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+ if (!f)
+ break;
+ write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+ (bio_size >> SECTOR_SHIFT), wc->seq_count);
+ writecache_insert_entry(wc, f);
+ wc->uncommitted_blocks++;
+ } else {
+ struct wc_entry *f;
+ struct rb_node *next = rb_next(&e->rb_node);
+ if (!next)
+ break;
+ f = container_of(next, struct wc_entry, rb_node);
+ if (f != e + 1)
+ break;
+ if (read_original_sector(wc, f) !=
+ read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+ break;
+ if (unlikely(f->write_in_progress))
+ break;
+ if (writecache_entry_is_committed(wc, f))
+ wc->overwrote_committed = true;
+ e = f;
+ }
bio_size += wc->block_size;
current_cache_sec += wc->block_size >> SECTOR_SHIFT;
}
@@ -2465,7 +2491,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}

- ti->num_flush_bios = 1;
+ ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
ti->flush_supported = true;
ti->num_discard_bios = 1;

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index b298fefb022e..510090797461 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1390,6 +1390,13 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
return -ENXIO;
}

+ /*
+ * Devices that have zones with a capacity smaller than the zone size
+ * (e.g. NVMe zoned namespaces) are not supported.
+ */
+ if (blkz->capacity != blkz->len)
+ return -ENXIO;
+
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 638c04f9e832..19a70f434029 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1230,8 +1230,8 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,

/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
- * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
+ * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
+ * operations and REQ_OP_ZONE_APPEND (zone append writes).
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1261,9 +1261,13 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
+ BUG_ON(op_is_zone_mgmt(bio_op(bio)));
+ BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
+
*tio->len_ptr -= bi_size - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index eff04fa23dfa..9e4d1212f4c1 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
delete_at(n, index);
}

- *new_root = shadow_root(&spine);
+ if (!r)
+ *new_root = shadow_root(&spine);
exit_shadow_spine(&spine);

return r;
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index bf4c5e2ccb6f..e0acae7a3815 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
+ if (r == -ENOSPC) {
+ /*
+ * There's no free block between smd->begin and the end of the metadata device.
+ * We search before smd->begin in case something has been freed.
+ */
+ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
+ }
+
if (r)
return r;

@@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
return r;

memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
- smd->begin = 0;
smd->nr_allocated_this_transaction = 0;

r = sm_disk_get_nr_free(sm, &nr_free);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 9e3c64ec2026..da439ac85796 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -452,6 +452,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
+ if (r == -ENOSPC) {
+ /*
+ * There's no free block between smm->begin and the end of the metadata device.
+ * We search before smm->begin in case something has been freed.
+ */
+ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
+ }
+
if (r)
return r;

@@ -503,7 +511,6 @@ static int sm_metadata_commit(struct dm_space_map *sm)
return r;

memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
- smm->begin = 0;
smm->allocated_this_transaction = 0;

return 0;
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index ecb491d5f2ab..d1e0716bdfff 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)

/* ---------------------------------------------------------------------- */

-static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct saa6588 *s = to_saa6588(sd);
struct saa6588_command *a = arg;
@@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
/* ----------------------------------------------------------------------- */

static const struct v4l2_subdev_core_ops saa6588_core_ops = {
- .ioctl = saa6588_ioctl,
+ .command = saa6588_command,
};

static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 8824dd0fb331..35a51e9b539d 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3187,7 +3187,7 @@ static int radio_release(struct file *file)

btv->radio_user--;

- bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);

if (btv->radio_user == 0)
btv->has_radio_tuner = 0;
@@ -3268,7 +3268,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;
radio_enable(btv);

- bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);

return cmd.result;
}
@@ -3289,7 +3289,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.instance = file;
cmd.event_list = wait;
cmd.poll_mask = res;
- bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
+ bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);

return cmd.poll_mask;
}
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 9a6a6b68f8e3..85d082baaadc 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1178,7 +1178,7 @@ static int video_release(struct file *file)

saa_call_all(dev, tuner, standby);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
mutex_unlock(&dev->lock);

return 0;
@@ -1197,7 +1197,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;

mutex_lock(&dev->lock);
- saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
mutex_unlock(&dev->lock);

return cmd.result;
@@ -1213,7 +1213,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.event_list = wait;
cmd.poll_mask = 0;
mutex_lock(&dev->lock);
- saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
+ saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
mutex_unlock(&dev->lock);

return rc | cmd.poll_mask;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index d19bad997f30..bf3c3e76b921 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -47,7 +47,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)

ret = v4l2_subdev_call(vpbe_dev->venc,
core,
- ioctl,
+ command,
VENC_GET_FLD,
&val);
if (ret < 0) {
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 8caa084e5704..bde241c26d79 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
return ret;
}

-static long venc_ioctl(struct v4l2_subdev *sd,
- unsigned int cmd,
- void *arg)
+static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
u32 val;

@@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
}

static const struct v4l2_subdev_core_ops venc_core_ops = {
- .ioctl = venc_ioctl,
+ .command = venc_command,
};

static const struct v4l2_subdev_video_ops venc_video_ops = {
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 3fe3edd80876..afae0afe3f81 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -326,7 +326,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
}

if (attr->query.prog_cnt != 0 && prog_ids && cnt)
- ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
+ ret = bpf_prog_array_copy_to_user(progs, prog_ids,
+ attr->query.prog_cnt);

unlock:
mutex_unlock(&ir_raw_handler_lock);
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index fba06932a9e0..1c13e493322c 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
struct dtv5100_state *st = d->priv;
+ unsigned int pipe;
u8 request;
u8 type;
u16 value;
@@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
switch (wlen) {
case 1:
/* write { reg }, read { value } */
+ pipe = usb_rcvctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
DTV5100_TUNER_READ);
type = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -41,6 +43,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
break;
case 2:
/* write { reg, value } */
+ pipe = usb_sndctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
DTV5100_TUNER_WRITE);
type = USB_TYPE_VENDOR | USB_DIR_OUT;
@@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,

memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
- return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
+ return usb_control_msg(d->udev, pipe, request,
type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,

/* initialize non qt1010/zl10353 part? */
for (i = 0; dtv5100_init[i].request; i++) {
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
dtv5100_init[i].request,
USB_TYPE_VENDOR | USB_DIR_OUT,
dtv5100_init[i].value,
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 949111070971..32504ebcfd4d 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -116,7 +116,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
}

ret = usb_control_msg(gspca_dev->dev,
- usb_sndctrlpipe(gspca_dev->dev, 0),
+ usb_rcvctrlpipe(gspca_dev->dev, 0),
USB_REQ_SYNCH_FRAME, /* request */
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
SQ905_PING, 0, gspca_dev->usb_buf, 1,
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index ace3da40006e..971dee0a56da 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -242,6 +242,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
gspca_err(gspca_dev, "reg_r: buffer overflow\n");
return;
}
+ if (len == 0) {
+ gspca_err(gspca_dev, "reg_r: zero-length read\n");
+ return;
+ }
if (gspca_dev->usb_err < 0)
return;
ret = usb_control_msg(gspca_dev->dev,
@@ -250,7 +254,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index,
- len ? gspca_dev->usb_buf : NULL, len,
+ gspca_dev->usb_buf, len,
500);
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
@@ -727,7 +731,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case MegaImageVI:
reg_w_riv(gspca_dev, 0xf0, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
- reg_r(gspca_dev, 0xf0, 4, 0);
+ reg_w_riv(gspca_dev, 0xf0, 4, 0);
spca504B_WaitCmdStatus(gspca_dev);
break;
default:
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a6a441d92b94..5878c7833486 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -124,10 +124,37 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
+ static const struct usb_device_id elgato_cam_link_4k = {
+ USB_DEVICE(0x0fd9, 0x0066)
+ };
struct uvc_format *format = NULL;
struct uvc_frame *frame = NULL;
unsigned int i;

+ /*
+ * The response of the Elgato Cam Link 4K is incorrect: The second byte
+ * contains bFormatIndex (instead of being the second byte of bmHint).
+ * The first byte is always zero. The third byte is always 1.
+ *
+ * The UVC 1.5 class specification defines the first five bits in the
+ * bmHint bitfield. The remaining bits are reserved and should be zero.
+ * Therefore a valid bmHint will be less than 32.
+ *
+ * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix.
+ * MCU: 20.02.19, FPGA: 67
+ */
+ if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
+ ctrl->bmHint > 255) {
+ u8 corrected_format_index = ctrl->bmHint >> 8;
+
+ /* uvc_dbg(stream->dev, VIDEO,
+ "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
+ ctrl->bmHint, ctrl->bFormatIndex,
+ 1, corrected_format_index); */
+ ctrl->bmHint = 1;
+ ctrl->bFormatIndex = corrected_format_index;
+ }
+
for (i = 0; i < stream->nformats; ++i) {
if (stream->format[i].index == ctrl->bFormatIndex) {
format = &stream->format[i];
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 8c670934d920..1b79053b2a05 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1034,6 +1034,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
DBG("submitting URB %p\n", pipe_info->stream_urb);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
+ usb_free_urb(pipe_info->stream_urb);
printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n");
return retval;
}
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index ca465794ea9c..df5cebb372a5 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -108,6 +108,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
syscon_config.max_register = resource_size(&res) - reg_io_width;

regmap = regmap_init_mmio(NULL, base, &syscon_config);
+ kfree(syscon_config.name);
if (IS_ERR(regmap)) {
pr_err("regmap init failed\n");
ret = PTR_ERR(regmap);
@@ -144,7 +145,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
regmap_exit(regmap);
err_regmap:
iounmap(base);
- kfree(syscon_config.name);
err_map:
kfree(syscon);
return ERR_PTR(ret);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index a0675d4154d2..a337f97b30e2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -144,6 +144,9 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
if (*p == 0)
val = 0x87654321;
*p = val;
+
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

void lkdtm_SOFTLOCKUP(void)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index eaf4810fe656..b5f3f160c842 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -936,11 +936,14 @@ int mmc_execute_tuning(struct mmc_card *card)

err = host->ops->execute_tuning(host, opcode);

- if (err)
+ if (err) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
- else
+ } else {
+ host->retune_now = 0;
+ host->need_retune = 0;
mmc_retune_enable(host);
+ }

return err;
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 636d4e3aa0e3..bac343a8d569 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -847,11 +847,13 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
return err;

/*
- * In case CCS and S18A in the response is set, start Signal Voltage
- * Switch procedure. SPI mode doesn't support CMD11.
+ * In case the S18A bit is set in the response, let's start the signal
+ * voltage switch procedure. SPI mode doesn't support CMD11.
+ * Note that, according to the spec, the S18A bit is not valid unless
+ * the CCS bit is set as well. We deliberately deviate from the spec in
+ * regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr &&
- ((*rocr & 0x41000000) == 0x41000000)) {
+ if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
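
In the hunk above, the old test required both CCS (bit 30 of the R3/OCR response) and S18A (bit 24) to be set, i.e. 0x41000000; the new test keys off S18A alone (0x01000000), which is what allows UHS-I signalling on SDSC cards that never set CCS. A small standalone illustration of the two checks, with a hypothetical card response:

#include <stdint.h>
#include <stdio.h>

#define OCR_CCS  (1u << 30)	/* Card Capacity Status (SDHC/SDXC) */
#define OCR_S18A (1u << 24)	/* 1.8V switching accepted */

int main(void)
{
	uint32_t rocr = OCR_S18A;	/* hypothetical SDSC card: S18A set, CCS clear */

	/* Old test rejects this card; new test proceeds with CMD11. */
	printf("old=%d new=%d\n",
	       (rocr & (OCR_CCS | OCR_S18A)) == (OCR_CCS | OCR_S18A),
	       !!(rocr & OCR_S18A));
	return 0;
}
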
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 54205e3be9e8..a2cdb37fcbbe 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -788,6 +788,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT8-B's microSD slot always reports the card being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};

diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 58c977d581e7..6cdadbb3accd 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1813,6 +1813,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
u16 preset = 0;

switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
+ break;
case MMC_TIMING_UHS_SDR12:
preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
break;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0770c036e2ff..960fed78529e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -253,6 +253,7 @@

/* 60-FB reserved */

+#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64
#define SDHCI_PRESET_FOR_SDR12 0x66
#define SDHCI_PRESET_FOR_SDR25 0x68
#define SDHCI_PRESET_FOR_SDR50 0x6A
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index ebbaf6817ec8..7026523f886c 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1214,6 +1214,11 @@ static int seville_probe(struct platform_device *pdev)
felix->info = &seville_info_vsc9953;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Invalid resource\n");
+ goto err_alloc_felix;
+ }
felix->switch_base = res->start;

ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..f9e91304d232 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -423,6 +423,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
int id, ret;

pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
memset(&res, 0, sizeof(res));
memset(&ppd, 0, sizeof(ppd));

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 960def41cc55..2cb73e850a32 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -75,6 +75,8 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME "fec"

+static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
+
/* Pause frame feild and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
@@ -3222,10 +3224,40 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}

+static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+{
+ struct vlan_ethhdr *vhdr;
+ unsigned short vlan_TCI = 0;
+
+ if (skb->protocol == htons(ETH_P_ALL)) {
+ vhdr = (struct vlan_ethhdr *)(skb->data);
+ vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+ }
+
+ return vlan_TCI;
+}
+
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 vlan_tag;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return netdev_pick_tx(ndev, skb, NULL);
+
+ vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+ if (!vlan_tag)
+ return vlan_tag;
+
+ return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_select_queue = fec_enet_select_queue,
.ndo_set_rx_mode = set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 8cc651d37a7f..609e47b8287d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1395,7 +1395,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
u8 phy_type;
int without_mii;

- phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+ phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;

switch (phy_type) {
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
@@ -1515,7 +1515,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -2263,9 +2263,9 @@ static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
- (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
- !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+ (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
+ !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
+ ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
@@ -2920,7 +2920,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

/* Wol magic packet can be enabled from eeprom */
if ((nic->mac >= mac_82558_D101_A4) &&
- (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+ (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
nic->flags |= wol_magic;
device_set_wakeup_enable(&pdev->dev, true);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 7a879614ca55..1dad6c93ac9c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -11,13 +11,14 @@
* operate with the nanosecond field directly without fear of overflow.
*
* Much like the 82599, the update period is dependent upon the link speed:
- * At 40Gb link or no link, the period is 1.6ns.
- * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
+ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
* At 1Gb link, the period is multiplied by 20. (32ns)
* 1588 functionality is not supported at 100Mbps.
*/
#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
#define I40E_PTP_10GB_INCVAL_MULT 2
+#define I40E_PTP_5GB_INCVAL_MULT 2
#define I40E_PTP_1GB_INCVAL_MULT 20

#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
@@ -465,6 +466,9 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
case I40E_LINK_SPEED_10GB:
mult = I40E_PTP_10GB_INCVAL_MULT;
break;
+ case I40E_LINK_SPEED_5GB:
+ mult = I40E_PTP_5GB_INCVAL_MULT;
+ break;
case I40E_LINK_SPEED_1GB:
mult = I40E_PTP_1GB_INCVAL_MULT;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a7975afecf70..14eba9bc174d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3492,13 +3492,9 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf;
-
if (!rc->ring)
return -EINVAL;

- pf = rc->ring->vsi->back;
-
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -3510,7 +3506,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
break;
default:
- dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
return -EINVAL;
}

diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 4ec24c3e813f..c0ee0541e53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -608,7 +608,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* L2 Packet types */
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(2),
ICE_PTT_UNUSED_ENTRY(3),
ICE_PTT_UNUSED_ENTRY(4),
ICE_PTT_UNUSED_ENTRY(5),
@@ -722,7 +722,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* Non Tunneled IPv6 */
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(91),
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 1bed183d96a0..ee3497d25464 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -63,7 +63,7 @@ enum ice_aq_res_ids {
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 5000

enum ice_aq_res_access_type {
ICE_RES_READ = 1,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5c87c0a7ce3d..4b9b5148c916 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2643,7 +2643,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
}

input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = match.key->vlan_priority;
+ input->filter.vlan_tci =
+ (__force __be16)match.key->vlan_priority;
}
}

@@ -6288,12 +6289,12 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
cmd_type |= len | IGB_TXD_DCMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

- olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
+ olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
/* 82575 requires a unique index per ring */
if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;

- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);

@@ -8617,7 +8618,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,

if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);

diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ee9f8c1dca83..07c9e9e0546f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct net_device *netdev,
struct sk_buff *skb,
- u32 status, u16 vlan)
+ u32 status, __le16 vlan)
{
u16 vid;

if (status & E1000_RXD_STAT_VP) {
if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
(status & E1000_RXDEXT_STATERR_LB))
- vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index a9f65d667761..ec9b6c564300 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6871,6 +6871,10 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (has_acpi_companion(&pdev->dev)) {
/* In case the MDIO memory region is declared in
* the ACPI, it can already appear as 'in-use'
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7e1f8660dfec..f327b78261ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1318,7 +1318,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);

- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1375,7 +1376,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64

mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 9025e5f38bb6..fe5476a76464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -118,17 +118,24 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
+ bool p1en;
+ bool p2en;
+
+ p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P1].link_up;
+
+ p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P2].link_up;
+
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P1].link_up) {
- *port1 = 2;
+ if ((!p1en && !p2en) || (p1en && p2en))
return;
- }

- if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P2].link_up)
+ if (p1en)
*port2 = 1;
+ else
+ *port1 = 2;
}

void mlx5_modify_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index caa251d0e381..b27713906d3a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1135,6 +1135,10 @@ static int ks8842_probe(struct platform_device *pdev)
unsigned i;

iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
goto err_mem_region;

diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49fd843c4c8a..a4380c45f668 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -481,14 +481,13 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->pdev = pdev;

- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ndev->base_addr = res->start;
- priv->base = devm_ioremap_resource(p_dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
dev_err(p_dev, "devm_ioremap_resource failed\n");
ret = PTR_ERR(priv->base);
goto init_fail;
}
+ ndev->base_addr = res->start;

spin_lock_init(&priv->txlock);

diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 9a0870dc2f03..2942102efd48 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -107,7 +107,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
unsigned int offset;
- u16 *hi, *id;
+ u16 hi, id;
u32 lo;

if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
@@ -118,14 +118,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
return 0;

- hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
- id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
+ lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
+ id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);

- memcpy(&lo, &hi[1], sizeof(lo));
-
- return (uid_hi == *hi &&
- uid_lo == lo &&
- seqid == *id);
+ return (uid_hi == hi && uid_lo == lo && seqid == id);
}

static void
@@ -135,7 +132,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
struct pci_dev *pdev;
u64 ns;
u32 hi, lo, val;
- u16 uid, seq;

if (!adapter->hwts_rx_en)
return;
@@ -151,10 +147,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
lo = pch_src_uuid_lo_read(pdev);
hi = pch_src_uuid_hi_read(pdev);

- uid = hi & 0xffff;
- seq = (hi >> 16) & 0xffff;
-
- if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+ if (!pch_ptp_match(skb, hi, lo, hi >> 16))
goto out;

ns = pch_rx_snap_read(pdev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a6bf80b52967..9010aabd9782 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3547,7 +3547,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);

rtl_pcie_state_l2l3_disable(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
}

DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 21fa6c0e8873..84041cd587d7 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -402,12 +402,17 @@ static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
return rc;
}

+/* Disable SRIOV and remove VFs
+ * If some VFs are attached to a guest (using Xen only), nothing is
+ * done if force=false, and vports are freed if force=true (for the
+ * non-attached ones only), but SRIOV is not disabled and VFs are not
+ * removed in either case.
+ */
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
- unsigned int vfs_assigned = 0;
-
- vfs_assigned = pci_vfs_assigned(dev);
+ unsigned int vfs_assigned = pci_vfs_assigned(dev);
+ int rc = 0;

if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +422,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)

if (!vfs_assigned)
pci_disable_sriov(dev);
+ else
+ rc = -EBUSY;

efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
- return 0;
+ return rc;
}

int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
@@ -439,7 +446,6 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- unsigned int i;
int rc;

if (!nic_data->vf) {
@@ -449,14 +455,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx)
return;
}

- /* Remove any VFs in the host */
- for (i = 0; i < efx->vf_count; ++i) {
- struct efx_nic *vf_efx = nic_data->vf[i].efx;
-
- if (vf_efx)
- vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
- }
-
+ /* Disable SRIOV and remove any VFs in the host */
rc = efx_ef10_pci_sriov_disable(efx, true);
if (rc)
netif_dbg(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 6eef0f45b133..2b29fd4cbdf4 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -835,6 +835,10 @@ static int ioc3eth_probe(struct platform_device *pdev)
int err;

regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
/* get mac addr from one wire prom */
if (ioc3eth_get_mac_addr(regs, mac_addr))
return -EPROBE_DEFER; /* not available yet */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b2a707e2ef43..678726c62a8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -441,6 +441,12 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}

+ if (!found && !mdio_node) {
+ dev_warn(dev, "No PHY found\n");
+ err = -ENODEV;
+ goto no_phy_found;
+ }
+
/* Try to probe the XPCS by scanning all addresses. */
if (priv->hw->xpcs) {
struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
@@ -449,6 +455,7 @@ int stmmac_mdio_register(struct net_device *ndev)

xpcs->bus = new_bus;

+ found = 0;
for (addr = 0; addr < max_addr; addr++) {
xpcs->addr = addr;

@@ -458,13 +465,12 @@ int stmmac_mdio_register(struct net_device *ndev)
break;
}
}
- }

- if (!found && !mdio_node) {
- dev_warn(dev, "No PHY found\n");
- mdiobus_unregister(new_bus);
- mdiobus_free(new_bus);
- return -ENODEV;
+ if (!found && !mdio_node) {
+ dev_warn(dev, "No XPCS found\n");
+ err = -ENODEV;
+ goto no_xpcs_found;
+ }
}

bus_register_done:
@@ -472,6 +478,9 @@ int stmmac_mdio_register(struct net_device *ndev)

return 0;

+no_xpcs_found:
+no_phy_found:
+ mdiobus_unregister(new_bus);
bus_register_fail:
mdiobus_free(new_bus);
return err;
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 466622664424..e449d9466122 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1262,6 +1262,10 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->interrupt_watch_enable = false;

res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index cd4d993b0bbb..4162a608a3bf 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -589,6 +589,7 @@ static int ipa_firmware_load(struct device *dev)
}

ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 1bd18857e1c5..f0a6bfa61645 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/of_mdio.h>
-#include <linux/phy.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>

@@ -96,14 +96,34 @@ ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
return ipq8064_mdio_wait_busy(priv);
}

+static const struct regmap_config ipq8064_mdio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .can_multi_write = false,
+ /* the mdio lock is used by any user of this mdio driver */
+ .disable_locking = true,
+
+ .cache_type = REGCACHE_NONE,
+};
+
static int
ipq8064_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ipq8064_mdio *priv;
+ struct resource res;
struct mii_bus *bus;
+ void __iomem *base;
int ret;

+ if (of_address_to_resource(np, 0, &res))
+ return -ENOMEM;
+
+ base = ioremap(res.start, resource_size(&res));
+ if (!base)
+ return -ENOMEM;
+
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
if (!bus)
return -ENOMEM;
@@ -115,15 +135,10 @@ ipq8064_mdio_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;

priv = bus->priv;
- priv->base = device_node_to_regmap(np);
- if (IS_ERR(priv->base)) {
- if (priv->base == ERR_PTR(-EPROBE_DEFER))
- return -EPROBE_DEFER;
-
- dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
- priv->base);
+ priv->base = devm_regmap_init_mmio(&pdev->dev, base,
+ &ipq8064_mdio_regmap_config);
+ if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- }

ret = of_mdiobus_register(bus, np);
if (ret)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 575580d3ffe0..b4879306bb8a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -246,6 +246,19 @@ static int rtl8211f_config_init(struct phy_device *phydev)
return 0;
}

+static int rtl821x_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ msleep(20);
+
+ return 0;
+}
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
int ret = 0, oldpage;
@@ -624,7 +637,7 @@ static struct phy_driver realtek_drvs[] = {
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtl821x_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 286f836a53bf..91e0e6254a01 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -660,6 +660,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
len -= vi->hdr_len;
stats->bytes += len;

+ if (unlikely(len > GOOD_PACKET_LEN)) {
+ pr_debug("%s: rx error: len %u exceeds max size %d\n",
+ dev->name, len, GOOD_PACKET_LEN);
+ dev->stats.rx_length_errors++;
+ goto err_len;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -763,6 +769,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
+err_len:
stats->drops++;
put_page(page);
xdp_xmit:
@@ -816,6 +823,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = NULL;
stats->bytes += len - vi->hdr_len;

+ if (unlikely(len > truesize)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)ctx);
+ dev->stats.rx_length_errors++;
+ goto err_skb;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -943,13 +956,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();

- if (unlikely(len > truesize)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
- dev->stats.rx_length_errors++;
- goto err_skb;
- }
-
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
metasize);
curr_skb = head_skb;
@@ -1557,7 +1563,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
virtio_is_little_endian(vi->vdev), false,
0))
- BUG();
+ return -EPROTO;

if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index cc0c30ceaa0d..63d70aecbd0f 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4603,13 +4603,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
if (ret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
arvif->vdev_id, vif->addr);
- return ret;
+ goto err;
}

ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (ret)
- return ret;
+ goto err;

ar->num_peers--;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d42165559df6..8cba923b1ec6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3794,6 +3794,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct cfg80211_chan_def chandef;
struct iwl_mvm_phy_ctxt *phy_ctxt;
+ bool band_change_removal;
int ret, i;

IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3874,19 +3875,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);

/*
- * Change the PHY context configuration as it is currently referenced
- * only by the P2P Device MAC
+ * Check if the remain-on-channel is on a different band and that
+ * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+ * so, we'll need to release and then re-configure here, since we
+ * must not remove a PHY context that's part of a binding.
*/
- if (mvmvif->phy_ctxt->ref == 1) {
+ band_change_removal =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+ if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+ /*
+ * Change the PHY context configuration as it is currently
+ * referenced only by the P2P Device MAC (and we can modify it)
+ */
ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
&chandef, 1, 1);
if (ret)
goto out_unlock;
} else {
/*
- * The PHY context is shared with other MACs. Need to remove the
- * P2P Device from the binding, allocate an new PHY context and
- * create a new binding
+ * The PHY context is shared with other MACs (or we're trying to
+ * switch bands), so remove the P2P Device from the binding,
+ * allocate a new PHY context and create a new binding.
*/
phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!phy_ctxt) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 3939eccd3d5a..394598b14a17 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -345,6 +345,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ !te_data->vif->bss_conf.assoc ?
+ "Not associated and the time event is over already..." :
"No beacon heard and the time event is over already...");
break;
default:
@@ -843,6 +845,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
+ !vif->bss_conf.assoc ?
+ "Not associated and the session protection is over already..." :
"No beacon heard and the session protection is over already...");
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, te_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index ec1d6025081d..56f63f5f5dd3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -126,7 +126,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
- void *iml_img;
u32 control_flags = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -234,14 +233,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
trans_pcie->prph_scratch = prph_scratch;

/* Allocate IML */
- iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
- &trans_pcie->iml_dma_addr, GFP_KERNEL);
- if (!iml_img) {
+ trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+ &trans_pcie->iml_dma_addr,
+ GFP_KERNEL);
+ if (!trans_pcie->iml) {
ret = -ENOMEM;
goto err_free_ctxt_info;
}

- memcpy(iml_img, trans->iml, trans->iml_len);
+ memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

iwl_enable_fw_load_int_ctx_info(trans);

@@ -290,6 +290,11 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
trans_pcie->ctxt_info_dma_addr = 0;
trans_pcie->ctxt_info_gen3 = NULL;

+ dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+ trans_pcie->iml_dma_addr);
+ trans_pcie->iml_dma_addr = 0;
+ trans_pcie->iml = NULL;
+
iwl_pcie_ctxt_info_free_fw_img(trans);

dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index ff542d2f0054..f05025e8d11d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -336,6 +336,8 @@ struct cont_rec {
* Context information addresses will be taken from here.
* This is driver's local copy for keeping track of size and
* count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @kw: keep warm address
@@ -388,6 +390,7 @@ struct iwl_trans_pcie {
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
+ void *iml;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 4c3ca2a37696..b031e9304983 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -269,7 +269,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
*/
- iwl_pcie_ctxt_info_free(trans);
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_free(trans);

/*
* Re-enable all the interrupts, including the RF-Kill one, now that
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f147d4feedb9..4ca0b06d09ad 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -557,6 +557,7 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];

struct mac_address addresses[2];
+ struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -1187,7 +1188,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)

static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
- int dst_portid)
+ int dst_portid,
+ struct ieee80211_channel *channel)
{
struct sk_buff *skb;
struct mac80211_hwsim_data *data = hw->priv;
@@ -1242,7 +1244,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
goto nla_put_failure;

- if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
+ if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
goto nla_put_failure;

/* We get the tx control (rate and retries) info*/
@@ -1589,7 +1591,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
_portid = READ_ONCE(data->wmediumd);

if (_portid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);

/* NO wmediumd detected, perfect medium simulation */
data->tx_pkts++;
@@ -1705,7 +1707,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);

if (_pid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);

mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
dev_kfree_skb(skb);
@@ -2444,6 +2446,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = ctx;
+ mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2455,6 +2462,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = NULL;
+ mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@@ -2467,6 +2479,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+ hwsim->chanctx = ctx;
+ mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -3059,6 +3076,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
+ data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@@ -3566,6 +3584,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
int frame_data_len;
void *frame_data;
struct sk_buff *skb = NULL;
+ struct ieee80211_channel *channel = NULL;

if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
@@ -3592,6 +3611,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;

+ if (data2->use_chanctx) {
+ if (data2->tmp_chan)
+ channel = data2->tmp_chan;
+ else if (data2->chanctx)
+ channel = data2->chanctx->def.chan;
+ } else {
+ channel = data2->channel;
+ }
+ if (!channel)
+ goto out;
+
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
data2->netgroup)
@@ -3603,7 +3633,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,

/* check if radio is configured properly */

- if (data2->idle || !data2->started)
+ if ((data2->idle && !data2->tmp_chan) || !data2->started)
goto out;

/* A frame is received from user space */
@@ -3616,18 +3646,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
mutex_lock(&data2->mutex);
rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);

- if (rx_status.freq != data2->channel->center_freq &&
- (!data2->tmp_chan ||
- rx_status.freq != data2->tmp_chan->center_freq)) {
+ if (rx_status.freq != channel->center_freq) {
mutex_unlock(&data2->mutex);
goto out;
}
mutex_unlock(&data2->mutex);
} else {
- rx_status.freq = data2->channel->center_freq;
+ rx_status.freq = channel->center_freq;
}

- rx_status.band = data2->channel->band;
+ rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);

diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 5795e44f8a52..f44f478bb970 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1177,22 +1177,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
- bool probe, ampdu, cck = false;
+ bool ampdu, cck = false;
bool rs_idx;
u32 rate_set_tsf;
u32 final_rate, final_rate_flags, final_nss, txs;

- fixed_rate = info->status.rates[0].count;
- probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
txs = le32_to_cpu(txs_data[1]);
- ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+ ampdu = txs & MT_TXS1_AMPDU;

txs = le32_to_cpu(txs_data[3]);
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

txs = le32_to_cpu(txs_data[0]);
+ fixed_rate = txs & MT_TXS0_FIXED_RATE;
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

@@ -1214,7 +1212,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,

first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

- if (fixed_rate && !probe) {
+ if (fixed_rate) {
info->status.rates[0].count = count;
i = 0;
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 0232b66acb4f..8f01ca1694bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -335,6 +335,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
if (nss < 2)
return;

+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+
if (vif != NL80211_IFTYPE_AP)
return;

@@ -348,9 +351,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB;
elem->phy_cap_info[6] |= c;
-
- /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
- elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
}

static void
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index d6d1be4169e5..acb6b0cd3667 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
u8 usb_optional_function;
u8 res9[2];
u8 mac_addr[ETH_ALEN]; /* 0xd7 */
- u8 res10[2];
- u8 vendor_name[7];
- u8 res11[2];
- u8 device_name[0x0b]; /* 0xe8 */
- u8 res12[2];
- u8 serial[0x0b]; /* 0xf5 */
- u8 res13[0x30];
+ u8 device_info[80];
+ u8 res11[3];
u8 unknown[0x0d]; /* 0x130 */
- u8 res14[0xc3];
+ u8 res12[0xc3];
};

struct rtl8xxxu_reg8val {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 9f1f93d04145..199e7e031d7d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
}

+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+ char *record_name,
+ char *device_info,
+ unsigned int *record_offset)
+{
+ char *record = device_info + *record_offset;
+
+ /* A record is [ total length | 0x03 | value ] */
+ unsigned char l = record[0];
+
+ /*
+ * The whole device info section seems to be 80 characters; make sure
+ * we don't read past it.
+ */
+ if (*record_offset + l > 80) {
+ dev_warn(&priv->udev->dev,
+ "invalid record length %d while parsing \"%s\" at offset %u.\n",
+ l, record_name, *record_offset);
+ return;
+ }
+
+ if (l >= 2) {
+ char value[80];
+
+ memcpy(value, &record[2], l - 2);
+ value[l - 2] = '\0';
+ dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+ *record_offset = *record_offset + l;
+ } else {
+ dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+ }
+}
+
static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+ unsigned int record_offset;
int i;

if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
priv->has_xtalk = 1;
priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;

- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
- if (memchr_inv(efuse->serial, 0xff, 11))
- dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
- else
- dev_info(&priv->udev->dev, "Serial not available.\n");
+ /*
+ * device_info section seems to be laid out as records
+ * [ total length | 0x03 | value ] so:
+ * - vendor length + 2
+ * - 0x03
+ * - vendor string (not null terminated)
+ * - product length + 2
+ * - 0x03
+ * - product string (not null terminated)
+ * Then there are one or two 0x00 bytes on all four devices I own or
+ * found dumped online.
+ * As the previous version of the code handled an optional serial
+ * string, I now assume there may be a third record if the
+ * length is not 0.
+ */
+ record_offset = 0;
+ rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+ rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+ rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);

if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
unsigned char *raw = priv->efuse_wifi.raw;
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index b65ec14136c7..4c30b5772ce0 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -53,6 +53,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
{ /* end: all zeroes */ },
};
+MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);

/* hwbus_ops implemetation */

diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 9547aea01b0f..ea0215246c5c 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -466,9 +466,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
cmd->channels[i].channel = channels[i]->hw_value;
}

- cmd->params.ssid_len = ssid_len;
- if (ssid)
- memcpy(cmd->params.ssid, ssid, ssid_len);
+ if (ssid) {
+ int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+ cmd->params.ssid_len = len;
+ memcpy(cmd->params.ssid, ssid, len);
+ }

ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 9d7dbfe7fe0c..c6da0cfb4afb 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
u32 mac1, mac2;
int ret;

+ /* Device may be in ELP from the bootloader or kexec */
+ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
+ usleep_range(500000, 700000);
+
ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
if (ret < 0)
goto out;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9b6ab83956c3..bb8fb2b3711d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -549,15 +549,17 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
continue;
if (len < 2 * sizeof(u32)) {
dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ of_node_put(child);
return -EINVAL;
}

cell = kzalloc(sizeof(*cell), GFP_KERNEL);
- if (!cell)
+ if (!cell) {
+ of_node_put(child);
return -ENOMEM;
+ }

cell->nvmem = nvmem;
- cell->np = of_node_get(child);
cell->offset = be32_to_cpup(addr++);
cell->bytes = be32_to_cpup(addr);
cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -578,11 +580,12 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
cell->name, nvmem->stride);
/* Cells already added will be freed later. */
kfree_const(cell->name);
- of_node_put(cell->np);
kfree(cell);
+ of_node_put(child);
return -EINVAL;
}

+ cell->np = of_node_get(child);
nvmem_cell_add(cell);
}

diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 41be72c74e3a..b1b41b61e0bd 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -56,7 +56,7 @@
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
-#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
@@ -124,6 +124,7 @@
#define LTSSM_MASK 0x3f
#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300
+#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
@@ -385,6 +386,16 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= (IS_RC_MSK << IS_RC_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

+ /*
+ * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
+ * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+ * id in high 16 bits. Updating this register changes readback value of
+ * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+ * for erratum 4.1: "The value of device and vendor ID is incorrect".
+ */
+ reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+ advk_writel(pcie, reg, VENDOR_ID_REG);
+
/* Set Advanced Error Capabilities and Control PF0 register */
reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 16fb3d7714d5..41bcdfac03d8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -27,6 +27,7 @@
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -3667,6 +3668,16 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
return;
if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
return;
+
+ /*
+ * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
+ * We don't know how to turn it back on again, but firmware does,
+ * so we can only use SXIO/SXFP/SXLF if we're suspending via
+ * firmware.
+ */
+ if (!pm_suspend_via_firmware())
+ return;
+
bridge = ACPI_HANDLE(&dev->dev);
if (!bridge)
return;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 899c16c17b6d..ef49402c0623 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -952,6 +952,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
static const struct acpi_device_id amd_gpio_acpi_match[] = {
{ "AMD0030", 0 },
{ "AMDI0030", 0},
+ { "AMDI0031", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 067271b7d35a..ac1c47f542c1 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -929,6 +929,7 @@ static const struct of_device_id eqbr_pinctrl_dt_match[] = {
{ .compatible = "intel,lgm-io" },
{}
};
+MODULE_DEVICE_TABLE(of, eqbr_pinctrl_dt_match);

static struct platform_driver eqbr_pinctrl_driver = {
.probe = eqbr_pinctrl_probe,
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index ce2d8014b7e0..d0259577934e 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -351,6 +351,11 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_INTF, &intf))
goto unlock;

+ if (intf == 0) {
+ /* There is no interrupt pending */
+ goto unlock;
+ }
+
if (mcp_read(mcp, MCP_INTCAP, &intcap))
goto unlock;

@@ -368,11 +373,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
mcp->cached_gpio = gpio;
mutex_unlock(&mcp->lock);

- if (intf == 0) {
- /* There is no interrupt pending */
- return IRQ_HANDLED;
- }
-
dev_dbg(mcp->chip.parent,
"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
intcap, intf, gpio_orig, gpio);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 41b8192d207d..41023fc4bf2d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3089,9 +3089,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
}
}

-static void iscsi_start_session_recovery(struct iscsi_session *session,
- struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
int old_stop_stage;

mutex_lock(&session->eh_mutex);
@@ -3149,27 +3150,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_session *session = conn->session;
-
- switch (flag) {
- case STOP_CONN_RECOVER:
- cls_conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- cls_conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- return;
- }
-
- iscsi_start_session_recovery(session, conn, flag);
-}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);

int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index c520239082fc..2735178f15c7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2479,9 +2479,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
* it works.
*/
mutex_lock(&conn_mutex);
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ conn->state = ISCSI_CONN_FAILED;
+ break;
+ case STOP_CONN_TERM:
+ conn->state = ISCSI_CONN_DOWN;
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "invalid stop flag %d\n", flag);
+ goto unlock;
+ }
+
conn->transport->stop_conn(conn, flag);
+unlock:
mutex_unlock(&conn_mutex);
-
}

static void stop_conn_work_fn(struct work_struct *work)
@@ -2906,6 +2919,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
default:
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
+ if ((conn->state == ISCSI_CONN_BOUND) ||
+ (conn->state == ISCSI_CONN_UP)) {
+ err = transport->set_param(conn, ev->u.set_param.param,
+ data, ev->u.set_param.len);
+ } else {
+ return -ENOTCONN;
+ }
}

return err;
@@ -2965,6 +2985,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
mutex_lock(&conn->ep_mutex);
conn->ep = NULL;
mutex_unlock(&conn->ep_mutex);
+ conn->state = ISCSI_CONN_FAILED;
}

transport->ep_disconnect(ep);
@@ -3732,6 +3753,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_BOUND;
mutex_unlock(&conn_mutex);

if (ev->r.retcode || !transport->ep_connect)
@@ -3971,7 +3994,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
static const char *const connection_state_names[] = {
[ISCSI_CONN_UP] = "up",
[ISCSI_CONN_DOWN] = "down",
- [ISCSI_CONN_FAILED] = "failed"
+ [ISCSI_CONN_FAILED] = "failed",
+ [ISCSI_CONN_BOUND] = "bound"
};

static ssize_t show_conn_state(struct device *dev,
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 81e8b15ef405..74158fa660dd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -156,24 +156,27 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
if (err)
return err;

- val = (val >> 24) & 0xff;
+ val = (val >> 24) & 0x3f;
return sprintf(buf, "%d\n", (int)val);
}

-static int tcc_offset_update(int tcc)
+static int tcc_offset_update(unsigned int tcc)
{
u64 val;
int err;

- if (!tcc)
+ if (tcc > 63)
return -EINVAL;

err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
if (err)
return err;

- val &= ~GENMASK_ULL(31, 24);
- val |= (tcc & 0xff) << 24;
+ if (val & BIT(31))
+ return -EPERM;
+
+ val &= ~GENMASK_ULL(29, 24);
+ val |= (tcc & 0x3f) << 24;

err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
if (err)
@@ -182,14 +185,15 @@ static int tcc_offset_update(int tcc)
return 0;
}

-static int tcc_offset_save;
+static unsigned int tcc_offset_save;

static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
+ unsigned int tcc;
u64 val;
- int tcc, err;
+ int err;

err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
if (err)
@@ -198,7 +202,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
if (!(val & BIT(30)))
return -EACCES;

- if (kstrtoint(buf, 0, &tcc))
+ if (kstrtouint(buf, 0, &tcc))
return -EINVAL;

err = tcc_offset_update(tcc);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 7cae226b000f..115a77b96e5e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1480,6 +1480,7 @@ struct ext4_sb_info {
struct kobject s_kobj;
struct completion s_kobj_unregister;
struct super_block *s_sb;
+ struct buffer_head *s_mmp_bh;

/* Journaling */
struct journal_s *s_journal;
@@ -3624,6 +3625,9 @@ extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
/* mmp.c */
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);

+/* mmp.c */
+extern void ext4_stop_mmpd(struct ext4_sb_info *sbi);
+
/* verity.c */
extern const struct fsverity_operations ext4_verityops;

diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 68fbeedd627b..6cb598b549ca 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -127,9 +127,9 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
*/
static int kmmpd(void *data)
{
- struct super_block *sb = ((struct mmpd_data *) data)->sb;
- struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
+ struct super_block *sb = (struct super_block *) data;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
struct mmp_struct *mmp;
ext4_fsblk_t mmp_block;
u32 seq = 0;
@@ -245,12 +245,18 @@ static int kmmpd(void *data)
retval = write_mmp_block(sb, bh);

exit_thread:
- EXT4_SB(sb)->s_mmp_tsk = NULL;
- kfree(data);
- brelse(bh);
return retval;
}

+void ext4_stop_mmpd(struct ext4_sb_info *sbi)
+{
+ if (sbi->s_mmp_tsk) {
+ kthread_stop(sbi->s_mmp_tsk);
+ brelse(sbi->s_mmp_bh);
+ sbi->s_mmp_tsk = NULL;
+ }
+}
+
/*
* Get a random new sequence number but make sure it is not greater than
* EXT4_MMP_SEQ_MAX.
@@ -275,7 +281,6 @@ int ext4_multi_mount_protect(struct super_block *sb,
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
struct buffer_head *bh = NULL;
struct mmp_struct *mmp = NULL;
- struct mmpd_data *mmpd_data;
u32 seq;
unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
unsigned int wait_time = 0;
@@ -364,24 +369,17 @@ int ext4_multi_mount_protect(struct super_block *sb,
goto failed;
}

- mmpd_data = kmalloc(sizeof(*mmpd_data), GFP_KERNEL);
- if (!mmpd_data) {
- ext4_warning(sb, "not enough memory for mmpd_data");
- goto failed;
- }
- mmpd_data->sb = sb;
- mmpd_data->bh = bh;
+ EXT4_SB(sb)->s_mmp_bh = bh;

/*
* Start a kernel thread to update the MMP block periodically.
*/
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
(int)sizeof(mmp->mmp_bdevname),
bdevname(bh->b_bdev,
mmp->mmp_bdevname));
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
EXT4_SB(sb)->s_mmp_tsk = NULL;
- kfree(mmpd_data);
ext4_warning(sb, "Unable to create kmmpd thread for %s.",
sb->s_id);
goto failed;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4956917b7cc2..099e4afa41e5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1260,8 +1260,8 @@ static void ext4_put_super(struct super_block *sb)
ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
sbi->s_ea_block_cache = NULL;

- if (sbi->s_mmp_tsk)
- kthread_stop(sbi->s_mmp_tsk);
+ ext4_stop_mmpd(sbi);
+
brelse(sbi->s_sbh);
sb->s_fs_info = NULL;
/*
@@ -5173,8 +5173,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_es_unregister_shrinker(sbi);
failed_mount3:
del_timer_sync(&sbi->s_err_report);
- if (sbi->s_mmp_tsk)
- kthread_stop(sbi->s_mmp_tsk);
+ ext4_stop_mmpd(sbi);
failed_mount2:
rcu_read_lock();
group_desc = rcu_dereference(sbi->s_group_desc);
@@ -5927,8 +5926,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
*/
ext4_mark_recovery_complete(sb, es);
}
- if (sbi->s_mmp_tsk)
- kthread_stop(sbi->s_mmp_tsk);
+ ext4_stop_mmpd(sbi);
} else {
/* Make sure we can mount this feature set readwrite */
if (ext4_has_feature_readonly(sb) ||
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 69a390c6064c..2d7799bd30b1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3462,6 +3462,8 @@ void f2fs_destroy_garbage_collection_cache(void);
*/
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
+int __init f2fs_create_recovery_cache(void);
+void f2fs_destroy_recovery_cache(void);

/*
* debug.c
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 4f12ade6410a..72ce13111679 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -777,13 +777,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

- fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
- sizeof(struct fsync_inode_entry));
- if (!fsync_entry_slab) {
- err = -ENOMEM;
- goto out;
- }
-
INIT_LIST_HEAD(&inode_list);
INIT_LIST_HEAD(&tmp_inode_list);
INIT_LIST_HEAD(&dir_list);
@@ -856,8 +849,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
}
}

- kmem_cache_destroy(fsync_entry_slab);
-out:
#ifdef CONFIG_QUOTA
/* Turn quotas off */
if (quota_enabled)
@@ -867,3 +858,17 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)

return ret ? ret: err;
}
+
+int __init f2fs_create_recovery_cache(void)
+{
+ fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
+ sizeof(struct fsync_inode_entry));
+ if (!fsync_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_recovery_cache(void)
+{
+ kmem_cache_destroy(fsync_entry_slab);
+}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index abc469dd9aea..4af02719bb14 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -4027,9 +4027,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_checkpoint_caches();
if (err)
goto free_segment_manager_caches;
- err = f2fs_create_extent_cache();
+ err = f2fs_create_recovery_cache();
if (err)
goto free_checkpoint_caches;
+ err = f2fs_create_extent_cache();
+ if (err)
+ goto free_recovery_cache;
err = f2fs_create_garbage_collection_cache();
if (err)
goto free_extent_cache;
@@ -4078,6 +4081,8 @@ static int __init init_f2fs_fs(void)
f2fs_destroy_garbage_collection_cache();
free_extent_cache:
f2fs_destroy_extent_cache();
+free_recovery_cache:
+ f2fs_destroy_recovery_cache();
free_checkpoint_caches:
f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
@@ -4103,6 +4108,7 @@ static void __exit exit_f2fs_fs(void)
f2fs_exit_sysfs();
f2fs_destroy_garbage_collection_cache();
f2fs_destroy_extent_cache();
+ f2fs_destroy_recovery_cache();
f2fs_destroy_checkpoint_caches();
f2fs_destroy_segment_manager_caches();
f2fs_destroy_node_manager_caches();
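
The f2fs hunks above move the fsync_inode_entry slab from a per-recovery create/destroy to the usual module-lifetime pattern: create the cache once at module init, destroy it at module exit, and unwind with goto labels if a later init step fails. A minimal sketch of that pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/slab.h>

struct demo_rec {
        int a, b;
};

static struct kmem_cache *demo_cache;

static int __init demo_create_cache(void)
{
        /* One cache for the whole module lifetime, like the recovery cache. */
        demo_cache = kmem_cache_create("demo_rec", sizeof(struct demo_rec),
                                       0, SLAB_RECLAIM_ACCOUNT, NULL);
        return demo_cache ? 0 : -ENOMEM;
}

static void demo_destroy_cache(void)
{
        kmem_cache_destroy(demo_cache); /* tolerates NULL */
}

static int __init demo_init(void)
{
        int err = demo_create_cache();

        if (err)
                return err;
        /* ... further init steps, each with its own unwind label ... */
        return 0;
}

static void __exit demo_exit(void)
{
        demo_destroy_cache();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
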
diff --git a/fs/io-wq.c b/fs/io-wq.c
index f72d53848dcb..8bb17b6d4de3 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -299,7 +299,8 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
* Most likely an attempt to queue unbounded work on an io_wq that
* wasn't setup with any unbounded workers.
*/
- WARN_ON_ONCE(!acct->max_workers);
+ if (unlikely(!acct->max_workers))
+ pr_warn_once("io-wq is not configured for unbound workers");

rcu_read_lock();
ret = io_wqe_activate_free_worker(wqe);
@@ -1085,6 +1086,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)

if (WARN_ON_ONCE(!data->free_work || !data->do_work))
return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(!bounded))
+ return ERR_PTR(-EINVAL);

wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0138aa713317..42153106b7bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -344,9 +344,10 @@ struct io_ring_ctx {
struct socket *ring_sock;
#endif

- struct idr io_buffer_idr;
+ struct xarray io_buffers;

- struct idr personality_idr;
+ struct xarray personalities;
+ u32 pers_next;

struct {
unsigned cached_cq_tail;
@@ -1211,8 +1212,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->ref_comp);
init_completion(&ctx->sq_thread_comp);
- idr_init(&ctx->io_buffer_idr);
- idr_init(&ctx->personality_idr);
+ xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
spin_lock_init(&ctx->completion_lock);
@@ -2086,7 +2087,6 @@ static void __io_req_task_submit(struct io_kiocb *req)
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);

- ctx->flags &= ~IORING_SETUP_R_DISABLED;
if (ctx->flags & IORING_SETUP_SQPOLL)
io_sq_thread_drop_mm();
}
@@ -2989,7 +2989,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,

lockdep_assert_held(&req->ctx->uring_lock);

- head = idr_find(&req->ctx->io_buffer_idr, bgid);
+ head = xa_load(&req->ctx->io_buffers, bgid);
if (head) {
if (!list_empty(&head->list)) {
kbuf = list_last_entry(&head->list, struct io_buffer,
@@ -2997,7 +2997,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
list_del(&kbuf->list);
} else {
kbuf = head;
- idr_remove(&req->ctx->io_buffer_idr, bgid);
+ xa_erase(&req->ctx->io_buffers, bgid);
}
if (*len > kbuf->len)
*len = kbuf->len;
@@ -3959,7 +3959,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
}
i++;
kfree(buf);
- idr_remove(&ctx->io_buffer_idr, bgid);
+ xa_erase(&ctx->io_buffers, bgid);

return i;
}
@@ -3977,7 +3977,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
lockdep_assert_held(&ctx->uring_lock);

ret = -ENOENT;
- head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ head = xa_load(&ctx->io_buffers, p->bgid);
if (head)
ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
if (ret < 0)
@@ -4068,21 +4068,14 @@ static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,

lockdep_assert_held(&ctx->uring_lock);

- list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ list = head = xa_load(&ctx->io_buffers, p->bgid);

ret = io_add_buffers(p, &head);
- if (ret < 0)
- goto out;
-
- if (!list) {
- ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
- GFP_KERNEL);
- if (ret < 0) {
+ if (ret >= 0 && !list) {
+ ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
+ if (ret < 0)
__io_remove_buffers(ctx, head, p->bgid, -1U);
- goto out;
- }
}
-out:
if (ret < 0)
req_set_fail_links(req);

@@ -6629,7 +6622,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (id) {
struct io_identity *iod;

- iod = idr_find(&ctx->personality_idr, id);
+ iod = xa_load(&ctx->personalities, id);
if (unlikely(!iod))
return -EINVAL;
refcount_inc(&iod->count);
@@ -7998,6 +7991,7 @@ static void io_sq_offload_start(struct io_ring_ctx *ctx)
{
struct io_sq_data *sqd = ctx->sq_data;

+ ctx->flags &= ~IORING_SETUP_R_DISABLED;
if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
wake_up_process(sqd->thread);
}
@@ -8410,19 +8404,13 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
return -ENXIO;
}

-static int __io_destroy_buffers(int id, void *p, void *data)
-{
- struct io_ring_ctx *ctx = data;
- struct io_buffer *buf = p;
-
- __io_remove_buffers(ctx, buf, id, -1U);
- return 0;
-}
-
static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
- idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
- idr_destroy(&ctx->io_buffer_idr);
+ struct io_buffer *buf;
+ unsigned long index;
+
+ xa_for_each(&ctx->io_buffers, index, buf)
+ __io_remove_buffers(ctx, buf, index, -1U);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8445,7 +8433,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
- idr_destroy(&ctx->personality_idr);

#if defined(CONFIG_UNIX)
if (ctx->ring_sock) {
@@ -8505,18 +8492,19 @@ static int io_uring_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

-static int io_remove_personalities(int id, void *p, void *data)
+static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
- struct io_ring_ctx *ctx = data;
struct io_identity *iod;

- iod = idr_remove(&ctx->personality_idr, id);
+ iod = xa_erase(&ctx->personalities, id);
if (iod) {
put_cred(iod->creds);
if (refcount_dec_and_test(&iod->count))
kfree(iod);
+ return 0;
}
- return 0;
+
+ return -EINVAL;
}

static void io_ring_exit_work(struct work_struct *work)
@@ -8545,6 +8533,9 @@ static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
+ unsigned long index;
+ struct io_identity *iod;
+
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
/* if force is set, the ring is going away. always drop after that */
@@ -8565,7 +8556,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)

/* if we failed setting up the ctx, we might not have any rings */
io_iopoll_try_reap_events(ctx);
- idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+ xa_for_each(&ctx->personalities, index, iod)
+ io_unregister_personality(ctx, index);

/*
* Do this upfront, so we won't have a grace period where the ring
@@ -9128,11 +9120,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
}

#ifdef CONFIG_PROC_FS
-static int io_uring_show_cred(int id, void *p, void *data)
+static int io_uring_show_cred(struct seq_file *m, unsigned int id,
+ const struct io_identity *iod)
{
- struct io_identity *iod = p;
const struct cred *cred = iod->creds;
- struct seq_file *m = data;
struct user_namespace *uns = seq_user_ns(m);
struct group_info *gi;
kernel_cap_t cap;
@@ -9200,9 +9191,13 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
(unsigned int) buf->len);
}
- if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
+ if (has_lock && !xa_empty(&ctx->personalities)) {
+ unsigned long index;
+ const struct io_identity *iod;
+
seq_printf(m, "Personalities:\n");
- idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
+ xa_for_each(&ctx->personalities, index, iod)
+ io_uring_show_cred(m, index, iod);
}
seq_printf(m, "PollList:\n");
spin_lock_irq(&ctx->completion_lock);
@@ -9588,39 +9583,26 @@ static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)

static int io_register_personality(struct io_ring_ctx *ctx)
{
- struct io_identity *id;
+ struct io_identity *iod;
+ u32 id;
int ret;

- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (unlikely(!id))
+ iod = kmalloc(sizeof(*iod), GFP_KERNEL);
+ if (unlikely(!iod))
return -ENOMEM;

- io_init_identity(id);
- id->creds = get_current_cred();
+ io_init_identity(iod);
+ iod->creds = get_current_cred();

- ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
- if (ret < 0) {
- put_cred(id->creds);
- kfree(id);
- }
+ ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod,
+ XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
+ if (!ret)
+ return id;
+ put_cred(iod->creds);
+ kfree(iod);
return ret;
}

-static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
-{
- struct io_identity *iod;
-
- iod = idr_remove(&ctx->personality_idr, id);
- if (iod) {
- put_cred(iod->creds);
- if (refcount_dec_and_test(&iod->count))
- kfree(iod);
- return 0;
- }
-
- return -EINVAL;
-}
-
static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
unsigned int nr_args)
{
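
The io_uring changes above replace the two IDRs with XArrays using the standard allocation pattern: XA_FLAGS_ALLOC1 so IDs start at 1, xa_alloc_cyclic() with a per-array cursor so freed IDs are not immediately reused, and plain xa_load()/xa_erase()/xa_for_each() for lookup, removal and teardown. A stand-alone sketch of that pattern, with hypothetical names and none of the io_uring specifics:

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct demo_entry {
        int payload;
};

static DEFINE_XARRAY_ALLOC1(demo_xa);   /* IDs start at 1 */
static u32 demo_next;                   /* cyclic allocation cursor */

static int demo_register(int payload)
{
        struct demo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);
        u32 id;
        int ret;

        if (!e)
                return -ENOMEM;
        e->payload = payload;

        /* Returns 0, or 1 if the ID space wrapped; negative on error. */
        ret = xa_alloc_cyclic(&demo_xa, &id, e, XA_LIMIT(0, USHRT_MAX),
                              &demo_next, GFP_KERNEL);
        if (ret < 0) {
                kfree(e);
                return ret;
        }
        return id;
}

static void demo_unregister(u32 id)
{
        struct demo_entry *e = xa_erase(&demo_xa, id);

        kfree(e);               /* kfree(NULL) is a no-op */
}

static void demo_drain(void)
{
        struct demo_entry *e;
        unsigned long index;

        xa_for_each(&demo_xa, index, e)
                demo_unregister(index);
}
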
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 6f65bfa9f18d..b0eb9c85eea0 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -151,7 +151,8 @@ void jfs_evict_inode(struct inode *inode)
if (test_cflag(COMMIT_Freewmap, inode))
jfs_free_zero_link(inode);

- diFree(inode);
+ if (JFS_SBI(inode->i_sb)->ipimap)
+ diFree(inode);

/*
* Free the inode from the quota allocation.
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e98f99338f8f..df5fc12a6cee 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2760,6 +2760,20 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}

+ /*
+ * Sanity check to see if journal first block is correct.
+ * If journal first block is invalid it can cause
+ * zeroing of important superblock members.
+ */
+ if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
+ SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) {
+ reiserfs_warning(sb, "journal-1393",
+ "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d",
+ SB_JOURNAL_1st_RESERVED_BLOCK(sb),
+ SB_ONDISK_JOURNAL_1st_BLOCK(sb));
+ goto free_and_return;
+ }
+
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
reiserfs_warning(sb, "sh-462",
"unable to initialize journal device");
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index dacbb999ae34..cfd46753a685 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -275,6 +275,7 @@ static struct inode *ubifs_alloc_inode(struct super_block *sb)
memset((void *)ui + sizeof(struct inode), 0,
sizeof(struct ubifs_inode) - sizeof(struct inode));
mutex_init(&ui->ui_mutex);
+ init_rwsem(&ui->xattr_sem);
spin_lock_init(&ui->ui_lock);
return &ui->vfs_inode;
};
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 4ffd832e3b93..e7e48f3b179a 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -356,6 +356,7 @@ struct ubifs_gced_idx_leb {
* @ui_mutex: serializes inode write-back with the rest of VFS operations,
* serializes "clean <-> dirty" state changes, serializes bulk-read,
* protects @dirty, @bulk_read, @ui_size, and @xattr_size
+ * @xattr_sem: serializes write operations (remove|set|create) on xattr
* @ui_lock: protects @synced_i_size
* @synced_i_size: synchronized size of inode, i.e. the value of inode size
* currently stored on the flash; used only for regular file
@@ -409,6 +410,7 @@ struct ubifs_inode {
unsigned int bulk_read:1;
unsigned int compr_type:2;
struct mutex ui_mutex;
+ struct rw_semaphore xattr_sem;
spinlock_t ui_lock;
loff_t synced_i_size;
loff_t ui_size;
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index a0b9b349efe6..09280796fc61 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -285,6 +285,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
if (!xent)
return -ENOMEM;

+ down_write(&ubifs_inode(host)->xattr_sem);
/*
* The extended attribute entries are stored in LNC, so multiple
* look-ups do not involve reading the flash.
@@ -319,6 +320,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
iput(inode);

out_free:
+ up_write(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
@@ -341,18 +343,19 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
if (!xent)
return -ENOMEM;

+ down_read(&ubifs_inode(host)->xattr_sem);
xent_key_init(c, &key, host->i_ino, &nm);
err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
if (err) {
if (err == -ENOENT)
err = -ENODATA;
- goto out_unlock;
+ goto out_cleanup;
}

inode = iget_xattr(c, le64_to_cpu(xent->inum));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
- goto out_unlock;
+ goto out_cleanup;
}

ui = ubifs_inode(inode);
@@ -374,7 +377,8 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
out_iput:
mutex_unlock(&ui->ui_mutex);
iput(inode);
-out_unlock:
+out_cleanup:
+ up_read(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
@@ -406,16 +410,21 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
dentry, size);

+ down_read(&host_ui->xattr_sem);
len = host_ui->xattr_names + host_ui->xattr_cnt;
- if (!buffer)
+ if (!buffer) {
/*
* We should return the minimum buffer size which will fit a
* null-terminated list of all the extended attribute names.
*/
- return len;
+ err = len;
+ goto out_err;
+ }

- if (len > size)
- return -ERANGE;
+ if (len > size) {
+ err = -ERANGE;
+ goto out_err;
+ }

lowest_xent_key(c, &key, host->i_ino);
while (1) {
@@ -437,8 +446,9 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
pxent = xent;
key_read(c, &xent->key, &key);
}
-
kfree(pxent);
+ up_read(&host_ui->xattr_sem);
+
if (err != -ENOENT) {
ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
@@ -446,6 +456,10 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)

ubifs_assert(c, written <= size);
return written;
+
+out_err:
+ up_read(&host_ui->xattr_sem);
+ return err;
}

static int remove_xattr(struct ubifs_info *c, struct inode *host,
@@ -504,6 +518,7 @@ int ubifs_purge_xattrs(struct inode *host)
ubifs_warn(c, "inode %lu has too many xattrs, doing a non-atomic deletion",
host->i_ino);

+ down_write(&ubifs_inode(host)->xattr_sem);
lowest_xent_key(c, &key, host->i_ino);
while (1) {
xent = ubifs_tnc_next_ent(c, &key, &nm);
@@ -523,7 +538,7 @@ int ubifs_purge_xattrs(struct inode *host)
ubifs_ro_mode(c, err);
kfree(pxent);
kfree(xent);
- return err;
+ goto out_err;
}

ubifs_assert(c, ubifs_inode(xino)->xattr);
@@ -535,7 +550,7 @@ int ubifs_purge_xattrs(struct inode *host)
kfree(xent);
iput(xino);
ubifs_err(c, "cannot remove xattr, error %d", err);
- return err;
+ goto out_err;
}

iput(xino);
@@ -544,14 +559,19 @@ int ubifs_purge_xattrs(struct inode *host)
pxent = xent;
key_read(c, &xent->key, &key);
}
-
kfree(pxent);
+ up_write(&ubifs_inode(host)->xattr_sem);
+
if (err != -ENOENT) {
ubifs_err(c, "cannot find next direntry, error %d", err);
return err;
}

return 0;
+
+out_err:
+ up_write(&ubifs_inode(host)->xattr_sem);
+ return err;
}

/**
@@ -594,6 +614,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
if (!xent)
return -ENOMEM;

+ down_write(&ubifs_inode(host)->xattr_sem);
xent_key_init(c, &key, host->i_ino, &nm);
err = ubifs_tnc_lookup_nm(c, &key, xent, &nm);
if (err) {
@@ -618,6 +639,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
iput(inode);

out_free:
+ up_write(&ubifs_inode(host)->xattr_sem);
kfree(xent);
return err;
}
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index e169d8fe35b5..f4a72ff8cf95 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -932,6 +932,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
iinfo->i_location.partitionReferenceNum,
0);
epos.bh = udf_tgetblk(sb, block);
+ if (unlikely(!epos.bh)) {
+ err = -ENOMEM;
+ goto out_no_entry;
+ }
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, bsize);
set_buffer_uptodate(epos.bh);
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index 9b97d284d0ce..bc3819dc33e1 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -15,7 +15,7 @@
* - POWER_SUPPLY_TYPE_USB,
* because only them store as drv_data pointer to struct ux500_charger.
*/
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy)
+#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)

/* Forward declaration */
struct ux500_charger;
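
The one-character ux500_chargalg.h change above matters because the old macro body named a fixed identifier instead of its parameter, so it only expanded correctly when the caller's variable happened to be called psy. For example, with a caller variable named chg_psy (hypothetical):

        struct ux500_charger *di = psy_to_ux500_charger(chg_psy);

        /* old body expanded to: power_supply_get_drvdata(psy)     -- wrong/undeclared symbol */
        /* new body expands to:  power_supply_get_drvdata(chg_psy) -- the macro argument      */
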
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 0b17c4322b09..f96b7f8d82e5 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -87,7 +87,7 @@ enum {

/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
* in Documentation/networking/netdev-features.rst.
*/
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index cfe8c607a628..f56c6a9230ac 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -75,6 +75,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}

+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
{
return NULL;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 27fb99cfeb02..f8b0704968a1 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1126,7 +1126,7 @@ do { \
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
-void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 1de960bfcab9..73150520c02d 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -162,6 +162,9 @@ struct v4l2_subdev_io_pin_config {
* @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
* a direction argument if needed.
*
+ * @command: called by in-kernel drivers in order to call functions internal
+ * to subdev drivers that have a separate callback.
+ *
* @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
* used to provide support for private ioctls used on the driver.
*
@@ -193,6 +196,7 @@ struct v4l2_subdev_core_ops {
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
+ long (*command)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32)(struct v4l2_subdev *sd, unsigned int cmd,
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 123b1e9ea304..161b90979038 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -312,12 +312,14 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
if (flow_offload_has_one_action(action))
return true;

- flow_action_for_each(i, action_entry, action) {
- if (i && action_entry->hw_stats != last_hw_stats) {
- NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
- return false;
+ if (action) {
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats = action_entry->hw_stats;
}
- last_hw_stats = action_entry->hw_stats;
}
return true;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 0bdff38eb4bb..51d698f2656f 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -458,7 +458,7 @@ struct sctp_af {
int saddr);
void (*from_sk) (union sctp_addr *,
struct sock *sk);
- void (*from_addr_param) (union sctp_addr *,
+ bool (*from_addr_param) (union sctp_addr *,
union sctp_addr_param *,
__be16 port, int iif);
int (*to_addr_param) (const union sctp_addr *,
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 8a26a2ffa952..fc5a39839b4b 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
ISCSI_CONN_UP = 0,
ISCSI_CONN_DOWN,
ISCSI_CONN_FAILED,
+ ISCSI_CONN_BOUND,
};

struct iscsi_cls_conn {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cde753bb2093..13772f039c8d 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -223,7 +223,7 @@ enum tunable_id {
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
* Add your fresh new tunable attribute above and remember to update
- * tunable_strings[] in net/core/ethtool.c
+ * tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_TUNABLE_COUNT,
};
@@ -287,7 +287,7 @@ enum phy_tunable_id {
ETHTOOL_PHY_EDPD,
/*
* Add your fresh new phy tunable attribute above and remember to update
- * phy_tunable_strings[] in net/core/ethtool.c
+ * phy_tunable_strings[] in net/ethtool/common.c
*/
__ETHTOOL_PHY_TUNABLE_COUNT,
};
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 182e162f8fd0..239c6b3b5993 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1395,29 +1395,54 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
select_insn:
goto *jumptable[insn->code];

- /* ALU */
-#define ALU(OPCODE, OP) \
- ALU64_##OPCODE##_X: \
- DST = DST OP SRC; \
- CONT; \
- ALU_##OPCODE##_X: \
- DST = (u32) DST OP (u32) SRC; \
- CONT; \
- ALU64_##OPCODE##_K: \
- DST = DST OP IMM; \
- CONT; \
- ALU_##OPCODE##_K: \
- DST = (u32) DST OP (u32) IMM; \
+ /* Explicitly mask the register-based shift amounts with 63 or 31
+ * to avoid undefined behavior. Normally this won't affect the
+ * generated code, for example, in case of native 64 bit archs such
+ * as x86-64 or arm64, the compiler is optimizing the AND away for
+ * the interpreter. In case of JITs, each of the JIT backends compiles
+ * the BPF shift operations to machine instructions which produce
+ * implementation-defined results in such a case; the resulting
+ * contents of the register may be arbitrary, but program behaviour
+ * as a whole remains defined. In other words, in case of JIT backends,
+ * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
+ */
+ /* ALU (shifts) */
+#define SHT(OPCODE, OP) \
+ ALU64_##OPCODE##_X: \
+ DST = DST OP (SRC & 63); \
+ CONT; \
+ ALU_##OPCODE##_X: \
+ DST = (u32) DST OP ((u32) SRC & 31); \
+ CONT; \
+ ALU64_##OPCODE##_K: \
+ DST = DST OP IMM; \
+ CONT; \
+ ALU_##OPCODE##_K: \
+ DST = (u32) DST OP (u32) IMM; \
+ CONT;
+ /* ALU (rest) */
+#define ALU(OPCODE, OP) \
+ ALU64_##OPCODE##_X: \
+ DST = DST OP SRC; \
+ CONT; \
+ ALU_##OPCODE##_X: \
+ DST = (u32) DST OP (u32) SRC; \
+ CONT; \
+ ALU64_##OPCODE##_K: \
+ DST = DST OP IMM; \
+ CONT; \
+ ALU_##OPCODE##_K: \
+ DST = (u32) DST OP (u32) IMM; \
CONT;
-
ALU(ADD, +)
ALU(SUB, -)
ALU(AND, &)
ALU(OR, |)
- ALU(LSH, <<)
- ALU(RSH, >>)
ALU(XOR, ^)
ALU(MUL, *)
+ SHT(LSH, <<)
+ SHT(RSH, >>)
+#undef SHT
#undef ALU
ALU_NEG:
DST = (u32) -DST;
@@ -1442,13 +1467,13 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
insn++;
CONT;
ALU_ARSH_X:
- DST = (u64) (u32) (((s32) DST) >> SRC);
+ DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
CONT;
ALU_ARSH_K:
DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
- (*(s64 *) &DST) >>= SRC;
+ (*(s64 *) &DST) >>= (SRC & 63);
CONT;
ALU64_ARSH_K:
(*(s64 *) &DST) >>= IMM;
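
The masking added to the BPF interpreter above exists because, in C, shifting by a count greater than or equal to the operand width is undefined behaviour; masking the register-based count with 63 or 31 keeps the interpreter's own behaviour defined, even though (as the comment notes) the resulting register value for such programs may still be arbitrary. A small user-space illustration of the masked shift, outside the patch:

#include <stdint.h>
#include <stdio.h>

/* Interpreter-style helpers: mask the count like the patch does. */
static uint64_t lsh64(uint64_t dst, uint64_t src) { return dst << (src & 63); }
static uint32_t lsh32(uint32_t dst, uint32_t src) { return dst << (src & 31); }

int main(void)
{
        /* dst << 64 would be undefined in C; masked, it becomes dst << 0. */
        printf("%llu\n", (unsigned long long)lsh64(5, 64)); /* prints 5 */
        printf("%u\n", lsh32(5, 33));                       /* 33 & 31 = 1 -> 10 */
        return 0;
}
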
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index add0b34f2b34..f9913bc65ef8 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -8,6 +8,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
+#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
@@ -109,6 +110,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
if (rb) {
+ kmemleak_not_leak(pages);
rb->pages = pages;
rb->nr_pages = nr_pages;
return rb;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2b8d7a5db383..67c22941b5f2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -32,6 +32,7 @@
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
+#include <linux/cpuset.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
@@ -814,6 +815,52 @@ void __init cpuhp_threads_init(void)
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

+/*
+ *
+ * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
+ * protected region.
+ *
+ * The operation is still serialized against concurrent CPU hotplug via
+ * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
+ * serialized against other hotplug related activity like adding or
+ * removing of state callbacks and state instances, which invoke either the
+ * startup or the teardown callback of the affected state.
+ *
+ * This is required for subsystems which are unfixable vs. CPU hotplug and
+ * evade lock inversion problems by scheduling work which has to be
+ * completed _before_ cpu_up()/_cpu_down() returns.
+ *
+ * Don't even think about adding anything to this for any new code or even
+ * drivers. Its only purpose is to keep existing lock order trainwrecks
+ * working.
+ *
+ * For cpu_down() there might be valid reasons to finish cleanups which are
+ * not required to be done under cpu_hotplug_lock, but that's a different
+ * story and would be not invoked via this.
+ */
+static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
+{
+ /*
+ * cpusets delegate hotplug operations to a worker to "solve" the
+ * lock order problems. Wait for the worker, but only if tasks are
+ * _not_ frozen (suspend, hibernate) as that would wait forever.
+ *
+ * The wait is required because otherwise the hotplug operation
+ * returns with inconsistent state, which could even be observed in
+ * user space when a new CPU is brought up. The CPU plug uevent
+ * would be delivered and user space reacting on it would fail to
+ * move tasks to the newly plugged CPU up to the point where the
+ * work has finished because up to that point the newly plugged CPU
+ * is not assignable in cpusets/cgroups. On unplug that's not
+ * necessarily a visible issue, but it is still inconsistent state,
+ * which is the real problem which needs to be "fixed". This can't
+ * prevent the transient state between scheduling the work and
+ * returning from waiting for it.
+ */
+ if (!tasks_frozen)
+ cpuset_wait_for_hotplug();
+}
+
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
@@ -1051,6 +1098,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
*/
lockup_detector_cleanup();
arch_smt_update();
+ cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}

@@ -1247,6 +1295,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
out:
cpus_write_unlock();
arch_smt_update();
+ cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3d92de7909bf..32c0905bca84 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3672,15 +3672,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)

r = removed_load;
sub_positive(&sa->load_avg, r);
- sub_positive(&sa->load_sum, r * divider);
+ sa->load_sum = sa->load_avg * divider;

r = removed_util;
sub_positive(&sa->util_avg, r);
- sub_positive(&sa->util_sum, r * divider);
+ sa->util_sum = sa->util_avg * divider;

r = removed_runnable;
sub_positive(&sa->runnable_avg, r);
- sub_positive(&sa->runnable_sum, r * divider);
+ sa->runnable_sum = sa->runnable_avg * divider;

/*
* removed_runnable is the unweighted version of removed_load so we
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 01f5d3020589..21005b980a6b 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -249,17 +249,22 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent
}
EXPORT_SYMBOL(prepare_to_wait);

-void
+/* Returns true if we are the first waiter in the queue, false otherwise. */
+bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
unsigned long flags;
+ bool was_empty = false;

wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&wq_head->lock, flags);
- if (list_empty(&wq_entry->entry))
+ if (list_empty(&wq_entry->entry)) {
+ was_empty = list_empty(&wq_head->head);
__add_wait_queue_entry_tail(wq_head, wq_entry);
+ }
set_current_state(state);
spin_unlock_irqrestore(&wq_head->lock, flags);
+ return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
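
prepare_to_wait_exclusive() now reports whether the caller became the first exclusive waiter on the queue, i.e. the one a single wake-up will pick. A hedged sketch of how a caller might use that hint (names and the "kick the producer" step are hypothetical; the in-tree user is not shown in this hunk):

#include <linux/wait.h>
#include <linux/sched/signal.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static int demo_wait(bool (*condition)(void))
{
        DEFINE_WAIT(wait);
        bool first;

        for (;;) {
                first = prepare_to_wait_exclusive(&demo_wq, &wait,
                                                  TASK_INTERRUPTIBLE);
                if (condition())
                        break;
                if (first) {
                        /* Hypothetical: nobody is queued ahead of us, so e.g.
                         * kick the producer / flush pending work here.
                         */
                }
                if (signal_pending(current)) {
                        finish_wait(&demo_wq, &wait);
                        return -ERESTARTSYS;
                }
                schedule();
        }
        finish_wait(&demo_wq, &wait);
        return 0;
}
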

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b09c59806501..ee84891bacfa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2181,8 +2181,15 @@ void tracing_reset_all_online_cpus(void)
}
}

+/*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+ */
static int *tgid_map;

+/* The maximum valid index into tgid_map. */
+static size_t tgid_map_max;
+
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -2455,24 +2462,41 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}

+static int *trace_find_tgid_ptr(int pid)
+{
+ /*
+ * Pairs with the smp_store_release in set_tracer_flag() to ensure that
+ * if we observe a non-NULL tgid_map then we also observe the correct
+ * tgid_map_max.
+ */
+ int *map = smp_load_acquire(&tgid_map);
+
+ if (unlikely(!map || pid > tgid_map_max))
+ return NULL;
+
+ return &map[pid];
+}
+
int trace_find_tgid(int pid)
{
- if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
- return 0;
+ int *ptr = trace_find_tgid_ptr(pid);

- return tgid_map[pid];
+ return ptr ? *ptr : 0;
}

static int trace_save_tgid(struct task_struct *tsk)
{
+ int *ptr;
+
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;

- if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
+ ptr = trace_find_tgid_ptr(tsk->pid);
+ if (!ptr)
return 0;

- tgid_map[tsk->pid] = tsk->tgid;
+ *ptr = tsk->tgid;
return 1;
}

@@ -4847,6 +4871,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
+ int *map;
+
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD))
lockdep_assert_held(&event_mutex);
@@ -4869,10 +4895,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
trace_event_enable_cmd_record(enabled);

if (mask == TRACE_ITER_RECORD_TGID) {
- if (!tgid_map)
- tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
- sizeof(*tgid_map),
- GFP_KERNEL);
+ if (!tgid_map) {
+ tgid_map_max = pid_max;
+ map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
+ GFP_KERNEL);
+
+ /*
+ * Pairs with smp_load_acquire() in
+ * trace_find_tgid_ptr() to ensure that if it observes
+ * the tgid_map we just allocated then it also observes
+ * the corresponding tgid_map_max value.
+ */
+ smp_store_release(&tgid_map, map);
+ }
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
return -ENOMEM;
@@ -5284,37 +5319,16 @@ static const struct file_operations tracing_readme_fops = {

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
- int *ptr = v;
+ int pid = ++(*pos);

- if (*pos || m->count)
- ptr++;
-
- (*pos)++;
-
- for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
- if (trace_find_tgid(*ptr))
- return ptr;
- }
-
- return NULL;
+ return trace_find_tgid_ptr(pid);
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
- void *v;
- loff_t l = 0;
+ int pid = *pos;

- if (!tgid_map)
- return NULL;
-
- v = &tgid_map[0];
- while (l <= *pos) {
- v = saved_tgids_next(m, v, &l);
- if (!v)
- return NULL;
- }
-
- return v;
+ return trace_find_tgid_ptr(pid);
}

static void saved_tgids_stop(struct seq_file *m, void *v)
@@ -5323,9 +5337,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v)

static int saved_tgids_show(struct seq_file *m, void *v)
{
- int pid = (int *)v - tgid_map;
+ int *entry = (int *)v;
+ int pid = entry - tgid_map;
+ int tgid = *entry;
+
+ if (tgid == 0)
+ return SEQ_SKIP;

- seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
+ seq_printf(m, "%d %d\n", pid, tgid);
return 0;
}
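
The smp_store_release()/smp_load_acquire() pair used for tgid_map above is the usual one-shot publication pattern: initialise everything readers depend on (here tgid_map_max), then release-store the pointer; a reader that acquire-loads a non-NULL pointer is guaranteed to also observe that earlier initialisation. A generic sketch of the same pattern, with hypothetical names:

#include <linux/slab.h>
#include <asm/barrier.h>

struct demo_table {
        size_t max;     /* must be visible before the pointer is */
        int *slots;
};

static struct demo_table *demo_tbl;     /* published pointer */

static int demo_publish(size_t max)
{
        struct demo_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;
        t->max = max;
        t->slots = kcalloc(max + 1, sizeof(*t->slots), GFP_KERNEL);
        if (!t->slots) {
                kfree(t);
                return -ENOMEM;
        }
        /* Pairs with smp_load_acquire() in demo_lookup(). */
        smp_store_release(&demo_tbl, t);
        return 0;
}

static int *demo_lookup(size_t idx)
{
        /* A reader that sees the pointer also sees ->max and ->slots. */
        struct demo_table *t = smp_load_acquire(&demo_tbl);

        if (!t || idx > t->max)
                return NULL;
        return &t->slots[idx];
}
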

diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 89c26c393bdb..6dafde851333 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -229,8 +229,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,

WARN_ON(s->size == 0);

+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
while (len) {
- start_len = min(len, HEX_CHARS - 1);
+ start_len = min(len, MAX_MEMHEX_BYTES);
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < start_len; i++) {
#else
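
The seq_buf_putmem_hex() bound above is easiest to check with the usual constants (assuming MAX_MEMHEX_BYTES is 8 and HEX_CHARS is 2 * MAX_MEMHEX_BYTES + 1 = 17, as defined alongside this function): each input byte becomes two hex characters, plus one trailing separator per chunk, so:

        old: start_len = min(len, HEX_CHARS - 1)    = up to 16 bytes
             worst-case chunk output = 16 * 2 + 1   = 33 chars  > sizeof(hex) = 17  (overflow)
        new: start_len = min(len, MAX_MEMHEX_BYTES) = up to  8 bytes
             worst-case chunk output =  8 * 2 + 1   = 17 chars <= sizeof(hex) = 17  (fits)

The added BUILD_BUG_ON keeps the two constants from drifting apart again.
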
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 25fb82320e3d..01445ddff58d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1856,11 +1856,11 @@ static int __soft_offline_page(struct page *page)
pr_info("soft offline: %#lx: %s migration failed %d, type %lx (%pGp)\n",
pfn, msg_page[huge], ret, page->flags, &page->flags);
if (ret > 0)
- ret = -EIO;
+ ret = -EBUSY;
}
} else {
- pr_info("soft offline: %#lx: %s isolation failed: %d, page count %d, type %lx (%pGp)\n",
- pfn, msg_page[huge], ret, page_count(page), page->flags, &page->flags);
+ pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %lx (%pGp)\n",
+ pfn, msg_page[huge], page_count(page), page->flags, &page->flags);
ret = -EBUSY;
}
return ret;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 86ebfc6ae698..0854f1b35683 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1713,14 +1713,6 @@ int hci_dev_do_close(struct hci_dev *hdev)

BT_DBG("%s %p", hdev->name, hdev);

- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- test_bit(HCI_UP, &hdev->flags)) {
- /* Execute vendor specific shutdown routine */
- if (hdev->shutdown)
- hdev->shutdown(hdev);
- }
-
cancel_delayed_work(&hdev->power_off);

hci_request_cancel_all(hdev);
@@ -1796,6 +1788,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
clear_bit(HCI_INIT, &hdev->flags);
}

+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ hdev->shutdown(hdev);
+ }
+
/* flush cmd work */
flush_work(&hdev->cmd_work);

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d62ac4b73709..e59ae24a8f17 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4360,12 +4360,12 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,

bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

- switch (conn->setting & SCO_AIRMODE_MASK) {
- case SCO_AIRMODE_CVSD:
+ switch (ev->air_mode) {
+ case 0x02:
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
break;
- case SCO_AIRMODE_TRANSP:
+ case 0x03:
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
break;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cdc386337173..0ddbc415ce15 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6055,7 +6055,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
struct hci_conn *hcon = conn->hcon;
u16 mtu, mps, credits, result;
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *tmp;
int err = 0, sec_level;
int i = 0;

@@ -6074,7 +6074,7 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,

cmd_len -= sizeof(*rsp);

- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
u16 dcid;

if (chan->ident != cmd->ident ||
@@ -6237,7 +6237,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
u8 *data)
{
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *tmp;
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
u16 result;

@@ -6251,7 +6251,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
if (!result)
return 0;

- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
if (chan->ident != cmd->ident)
continue;

diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 13520c7b4f2f..31a585fe0c7c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -247,12 +247,15 @@ static const u8 mgmt_status_table[] = {
MGMT_STATUS_TIMEOUT, /* Instant Passed */
MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
MGMT_STATUS_REJECTED, /* QoS Rejected */
MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
MGMT_STATUS_REJECTED, /* Insufficient Security */
MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Reserved for future use */
MGMT_STATUS_FAILED, /* Slot Violation */
MGMT_STATUS_FAILED, /* Role Switch Failed */
MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
@@ -4035,6 +4038,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,

hci_dev_lock(hdev);

+ memset(&rp, 0, sizeof(rp));
+
if (cp->addr.type == BDADDR_BREDR) {
br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
&cp->addr.bdaddr,
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index d1336a7ad7ff..3259f5480127 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -607,8 +607,7 @@ int br_mrp_set_ring_state(struct net_bridge *br,
if (!mrp)
return -EINVAL;

- if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED &&
- state->ring_state != BR_MRP_RING_STATE_CLOSED)
+ if (mrp->ring_state != state->ring_state)
mrp->ring_transitions++;

mrp->ring_state = state->ring_state;
@@ -690,8 +689,7 @@ int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
if (!mrp)
return -EINVAL;

- if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
- state->in_state != BR_MRP_IN_STATE_CLOSED)
+ if (mrp->in_state != state->in_state)
mrp->in_transitions++;

mrp->in_state = state->in_state;
diff --git a/net/core/dev.c b/net/core/dev.c
index 0c9ce36afc8c..2fdf30eefc59 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6433,11 +6433,18 @@ EXPORT_SYMBOL(napi_schedule_prep);
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
*
- * Variant of __napi_schedule() assuming hard irqs are masked
+ * Variant of __napi_schedule() assuming hard irqs are masked.
+ *
+ * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
+ * because the interrupt disabled assumption might not be true
+ * due to force-threaded interrupts and spinlock substitution.
*/
void __napi_schedule_irqoff(struct napi_struct *n)
{
- ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ else
+ __napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 97975bed491a..560d5dc43562 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1053,7 +1053,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
- unsigned int alloclen;
+ unsigned int alloclen, alloc_extra;
unsigned int pagedlen;
struct sk_buff *skb_prev;
alloc_new_skb:
@@ -1073,35 +1073,39 @@ static int __ip_append_data(struct sock *sk,
fraglen = datalen + fragheaderlen;
pagedlen = 0;

+ alloc_extra = hh_len + 15;
+ alloc_extra += exthdrlen;
+
+ /* The last fragment gets additional space at tail.
+ * Note, with MSG_MORE we overallocate on fragments,
+ * because we have no idea what fragment will be
+ * the last.
+ */
+ if (datalen == length + fraggap)
+ alloc_extra += rt->dst.trailer_len;
+
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
- else if (!paged)
+ else if (!paged &&
+ (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+ !(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
else {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
}

- alloclen += exthdrlen;
-
- /* The last fragment gets additional space at tail.
- * Note, with MSG_MORE we overallocate on fragments,
- * because we have no idea what fragment will be
- * the last.
- */
- if (datalen == length + fraggap)
- alloclen += rt->dst.trailer_len;
+ alloclen += alloc_extra;

if (transhdrlen) {
- skb = sock_alloc_send_skb(sk,
- alloclen + hh_len + 15,
+ skb = sock_alloc_send_skb(sk, alloclen,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
- skb = alloc_skb(alloclen + hh_len + 15,
+ skb = alloc_skb(alloclen,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fac5c1469cee..4d4b641c204d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2802,8 +2802,17 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
*rexmit = REXMIT_LOST;
}

+static bool tcp_force_fast_retransmit(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ return after(tcp_highest_sack_seq(tp),
+ tp->snd_una + tp->reordering * tp->mss_cache);
+}
+
/* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
+ bool *do_lost)
{
struct tcp_sock *tp = tcp_sk(sk);

@@ -2828,7 +2837,9 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
- return true;
+ } else {
+ /* Partial ACK arrived. Force fast retransmit. */
+ *do_lost = tcp_force_fast_retransmit(sk);
}
return false;
}
@@ -2852,14 +2863,6 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
}
}

-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- return after(tcp_highest_sack_seq(tp),
- tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -2929,17 +2932,21 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp))
tcp_add_reno_sack(sk, num_dupack, ece_ack);
- } else {
- if (tcp_try_undo_partial(sk, prior_snd_una))
- return;
- /* Partial ACK arrived. Force fast retransmit. */
- do_lost = tcp_force_fast_retransmit(sk);
- }
- if (tcp_try_undo_dsack(sk)) {
- tcp_try_keep_open(sk);
+ } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
return;
- }
+
+ if (tcp_try_undo_dsack(sk))
+ tcp_try_keep_open(sk);
+
tcp_identify_packet_loss(sk, ack_flag);
+ if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+ if (!tcp_time_to_recover(sk, flag))
+ return;
+ /* Undo reverts the recovery state. If loss is evident,
+ * starts a new recovery (e.g. reordering then loss);
+ */
+ tcp_enter_recovery(sk, ece_ack);
+ }
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, num_dupack, rexmit);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 077d43af8226..e889655ca0e2 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1554,7 +1554,7 @@ static int __ip6_append_data(struct sock *sk,
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
- unsigned int alloclen;
+ unsigned int alloclen, alloc_extra;
unsigned int pagedlen;
alloc_new_skb:
/* There's no room in the current skb */
@@ -1581,17 +1581,28 @@ static int __ip6_append_data(struct sock *sk,
fraglen = datalen + fragheaderlen;
pagedlen = 0;

+ alloc_extra = hh_len;
+ alloc_extra += dst_exthdrlen;
+ alloc_extra += rt->dst.trailer_len;
+
+ /* We just reserve space for fragment header.
+ * Note: this may be overallocation if the message
+ * (without MSG_MORE) fits into the MTU.
+ */
+ alloc_extra += sizeof(struct frag_hdr);
+
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
- else if (!paged)
+ else if (!paged &&
+ (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+ !(rt->dst.dev->features & NETIF_F_SG)))
alloclen = fraglen;
else {
alloclen = min_t(int, fraglen, MAX_HEADER);
pagedlen = fraglen - alloclen;
}
-
- alloclen += dst_exthdrlen;
+ alloclen += alloc_extra;

if (datalen != length + fraggap) {
/*
@@ -1601,30 +1612,21 @@ static int __ip6_append_data(struct sock *sk,
datalen += rt->dst.trailer_len;
}

- alloclen += rt->dst.trailer_len;
fraglen = datalen + fragheaderlen;

- /*
- * We just reserve space for fragment header.
- * Note: this may be overallocation if the message
- * (without MSG_MORE) fits into the MTU.
- */
- alloclen += sizeof(struct frag_hdr);
-
copy = datalen - transhdrlen - fraggap - pagedlen;
if (copy < 0) {
err = -EINVAL;
goto error;
}
if (transhdrlen) {
- skb = sock_alloc_send_skb(sk,
- alloclen + hh_len,
+ skb = sock_alloc_send_skb(sk, alloclen,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
- skb = alloc_skb(alloclen + hh_len,
+ skb = alloc_skb(alloclen,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index af36acc1a644..2880dc7d9a49 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -15,29 +15,11 @@ static u32 __ipv6_select_ident(struct net *net,
const struct in6_addr *dst,
const struct in6_addr *src)
{
- const struct {
- struct in6_addr dst;
- struct in6_addr src;
- } __aligned(SIPHASH_ALIGNMENT) combined = {
- .dst = *dst,
- .src = *src,
- };
- u32 hash, id;
-
- /* Note the following code is not safe, but this is okay. */
- if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
- get_random_bytes(&net->ipv4.ip_id_key,
- sizeof(net->ipv4.ip_id_key));
-
- hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
-
- /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
- * set the hight order instead thus minimizing possible future
- * collisions.
- */
- id = ip_idents_reserve(hash, 1);
- if (unlikely(!id))
- id = 1 << 31;
+ u32 id;
+
+ do {
+ id = prandom_u32();
+ } while (!id);

return id;
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 13250cadb420..e18c3855f616 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2088,10 +2088,9 @@ static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
- struct ieee80211_local *local = sta->local;
int cpu;

- if (!ieee80211_hw_check(&local->hw, USES_RSS))
+ if (!sta->pcpu_rx_stats)
return stats;

for_each_possible_cpu(cpu) {
@@ -2191,9 +2190,7 @@ static void sta_set_tidstats(struct sta_info *sta,
int cpu;

if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- if (!ieee80211_hw_check(&local->hw, USES_RSS))
- tidstats->rx_msdu +=
- sta_get_tidstats_msdu(&sta->rx_stats, tid);
+ tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);

if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
@@ -2272,7 +2269,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;

drv_sta_statistics(local, sdata, &sta->sta, sinfo);
-
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
@@ -2307,8 +2303,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,

if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
- if (!ieee80211_hw_check(&local->hw, USES_RSS))
- sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+ sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);

if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 88e14cfeb5d5..f613299ca7f0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -333,7 +333,8 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
}
mutex_unlock(&idrinfo->lock);

- if (nla_put_u32(skb, TCA_FCNT, n_i))
+ ret = nla_put_u32(skb, TCA_FCNT, n_i);
+ if (ret)
goto nla_put_failure;
nla_nest_end(skb, nest);

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a281da07bb1d..30090794b791 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1532,7 +1532,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
- const int max_reclassify_loop = 4;
+ const int max_reclassify_loop = 16;
const struct tcf_proto *first_tp;
int limit = 0;

diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 53e5ed79f63f..59e653b528b1 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -270,22 +270,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
rawaddr = (union sctp_addr_param *)raw_addr_list;

af = sctp_get_af_specific(param_type2af(param->type));
- if (unlikely(!af)) {
+ if (unlikely(!af) ||
+ !af->from_addr_param(&addr, rawaddr, htons(port), 0)) {
retval = -EINVAL;
- sctp_bind_addr_clean(bp);
- break;
+ goto out_err;
}

- af->from_addr_param(&addr, rawaddr, htons(port), 0);
if (sctp_bind_addr_state(bp, &addr) != -1)
goto next;
retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
SCTP_ADDR_SRC, gfp);
- if (retval) {
+ if (retval)
/* Can't finish building the list, clean up. */
- sctp_bind_addr_clean(bp);
- break;
- }
+ goto out_err;

next:
len = ntohs(param->length);
@@ -294,6 +291,12 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
}

return retval;
+
+out_err:
+ if (retval)
+ sctp_bind_addr_clean(bp);
+
+ return retval;
}

/********************************************************************
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d508f6f3dd08..f72bff93745c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1131,7 +1131,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
if (!af)
continue;

- af->from_addr_param(paddr, params.addr, sh->source, 0);
+ if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
+ continue;

asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
if (asoc)
@@ -1174,7 +1175,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;

- af->from_addr_param(&paddr, param, peer_port, 0);
+ if (!af->from_addr_param(&paddr, param, peer_port, 0))
+ return NULL;

return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
@@ -1245,7 +1247,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,

ch = (struct sctp_chunkhdr *)ch_end;
chunk_num++;
- } while (ch_end < skb_tail_pointer(skb));
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));

return asoc;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index c8074f435d3e..d594b949ae82 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -530,15 +530,20 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}

/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v6_from_addr_param(union sctp_addr *addr,
+static bool sctp_v6_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param))
+ return false;
+
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0; /* BUG */
addr->v6.sin6_addr = param->v6.addr;
addr->v6.sin6_scope_id = iif;
+
+ return true;
}

/* Initialize an address parameter from a sctp_addr and return the length
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 25833238fe93..47fb87ce489f 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -253,14 +253,19 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}

/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v4_from_addr_param(union sctp_addr *addr,
+static bool sctp_v4_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
+ return false;
+
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = port;
addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+
+ return true;
}

/* Initialize an address parameter from a sctp_addr and return the length
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b9d6babe2870..7411fa442821 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2329,11 +2329,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,

/* Process the initialization parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
- if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
- param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ if (!src_match &&
+ (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr,
- chunk->sctp_hdr->source, 0);
+ if (!af->from_addr_param(&addr, param.addr,
+ chunk->sctp_hdr->source, 0))
+ continue;
if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
src_match = 1;
}
@@ -2514,7 +2516,8 @@ static int sctp_process_param(struct sctp_association *asoc,
break;
do_addr_param:
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0))
+ break;
scope = sctp_scope(peer_addr);
if (sctp_in_scope(net, &addr, scope))
if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
@@ -2615,15 +2618,13 @@ static int sctp_process_param(struct sctp_association *asoc,
addr_param = param.v + sizeof(struct sctp_addip_param);

af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- if (af == NULL)
+ if (!af)
break;

- af->from_addr_param(&addr, addr_param,
- htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0))
+ break;

- /* if the address is invalid, we can't process it.
- * XXX: see spec for what to do.
- */
if (!af->addr_valid(&addr, NULL, NULL))
break;

@@ -3037,7 +3038,8 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
if (unlikely(!af))
return SCTP_ERROR_DNS_FAILED;

- af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0))
+ return SCTP_ERROR_DNS_FAILED;

/* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast
* or multicast address.
@@ -3314,7 +3316,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,

/* We have checked the packet before, so we do not check again. */
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0))
+ return;

switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index cf86c1376b1a..326250513570 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1352,7 +1352,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,

if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- sk->sk_state = TCP_CLOSE;
+ sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
goto out_wait;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index daf3f29c7f0c..8fb0478888fb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4625,11 +4625,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
sband->ht_cap.mcs.rx_mask,
sizeof(mask->control[i].ht_mcs));

- if (!sband->vht_cap.vht_supported)
- continue;
-
- vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ if (sband->vht_cap.vht_supported) {
+ vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ }

he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
if (!he_cap)
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 33bef22e44e9..b379a0371653 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device * dev,
return -EOPNOTSUPP;

/* Just do it */
- memcpy(&(spydata->spy_thr_low), &(threshold->low),
- 2 * sizeof(struct iw_quality));
+ spydata->spy_thr_low = threshold->low;
+ spydata->spy_thr_high = threshold->high;

/* Clear flag */
memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
@@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device * dev,
return -EOPNOTSUPP;

/* Just do it */
- memcpy(&(threshold->low), &(spydata->spy_thr_low),
- 2 * sizeof(struct iw_quality));
+ threshold->low = spydata->spy_thr_low;
+ threshold->high = spydata->spy_thr_high;

return 0;
}
@@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device * dev,
memcpy(threshold.addr.sa_data, address, ETH_ALEN);
threshold.addr.sa_family = ARPHRD_ETHER;
/* Copy stats */
- memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality));
+ threshold.qual = *wstats;
/* Copy also thresholds */
- memcpy(&(threshold.low), &(spydata->spy_thr_low),
- 2 * sizeof(struct iw_quality));
+ threshold.low = spydata->spy_thr_low;
+ threshold.high = spydata->spy_thr_high;

/* Send event to user space */
wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d0c32a8fcc4a..45f86a97eaf2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -580,6 +580,20 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,

copy_from_user_state(x, p);

+ if (attrs[XFRMA_ENCAP]) {
+ x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
+ sizeof(*x->encap), GFP_KERNEL);
+ if (x->encap == NULL)
+ goto error;
+ }
+
+ if (attrs[XFRMA_COADDR]) {
+ x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
+ sizeof(*x->coaddr), GFP_KERNEL);
+ if (x->coaddr == NULL)
+ goto error;
+ }
+
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

@@ -600,23 +614,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
attrs[XFRMA_ALG_COMP])))
goto error;

- if (attrs[XFRMA_ENCAP]) {
- x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
- sizeof(*x->encap), GFP_KERNEL);
- if (x->encap == NULL)
- goto error;
- }
-
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

- if (attrs[XFRMA_COADDR]) {
- x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
- sizeof(*x->coaddr), GFP_KERNEL);
- if (x->coaddr == NULL)
- goto error;
- }
-
xfrm_mark_get(attrs, &x->mark);

xfrm_smark_init(attrs, &x->props.smark);
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 3c05827608b6..884a014ce2b8 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -297,26 +297,27 @@ static struct avc_xperms_decision_node
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;

- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd_node)
return NULL;

xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_NOWAIT);
+ GFP_NOWAIT | __GFP_NOWARN);
if (!xpd->dontaudit)
goto error;
}
@@ -344,7 +345,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
{
struct avc_xperms_node *xp_node;

- xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -500,7 +501,7 @@ static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
{
struct avc_node *node;

- node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN);
if (!node)
goto out;

diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 334299357e71..b88c1a953833 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -855,6 +855,8 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
if (format == SMK_FIXED24_FMT &&
(count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
return -EINVAL;
+ if (count > PAGE_SIZE)
+ return -EINVAL;

data = memdup_user_nul(buf, count);
if (IS_ERR(data))
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 8661877bf4c6..13a956c6077b 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -139,6 +139,7 @@ static struct snd_soc_dai_link tegra_alc5632_dai = {

static struct snd_soc_card snd_soc_tegra_alc5632 = {
.name = "tegra-alc5632",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_alc5632_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index 9d8e16473ab9..2fdf46bad2bf 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -182,6 +182,7 @@ static struct snd_soc_dai_link tegra_max98090_dai = {

static struct snd_soc_card snd_soc_tegra_max98090 = {
.name = "tegra-max98090",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_max98090_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index c73bd23b3d67..6c2689f5da22 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -132,6 +132,7 @@ static struct snd_soc_dai_link tegra_rt5640_dai = {

static struct snd_soc_card snd_soc_tegra_rt5640 = {
.name = "tegra-rt5640",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_rt5640_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 7504507dd8b8..0588889d081a 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -175,6 +175,7 @@ static struct snd_soc_dai_link tegra_rt5677_dai = {

static struct snd_soc_card snd_soc_tegra_rt5677 = {
.name = "tegra-rt5677",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_rt5677_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index e1dc8e7d337a..3d35a57d8c0c 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -97,6 +97,7 @@ static struct snd_soc_dai_link tegra_sgtl5000_dai = {

static struct snd_soc_card snd_soc_tegra_sgtl5000 = {
.name = "tegra-sgtl5000",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_sgtl5000_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index fa41fa366daf..bdddda4eb913 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -101,6 +101,7 @@ static struct snd_soc_dai_link tegra_wm8753_dai = {

static struct snd_soc_card snd_soc_tegra_wm8753 = {
.name = "tegra-wm8753",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_wm8753_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index ef6652aaac9b..98adf93fb898 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -235,6 +235,7 @@ static struct snd_soc_dai_link tegra_wm8903_dai = {

static struct snd_soc_card snd_soc_tegra_wm8903 = {
.name = "tegra-wm8903",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_wm8903_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
index 726edfa21a29..df7662258bc6 100644
--- a/sound/soc/tegra/tegra_wm9712.c
+++ b/sound/soc/tegra/tegra_wm9712.c
@@ -54,6 +54,7 @@ static struct snd_soc_dai_link tegra_wm9712_dai = {

static struct snd_soc_card snd_soc_tegra_wm9712 = {
.name = "tegra-wm9712",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &tegra_wm9712_dai,
.num_links = 1,
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index baae4cce7fc6..d8fbb22482d5 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -94,6 +94,7 @@ static struct snd_soc_dai_link trimslice_tlv320aic23_dai = {

static struct snd_soc_card snd_soc_trimslice = {
.name = "tegra-trimslice",
+ .driver_name = "tegra",
.owner = THIS_MODULE,
.dai_link = &trimslice_tlv320aic23_dai,
.num_links = 1,
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
index f5abb1ebd392..269b2680611b 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -108,6 +108,9 @@ router_destroy()
__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64

tc qdisc del dev $rp2 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
}

setup_prepare()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
index 1fedfc9da434..1d157b1bd838 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh
@@ -111,6 +111,9 @@ router_destroy()
__addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64

tc qdisc del dev $rp2 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
}

setup_prepare()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 5cbff8038f84..28a570006d4d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -93,7 +93,9 @@ switch_destroy()
lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
lldpad_app_wait_del

+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 74a8d329a72c..9b84cba9e911 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -11,7 +11,7 @@ CORRUPT_LIST_ADD list_add corruption
CORRUPT_LIST_DEL list_del corruption
STACK_GUARD_PAGE_LEADING
STACK_GUARD_PAGE_TRAILING
-UNSET_SMEP CR4 bits went missing
+UNSET_SMEP pinned CR4 bits changed:
DOUBLE_FAULT
CORRUPT_PAC
UNALIGNED_LOAD_STORE_WRITE
diff --git a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
index 55eeacf59241..64fbd211d907 100755
--- a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
+++ b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
@@ -75,7 +75,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact

+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
index 5f20d289ee43..10e594c55117 100755
--- a/tools/testing/selftests/net/forwarding/pedit_l4port.sh
+++ b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
@@ -71,7 +71,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact

+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/net/forwarding/skbedit_priority.sh b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
index e3bd8a6bb8b4..bde11dc27873 100755
--- a/tools/testing/selftests/net/forwarding/skbedit_priority.sh
+++ b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
@@ -72,7 +72,9 @@ switch_destroy()
tc qdisc del dev $swp2 clsact
tc qdisc del dev $swp1 clsact

+ ip link set dev $swp2 down
ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
ip link set dev $swp1 nomaster
ip link del dev br1
}
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
index 6e5a0ffa18e8..20502cb4791f 100644
--- a/tools/testing/selftests/resctrl/README
+++ b/tools/testing/selftests/resctrl/README
@@ -47,7 +47,7 @@ Parameter '-h' shows usage information.

usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM default benchmark is builtin fill_buf
- -t test list: run tests specified in the test list, e.g. -t mbm, mba, cqm, cat
+ -t test list: run tests specified in the test list, e.g. -t mbm,mba,cqm,cat
-n no_of_bits: run cache tests using specified no of bits in cache bit mask
-p cpu_no: specify CPU number to run the test. 1 is default
-h: help
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index ac2269610aa9..bd98746c6f85 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -40,7 +40,7 @@ static void cmd_help(void)
printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CQM");
printf("\t default benchmark is builtin fill_buf\n");
printf("\t-t test list: run tests specified in the test list, ");
- printf("e.g. -t mbm, mba, cqm, cat\n");
+ printf("e.g. -t mbm,mba,cqm,cat\n");
printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
printf("\t-h: help\n");
@@ -98,7 +98,7 @@ int main(int argc, char **argv)

return -1;
}
- token = strtok(NULL, ":\t");
+ token = strtok(NULL, ",");
}
break;
case 'p':