Subject: Re: Linux 4.18.9
diff --git a/Makefile b/Makefile
index 0d73431f66cd..1178348fb9ca 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 18
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = Merciless Moray

diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 47b74fbc403c..37bafd44e36d 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -9,6 +9,10 @@
*/

/ {
+ aliases {
+ ethernet = &gmac;
+ };
+
axs10x_mb {
compatible = "simple-bus";
#address-cells = <1>;
@@ -68,7 +72,7 @@
};
};

- ethernet@0x18000 {
+ gmac: ethernet@0x18000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
max-speed = <100>;
resets = <&creg_rst 5>;
reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
};

ehci@0x40000 {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 006aa3de5348..d00f283094d3 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -25,6 +25,10 @@
bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
};

+ aliases {
+ ethernet = &gmac;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -163,7 +167,7 @@
#clock-cells = <0>;
};

- ethernet@8000 {
+ gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
@@ -176,6 +180,7 @@
phy-handle = <&phy0>;
resets = <&cgu_rst HSDK_ETH_RESET>;
reset-names = "stmmaceth";
+ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */

mdio {
#address-cells = <1>;
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index a635ea972304..df848c44dacd 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index aa507e423075..bcbdc0494faa 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index eba07f468654..d145bce7ebdf 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index d496ef579859..ca46153d7915 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val &= ~CPACR_EL1_FPEN;
+ __activate_traps_fpsimd32(vcpu);
+ }

write_sysreg(val, cpacr_el1);

@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)

val = CPTR_EL2_DEFAULT;
val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
+ __activate_traps_fpsimd32(vcpu);
+ }

write_sysreg(val, cptr_el2);
}
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

- __activate_traps_fpsimd32(vcpu);
if (has_vhe())
activate_traps_vhe(vcpu);
else
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index 4f33dbc67348..7096915f26e0 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -184,7 +184,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "mscc,ocelot-miim";
- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
+ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
interrupts = <14>;
status = "disabled";

diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 8505db478904..1d92efb82c37 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
return 0;

pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;

@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
return 0;

pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;

diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 5ba6fcc26fa7..94a78dbbc91f 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
+ of_node_put(intc_node);

irqchip_init();
}
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index cea8ad864b3f..57b34257be2b 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}

-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -20,6 +21,7 @@

#include <asm/abi.h>
#include <asm/mips-cps.h>
+#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;

+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}

+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e12dfa48b478..a5893b2cdc0e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;

preempt_disable();
if (cpu_has_inclusive_pcaches) {
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f11f3a..76234a14b97d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@

#include <linux/slab.h>
#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
@@ -82,6 +83,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)

pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Don't scan the PGD for pointers, it contains references to PUDs but
+ * those references are not full pointers and so can't be recognised by
+ * kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+
/*
* With hugetlb, we don't clear the second half of the page table.
* If we share the same slab cache with the pmd or pud level table,
@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD, that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..7efc42538ccf 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -738,10 +738,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift);
if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long npages = 1;
+ unsigned long psize = PAGE_SIZE;
if (shift)
- npages = 1ul << (shift - PAGE_SHIFT);
- kvmppc_update_dirty_map(memslot, gfn, npages);
+ psize = 1ul << shift;
+ kvmppc_update_dirty_map(memslot, gfn, psize);
}
}
return 0;
diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
index 81b2cbce7df8..7c324eff2f22 100644
--- a/arch/powerpc/platforms/4xx/msi.c
+++ b/arch/powerpc/platforms/4xx/msi.c
@@ -146,13 +146,19 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
const u32 *sdr_addr;
dma_addr_t msi_phys;
void *msi_virt;
+ int err;

sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
if (!sdr_addr)
- return -1;
+ return -EINVAL;

- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+ msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
+ if (!msi_data)
+ return -EINVAL;
+
+ msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
+ if (!msi_mask)
+ return -EINVAL;

msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
if (!msi->msi_dev)
@@ -160,30 +166,30 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,

msi->msi_regs = of_iomap(msi->msi_dev, 0);
if (!msi->msi_regs) {
- dev_err(&dev->dev, "of_iomap problem failed\n");
- return -ENOMEM;
+ dev_err(&dev->dev, "of_iomap failed\n");
+ err = -ENOMEM;
+ goto node_put;
}
dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));

msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- if (!msi_virt)
- return -ENOMEM;
+ if (!msi_virt) {
+ err = -ENOMEM;
+ goto iounmap;
+ }
msi->msi_addr_hi = upper_32_bits(msi_phys);
msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
msi->msi_addr_hi, msi->msi_addr_lo);

+ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
+ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
+
/* Progam the Interrupt handler Termination addr registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);

- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
- if (!msi_data)
- return -1;
- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
- if (!msi_mask)
- return -1;
/* Program MSI Expected data and Mask bits */
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
@@ -191,6 +197,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);

return 0;
+
+iounmap:
+ iounmap(msi->msi_regs);
+node_put:
+ of_node_put(msi->msi_dev);
+ return err;
}

static int ppc4xx_of_msi_remove(struct platform_device *dev)
@@ -209,7 +221,6 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
msi_bitmap_free(&msi->bitmap);
iounmap(msi->msi_regs);
of_node_put(msi->msi_dev);
- kfree(msi);

return 0;
}
@@ -223,18 +234,16 @@ static int ppc4xx_msi_probe(struct platform_device *dev)

dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");

- msi = kzalloc(sizeof(*msi), GFP_KERNEL);
- if (!msi) {
- dev_err(&dev->dev, "No memory for MSI structure\n");
+ msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
+ if (!msi)
return -ENOMEM;
- }
dev->dev.platform_data = msi;

/* Get MSI ranges */
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
- goto error_out;
+ return err;
}

msi_irqs = of_irq_count(dev->dev.of_node);
@@ -243,7 +252,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)

err = ppc4xx_setup_pcieh_hw(dev, res, msi);
if (err)
- goto error_out;
+ return err;

err = ppc4xx_msi_init_allocator(dev, msi);
if (err) {
@@ -256,7 +265,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
}
- return err;
+ return 0;

error_out:
ppc4xx_of_msi_remove(dev);
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 8cdf91f5d3a4..c773465b2c95 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -437,8 +437,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
int i;

for (i = 0; i < npu->mmio_atsd_count; i++) {
- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
- return i;
+ if (!test_bit(i, &npu->mmio_atsd_usage))
+ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ return i;
}

return -ENOSPC;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8a4868a3964b..cb098e962ffe 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -647,6 +647,15 @@ void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
}
}

+static void pseries_disable_sriov_resources(struct pci_dev *pdev)
+{
+ int i;
+
+ pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
+}
+
static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
const int *indexes;
@@ -654,10 +663,10 @@ static void pseries_pci_fixup_resources(struct pci_dev *pdev)

/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_set_vf_bar_size(pdev, indexes);
+ if (indexes)
+ of_pci_set_vf_bar_size(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}

static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
@@ -669,10 +678,10 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
return;
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
- if (!indexes)
- return;
- /* Assign the addresses from device tree*/
- of_pci_parse_iov_addrs(pdev, indexes);
+ if (indexes)
+ of_pci_parse_iov_addrs(pdev, indexes);
+ else
+ pseries_disable_sriov_resources(pdev);
}

static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 84c89cb9636f..cbdd8341f17e 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);

/* copy only the wrapping keys */
- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ if (read_guest_real(vcpu, crycb_addr + 72,
+ vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);

scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 395c9631e000..75f1e35e7c15 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -22,10 +22,20 @@ enum die_val {
DIE_NMIUNKNOWN,
};

+enum show_regs_mode {
+ SHOW_REGS_SHORT,
+ /*
+ * For when userspace crashed, but we don't think it's our fault, and
+ * therefore don't print kernel registers.
+ */
+ SHOW_REGS_USER,
+ SHOW_REGS_ALL
+};
+
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
extern void show_iret_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index acebb808c4b5..0722b7745382 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1198,18 +1198,22 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-#define EMULTYPE_RETRY (1 << 3)
-#define EMULTYPE_NO_REEXECUTE (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)
-#define EMULTYPE_VMWARE (1 << 6)
+#define EMULTYPE_ALLOW_RETRY (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
+#define EMULTYPE_VMWARE (1 << 5)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
int emulation_type)
{
- return x86_emulate_instruction(vcpu, 0,
- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
+static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ void *insn, int insn_len)
+{
+ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
}

void kvm_enable_efer_bits(u64);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index c9b773401fd8..21d1fa5eaa5f 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -422,7 +422,7 @@ static int activate_managed(struct irq_data *irqd)
if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
/* Something in the core code broke! Survive gracefully */
pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
- return EINVAL;
+ return -EINVAL;
}

ret = assign_managed_vector(irqd, vector_searchmask);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 0624957aa068..07b5fc00b188 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct microcode_amd *mc_amd;
struct ucode_cpu_info *uci;
struct ucode_patch *p;
+ enum ucode_state ret;
u32 rev, dummy;

BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)

/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
- c->microcode = rev;
- uci->cpu_sig.rev = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
}
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);

- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- c->microcode = mc_amd->hdr.patch_id;
+ rev = mc_amd->hdr.patch_id;
+ ret = UCODE_UPDATED;
+
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

- return UCODE_UPDATED;
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
+
+ return ret;
}

static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
+ enum ucode_state ret;
static int prev_rev;
u32 rev;

@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}

/*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}

+ ret = UCODE_UPDATED;
+
+out:
uci->cpu_sig.rev = rev;
- c->microcode = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;

- return UCODE_UPDATED;
+ return ret;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 17b02adc79aa..0c5a9fc6e36d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -155,7 +155,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
* they can be printed in the right context.
*/
if (!partial && on_stack(info, regs, sizeof(*regs))) {
- __show_regs(regs, 0);
+ __show_regs(regs, SHOW_REGS_SHORT);

} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
IRET_FRAME_SIZE)) {
@@ -353,7 +353,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
oops_exit();

/* Executive summary in case the oops scrolled away */
- __show_regs(&exec_summary_regs, true);
+ __show_regs(&exec_summary_regs, SHOW_REGS_ALL);

if (!signr)
return;
@@ -416,14 +416,9 @@ void die(const char *str, struct pt_regs *regs, long err)

void show_regs(struct pt_regs *regs)
{
- bool all = true;
-
show_regs_print_info(KERN_DEFAULT);

- if (IS_ENABLED(CONFIG_X86_32))
- all = !user_mode(regs);
-
- __show_regs(regs, all);
+ __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);

/*
* When in-kernel, we also print out the stack at the time of the fault..
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0ae659de21eb..666d1825390d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,7 +59,7 @@
#include <asm/intel_rdt_sched.h>
#include <asm/proto.h>

-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);

- if (!all)
+ if (mode != SHOW_REGS_ALL)
return;

cr0 = read_cr0();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4344a032ebe6..0091a733c1cf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,7 @@
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
regs->r13, regs->r14, regs->r15);

- if (!all)
+ if (mode == SHOW_REGS_SHORT)
return;

+ if (mode == SHOW_REGS_USER) {
+ rdmsrl(MSR_FS_BASE, fs);
+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+ printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
+ fs, shadowgs);
+ return;
+ }
+
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 42f1ba92622a..97d41754769e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4960,7 +4960,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
- int r, emulation_type = EMULTYPE_RETRY;
+ int r, emulation_type = 0;
enum emulation_result er;
bool direct = vcpu->arch.mmu.direct_map;

@@ -4973,10 +4973,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_PF_EMULATE) {
- emulation_type = 0;
+ if (r == RET_PF_EMULATE)
goto emulate;
- }
}

if (r == RET_PF_INVALID) {
@@ -5003,8 +5001,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
return 1;
}

- if (mmio_info_in_cache(vcpu, cr2, direct))
- emulation_type = 0;
+ /*
+ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+ * optimistically try to just unprotect the page and let the processor
+ * re-execute the instruction that caused the page fault. Do not allow
+ * retrying MMIO emulation, as it's not only pointless but could also
+ * cause us to enter an infinite loop because the processor will keep
+ * faulting on the non-existent MMIO address. Retrying an instruction
+ * from a nested guest is also pointless and dangerous as we are only
+ * explicitly shadowing L1's page tables, i.e. unprotecting something
+ * for L1 isn't going to magically fix whatever issue cause L2 to fail.
+ */
+ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+ emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
/*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9799f86388e7..ef772e5634d4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3875,8 +3875,8 @@ static int emulate_on_interception(struct vcpu_svm *svm)

static int rsm_interception(struct vcpu_svm *svm)
{
- return x86_emulate_instruction(&svm->vcpu, 0, 0,
- rsm_ins_bytes, 2) == EMULATE_DONE;
+ return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+ rsm_ins_bytes, 2) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9869bfd0c601..d0c3be353bb6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7539,8 +7539,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return kvm_skip_emulated_instruction(vcpu);
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}

return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 94cd63081471..97fcac34e007 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5810,7 +5810,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
gpa_t gpa = cr2;
kvm_pfn_t pfn;

- if (emulation_type & EMULTYPE_NO_REEXECUTE)
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;

if (!vcpu->arch.mmu.direct_map) {
@@ -5898,7 +5901,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

- if (!(emulation_type & EMULTYPE_RETRY))
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;

if (x86_page_table_writing_insn(ctxt))
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d1f1612672c7..045338ac1667 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;

- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 58c6efa9f9a9..9fe5952d117d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
- bfqg_put(bfqg);
-
blkg_put(bfqg_to_blkg(bfqg));
+
+ bfqg_put(bfqg);
}

/* @stats = 0 */
diff --git a/block/blk-core.c b/block/blk-core.c
index 746a5eac4541..cbaca5a73f2e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2161,9 +2161,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
const int op = bio_op(bio);

- if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+ if (part->policy && op_is_write(op)) {
char b[BDEVNAME_SIZE];

+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return false;
+
WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d5f2c21d8531..816923bf874d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -402,8 +402,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth <= tags->nr_reserved_tags)
return -EINVAL;

- tdepth -= tags->nr_reserved_tags;
-
/*
* If we are allowed to grow beyond the original size, allocate
* a new set of tags before freeing the old one.
@@ -423,7 +421,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > 16 * BLKDEV_MAX_RQ)
return -EINVAL;

- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+ tags->nr_reserved_tags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -440,7 +439,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* Don't need (or can't) update reserved tags here, they
* remain static and should never need resizing.
*/
- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+ sbitmap_queue_resize(&tags->bitmap_tags,
+ tdepth - tags->nr_reserved_tags);
}

return 0;
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index 007f95eea0e1..903f3ed175d0 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9706613eecf9..bf64cfa30feb 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2628806c64a2..3d5277a39097 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -327,6 +327,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return vma ? -ENOMEM : -ESRCH;
}

+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ if (vma)
+ alloc->vma_vm_mm = vma->vm_mm;
+ /*
+ * If we see alloc->vma is not NULL, buffer data structures set up
+ * completely. Look at smp_rmb side binder_alloc_get_vma.
+ * We also want to guarantee new alloc->vma_vm_mm is always visible
+ * if alloc->vma is set.
+ */
+ smp_wmb();
+ alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma) {
+ /* Look at description in binder_alloc_set_vma */
+ smp_rmb();
+ vma = alloc->vma;
+ }
+ return vma;
+}
+
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
@@ -343,7 +372,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;

- if (alloc->vma == NULL) {
+ if (!binder_alloc_get_vma(alloc)) {
pr_err("%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
@@ -714,9 +743,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- barrier();
- alloc->vma = vma;
- alloc->vma_vm_mm = vma->vm_mm;
+ binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);

return 0;
@@ -743,10 +770,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int buffers, page_count;
struct binder_buffer *buffer;

- BUG_ON(alloc->vma);
-
buffers = 0;
mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->vma);
+
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);

@@ -889,7 +916,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- WRITE_ONCE(alloc->vma, NULL);
+ binder_alloc_set_vma(alloc, NULL);
}

/**
@@ -924,7 +951,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- vma = alloc->vma;
+ vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 09620c2ffa0f..704a761f94b2 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2107,7 +2107,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_device *dev = ap->link.device;
- u32 devslp, dm, dito, mdat, deto;
+ u32 devslp, dm, dito, mdat, deto, dito_conf;
int rc;
unsigned int err_mask;

@@ -2131,8 +2131,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
}

- /* device sleep was already enabled */
- if (devslp & PORT_DEVSLP_ADSE)
+ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+ dito = devslp_idle_timeout / (dm + 1);
+ if (dito > 0x3ff)
+ dito = 0x3ff;
+
+ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
+
+ /* device sleep was already enabled and same dito */
+ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
return;

/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
@@ -2140,11 +2147,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
if (rc)
return;

- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
- dito = devslp_idle_timeout / (dm + 1);
- if (dito > 0x3ff)
- dito = 0x3ff;
-
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
@@ -2162,6 +2164,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}

+ /* Make dito, mdat, deto bits to 0s */
+ devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f5e560188a18..622ab8edc035 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
struct zone *default_zone;
int nid;

- /*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
- */
- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
- return sprintf(buf, "none\n");
-
- start_pfn = valid_start_pfn;
- nr_pages = valid_end_pfn - start_pfn;
-
/*
* Check the existing zone. Make sure that we do that only on the
* online nodes otherwise the page_zone is not reliable
*/
if (mem->state == MEM_ONLINE) {
+ /*
+ * The block contains more than one zone can not be offlined.
+ * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ */
+ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+ &valid_start_pfn, &valid_end_pfn))
+ return sprintf(buf, "none\n");
+ start_pfn = valid_start_pfn;
strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
goto out;
}

- nid = pfn_to_nid(start_pfn);
+ nid = mem->nid;
default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
strcat(buf, default_zone->name);

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3fb95c8d9fd8..15a5ce5bba3d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
case NBD_SET_BLKSIZE:
+ if (!arg || !is_power_of_2(arg) || arg < 512 ||
+ arg > PAGE_SIZE)
+ return -EINVAL;
nbd_size_set(nbd, arg,
div_s64(config->bytesize, arg));
return 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b3f83cd96f33..01f59be71433 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -67,7 +67,7 @@
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
-
+#include <linux/nospec.h>
#include <linux/uaccess.h>

#define DRIVER_NAME "pktcdvd"
@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
+
+ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
return pkt_devs[dev_minor];
}

diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f3c643a0473c..5f953ca8ac5b 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -159,6 +159,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
user the Bluetooth HCI over a serial port interface. The HCI
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 6116cd05e228..9086edc9066b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}

out:
- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,

if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
- i2c_lock_adapter(tpm_dev.client->adapter);
+ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);

/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}

- i2c_unlock_adapter(tpm_dev.client->adapter);
+ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);

diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 424ff2fde1f2..9914f6973463 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
+ int irq;

phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;

- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ /* If the SPI device has an IRQ then use that */
+ if (dev->irq > 0)
+ irq = dev->irq;
+ else
+ irq = -1;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}

diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index bb2a6f2f5516..a985bf5e1ac6 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- int step;
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);

@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,

ftmp = rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
- step = do_div(ftmp, clk->info->range.step_size);
+ do_div(ftmp, clk->info->range.step_size);

- return step * clk->info->range.step_size + fmin;
+ return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index fd49b24fd6af..99e2aace8078 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
if (rc)
return rc;

- rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
- &dax_pmem->ref);
- if (rc)
+ rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+ if (rc) {
+ percpu_ref_exit(&dax_pmem->ref);
return rc;
+ }

dax_pmem->pgmap.ref = &dax_pmem->ref;
addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
- if (IS_ERR(addr))
+ if (IS_ERR(addr)) {
+ devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+ percpu_ref_exit(&dax_pmem->ref);
return PTR_ERR(addr);
+ }

rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
&dax_pmem->ref);
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e9db895916c3..1aa67bb5d8c0 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}

return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}

return 0;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b23d9a36be1f..51c7d1b84c2e 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;

err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);

diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 1e66f808051c..2e33fd552899 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
return irq_gpio0;
}

+static bool pxa_gpio_has_pinctrl(void)
+{
+ switch (gpio_type) {
+ case PXA3XX_GPIO:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
int ret;

- ret = pinctrl_gpio_direction_input(chip->base + offset);
- if (!ret)
- return 0;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_input(chip->base + offset);
+ if (!ret)
+ return 0;
+ }

spin_lock_irqsave(&gpio_lock, flags);

@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,

writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

- ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (ret)
- return ret;
+ if (pxa_gpio_has_pinctrl()) {
+ ret = pinctrl_gpio_direction_output(chip->base + offset);
+ if (ret)
+ return ret;
+ }

spin_lock_irqsave(&gpio_lock, flags);

@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = gpiochip_generic_request;
- pchip->chip.free = gpiochip_generic_free;
+
+ if (pxa_gpio_has_pinctrl()) {
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
+ }
+
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 94396caaca75..d5d79727c55d 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -720,4 +720,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a576b8bbb3cd..dea40b322191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
}
}

-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,

/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
}

static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
* least a few frames. Should never hit the max retry assert below.
*/
if (wait == true) {
- for (retryCount = 0; retryCount <= 1000; retryCount++) {
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (enable) {
- if (psr_state != 0)
- break;
- } else {
- if (psr_state == 0)
- break;
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
}
- udelay(500);
- }

- /* assert if max retry hit */
- ASSERT(retryCount <= 1000);
+ /* assert if max retry hit */
+ if (retryCount >= 1000)
+ ASSERT(0);
}
}

-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,

/* If microcontroller is not running, do nothing */
if (dmcu->dmcu_state != DMCU_RUNNING)
- return;
+ return false;

link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,

/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
}

static void dcn10_psr_wait_loop(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
const char *src,
unsigned int bytes);
void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
- void (*setup_psr)(struct dmcu *dmcu,
+ bool (*setup_psr)(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context);
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 48685cddbad1..c73bd003f845 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
return -ENODEV;

ipu->id = of_alias_get_id(np, "ipu");
+ if (ipu->id < 0)
+ ipu->id = 0;

if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
IS_ENABLED(CONFIG_DRM)) {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c7981ddd8776..e80bcd71fe1e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -528,6 +528,7 @@

#define I2C_VENDOR_ID_RAYD 0x2386
#define I2C_PRODUCT_ID_RAYD_3118 0x3118
+#define I2C_PRODUCT_ID_RAYD_4B33 0x4B33

#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ab93dd5927c3..b23c4b5854d8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1579,6 +1579,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
input_dev->dev.parent = &hid->dev;

hidinput->input = input_dev;
+ hidinput->application = application;
list_add_tail(&hidinput->list, &hid->inputs);

INIT_LIST_HEAD(&hidinput->reports);
@@ -1674,8 +1675,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
struct hid_input *hidinput;

list_for_each_entry(hidinput, &hid->inputs, list) {
- if (hidinput->report &&
- hidinput->report->application == report->application)
+ if (hidinput->application == report->application)
return hidinput;
}

@@ -1812,6 +1812,7 @@ void hidinput_disconnect(struct hid_device *hid)
input_unregister_device(hidinput->input);
else
input_free_device(hidinput->input);
+ kfree(hidinput->name);
kfree(hidinput);
}

diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 45968f7970f8..15c934ef6b18 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1167,7 +1167,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
struct hid_usage *usage,
enum latency_mode latency,
bool surface_switch,
- bool button_switch)
+ bool button_switch,
+ bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
@@ -1179,6 +1180,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,

switch (usage->hid) {
case HID_DG_INPUTMODE:
+ /*
+ * Some elan panels wrongly declare 2 input mode features,
+ * and silently ignore when we set the value in the second
+ * field. Skip the second feature and hope for the best.
+ */
+ if (*inputmode_found)
+ return false;
+
if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
report_len = hid_report_len(report);
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1194,6 +1203,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
}

field->value[index] = td->inputmode_value;
+ *inputmode_found = true;
return true;

case HID_DG_CONTACTMAX:
@@ -1231,6 +1241,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
struct hid_usage *usage;
int i, j;
bool update_report;
+ bool inputmode_found = false;

rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1249,7 +1260,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
usage,
latency,
surface_switch,
- button_switch))
+ button_switch,
+ &inputmode_found))
update_report = true;
}
}
@@ -1476,6 +1488,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;

+ if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);

ret = hid_parse(hdev);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index eae0cb3ddec6..5fd1159fc095 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_4B33,
+ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
};

diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 658dc765753b..553adccb05d7 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -242,6 +242,10 @@ int hv_synic_alloc(void)

return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}

@@ -254,12 +258,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);

- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}

kfree(hv_context.hv_numa_map);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 60e4d0e939a3..715b6fdb4989 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -868,7 +868,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
else
- bus->get_clk_reg_val = match->data;
+ bus->get_clk_reg_val = (u32 (*)(u32))match->data;

/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index aa726607645e..45fcf0c37a9e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -139,6 +139,7 @@

#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
+#define SBREG_SMBCTRL_DNV 0xcf000c

/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS BIT(3)
@@ -1396,7 +1397,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);

res = &tco_res[ICH_RES_MEM_OFF];
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ else
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;

diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 9a71e50d21f1..0c51c0ffdda9 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+ unsigned long flags;

/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

+ local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)

xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ local_irq_restore(flags);
+
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index bff10ab141b0..dafcb6f019b3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1445,9 +1445,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
(addr->src_addr.ss_family == AF_IB ||
rdma_protocol_roce(id->device, port_num));

- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * Net namespaces must match, and if the listener is listening
+ * on a specific netdevice, then the netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}

static struct rdma_id_private *cma_find_listener(
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 63b5b3edabcb..8dc336a85128 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -494,6 +494,9 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
step_idx = 1;
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
step_idx = 0;
+ } else {
+ ret = -EINVAL;
+ goto err_dma_alloc_l1;
}

/* set HEM base address to hardware */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a6e11be0ea0f..c00925ed9da8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -273,7 +273,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata = wr->ex.imm_data;
+ ud_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
default:
ud_sq_wqe->immtdata = 0;
@@ -371,7 +372,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata = wr->ex.imm_data;
+ rc_sq_wqe->immtdata =
+ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
break;
case IB_WR_SEND_WITH_INV:
rc_sq_wqe->inv_key =
@@ -1931,7 +1933,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1940,7 +1943,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->immtdata;
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immtdata));
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index d47675f365c7..7e2c740e0df5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -768,7 +768,7 @@ struct hns_roce_v2_cqe {
__le32 byte_4;
union {
__le32 rkey;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_12;
__le32 byte_16;
@@ -926,7 +926,7 @@ struct hns_roce_v2_cq_db {
struct hns_roce_v2_ud_send_wqe {
__le32 byte_4;
__le32 msg_len;
- __be32 immtdata;
+ __le32 immtdata;
__le32 byte_16;
__le32 byte_20;
__le32 byte_24;
@@ -1012,7 +1012,7 @@ struct hns_roce_v2_rc_send_wqe {
__le32 msg_len;
union {
__le32 inv_key;
- __be32 immtdata;
+ __le32 immtdata;
};
__le32 byte_16;
__le32 byte_20;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 6709328d90f8..c7e034963738 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -822,6 +822,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
+ neigh->ah->valid = 1;
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 54fe190fd4bc..48c5ccab00a0 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1658,10 +1658,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
+ /* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
- data->num_touchids = object->num_report_ids
- * mxt_obj_instances(object);
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
+ data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 1d647104bccc..b73c6a7bf7f2 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -24,6 +24,7 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -2211,8 +2212,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
reg &= ~clr;
reg |= set;
writel_relaxed(reg | GBPA_UPDATE, gbpa);
- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
- 1, ARM_SMMU_POLL_TIMEOUT_US);
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
}

static void arm_smmu_free_msis(void *data)
@@ -2392,8 +2397,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)

/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
- if (reg & CR0_SMMUEN)
+ if (reg & CR0_SMMUEN) {
+ if (is_kdump_kernel()) {
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ arm_smmu_device_disable(smmu);
+ return -EBUSY;
+ }
+
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ }

ret = arm_smmu_device_disable(smmu);
if (ret)
@@ -2491,10 +2503,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
enables |= CR0_SMMUEN;
} else {
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret) {
- dev_err(smmu->dev, "GBPA not responding to update\n");
+ if (ret)
return ret;
- }
}
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 09b47260c74b..feb1664815b7 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -73,7 +73,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;

unsigned int context_id;
- spinlock_t lock; /* Protects mappings */
+ struct mutex mutex; /* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
@@ -595,7 +595,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;

- spin_lock_init(&domain->lock);
+ mutex_init(&domain->mutex);

return &domain->io_domain;
}
@@ -641,7 +641,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned long flags;
unsigned int i;
int ret = 0;

@@ -650,7 +649,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}

- spin_lock_irqsave(&domain->lock, flags);
+ mutex_lock(&domain->mutex);

if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@@ -674,7 +673,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
} else
dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

- spin_unlock_irqrestore(&domain->lock, flags);
+ mutex_unlock(&domain->mutex);

if (ret < 0)
return ret;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 25c1ce811053..1fdd09ebb3f1 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -534,8 +534,9 @@ init_pmu(void)
int timeout;
struct adb_request req;

- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
+ /* Negate TREQ. Set TACK to input and TREQ to output. */
+ out_8(&via[B], in_8(&via[B]) | TREQ);
+ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);

pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@@ -1418,8 +1419,8 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;

- if (via[B] & TREQ) {
- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+ if (in_8(&via[B]) & TREQ) {
+ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
out_8(&via[IFR], SR_INT);
return NULL;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ce14a3d1f609..44df244807e5 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2250,7 +2250,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
{0, 2, "Invalid number of cache feature arguments"},
};

- int r;
+ int r, mode_ctr = 0;
unsigned argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2264,14 +2264,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
while (argc--) {
arg = dm_shift_arg(as);

- if (!strcasecmp(arg, "writeback"))
+ if (!strcasecmp(arg, "writeback")) {
cf->io_mode = CM_IO_WRITEBACK;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "writethrough"))
+ else if (!strcasecmp(arg, "writethrough")) {
cf->io_mode = CM_IO_WRITETHROUGH;
+ mode_ctr++;
+ }

- else if (!strcasecmp(arg, "passthrough"))
+ else if (!strcasecmp(arg, "passthrough")) {
cf->io_mode = CM_IO_PASSTHROUGH;
+ mode_ctr++;
+ }

else if (!strcasecmp(arg, "metadata2"))
cf->metadata_version = 2;
@@ -2282,6 +2288,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
}
}

+ if (mode_ctr > 1) {
+ *error = "Duplicate cache io_mode features requested";
+ return -EINVAL;
+ }
+
return 0;
}

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2031506a0ecd..49107c52c8e6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4521,6 +4521,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}

if (test_bit(R5_InJournal, &dev->flags))
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index a0d0b53c91d7..a5de65dcf784 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));

/* 0x81 - 0x94 */
- data[0] = 0x18; /* xtal 24 MHz */
+ if (priv->xtal == SONY_HELENE_XTAL_16000)
+ data[0] = 0x10; /* xtal 16 MHz */
+ else
+ data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 7be636237acf..0f324055cc9f 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1114,6 +1114,14 @@ static int initialize_vpif(void)
return err;
}

+static void free_vpif_objs(void)
+{
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
+ kfree(vpif_obj.dev[i]);
+}
+
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1255,11 +1263,6 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}

- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();

@@ -1271,7 +1274,7 @@ static __init int vpif_probe(struct platform_device *pdev)
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
if (err) {
v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
- return err;
+ goto vpif_free;
}

while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
@@ -1314,7 +1317,10 @@ static __init int vpif_probe(struct platform_device *pdev)
if (vpif_obj.sd[i])
vpif_obj.sd[i]->grp_id = 1 << i;
}
- vpif_probe_complete();
+ err = vpif_probe_complete();
+ if (err) {
+ goto probe_subdev_out;
+ }
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
@@ -1334,6 +1340,8 @@ static __init int vpif_probe(struct platform_device *pdev)
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
+vpif_free:
+ free_vpif_objs();

return err;
}
@@ -1355,8 +1363,8 @@ static int vpif_remove(struct platform_device *device)
ch = vpif_obj.dev[i];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
- kfree(vpif_obj.dev[i]);
}
+ free_vpif_objs();

return 0;
}
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
index 226f36ef7419..2bf65805f2c1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;

- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
- data_type;
-
if (tg->enabled) {
/* Config Test Generator */
struct v4l2_mbus_framefmt *f =
@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
writel_relaxed(val, csid->base +
CAMSS_CSID_TG_DT_n_CGG_0(0));

+ dt = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
+
/* 5:0 data type */
val = dt;
writel_relaxed(val, csid->base +
@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
val = tg->payload_mode;
writel_relaxed(val, csid->base +
CAMSS_CSID_TG_DT_n_CGG_2(0));
+
+ df = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
} else {
struct csid_phy_config *phy = &csid->phy;

@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)

writel_relaxed(val,
csid->base + CAMSS_CSID_CORE_CTRL_1);
+
+ dt = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
+ df = csid_get_fmt_entry(
+ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
}

/* Config LUT */

dt_shift = (cid % 4) * 8;
- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
- decode_format;

val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val &= ~(0xff << dt_shift);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index daef72d410a3..dc5ae8025832 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -339,6 +339,7 @@ enum rcar_csi2_pads {

struct rcar_csi2_info {
int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
+ int (*confirm_start)(struct rcar_csi2 *priv);
const struct rcsi2_mbps_reg *hsfreqrange;
unsigned int csi0clkfreqrange;
bool clear_ulps;
@@ -545,6 +546,13 @@ static int rcsi2_start(struct rcar_csi2 *priv)
if (ret)
return ret;

+ /* Confirm start */
+ if (priv->info->confirm_start) {
+ ret = priv->info->confirm_start(priv);
+ if (ret)
+ return ret;
+ }
+
/* Clear Ultra Low Power interrupt. */
if (priv->info->clear_ulps)
rcsi2_write(priv, INTSTATE_REG,
@@ -880,6 +888,11 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
}

static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
+}
+
+static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
{
static const struct phtw_value step1[] = {
{ .data = 0xed, .code = 0x34 },
@@ -890,12 +903,6 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
{ /* sentinel */ },
};

- int ret;
-
- ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
- if (ret)
- return ret;
-
return rcsi2_phtw_write_array(priv, step1);
}

@@ -949,6 +956,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {

static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
+ .confirm_start = rcsi2_confirm_start_v3m_e3,
};

static const struct of_device_id rcar_csi2_of_table[] = {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index a80251ed3143..780548dd650e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_buf *dst_buf, *src_buf;
- size_t dec_y_addr;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ u32 dec_y_addr;
unsigned int frame_type;

/* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
return;
- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
+ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);

/* Copy timestamp / timecode from decoded src to dst and set
appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dec_y_addr) {
- dst_buf->b->timecode =
- src_buf->b->timecode;
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
+ if (addr == dec_y_addr) {
+ dst_buf->b->timecode = src_buf->b->timecode;
dst_buf->b->vb2_buf.timestamp =
src_buf->b->vb2_buf.timestamp;
dst_buf->b->flags &=
@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_buf;
- size_t dspl_y_addr;
+ u32 dspl_y_addr;
unsigned int frame_type;

- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
+ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
if (IS_MFCV6_PLUS(dev))
frame_type = s5p_mfc_hw_call(dev->mfc_ops,
get_disp_frame_type, ctx);
@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
/* The MFC returns address of the buffer, now we have to
* check which videobuf does it correspond to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
+
/* Check if this is the buffer we're looking for */
- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
- == dspl_y_addr) {
+ if (addr == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->sequence = ctx->sequence;
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 0d4fdd34a710..9ce8b4d79d1f 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -2101,14 +2101,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};

-static struct dvb_usb_device_properties *p1100;
static const struct dvb_usb_device_description d1100 = {
"Prof 1100 USB ",
{&dw2102_table[PROF_1100], NULL},
{NULL},
};

-static struct dvb_usb_device_properties *s660;
static const struct dvb_usb_device_description d660 = {
"TeVii S660 USB",
{&dw2102_table[TEVII_S660], NULL},
@@ -2127,14 +2125,12 @@ static const struct dvb_usb_device_description d480_2 = {
{NULL},
};

-static struct dvb_usb_device_properties *p7500;
static const struct dvb_usb_device_description d7500 = {
"Prof 7500 USB DVB-S2",
{&dw2102_table[PROF_7500], NULL},
{NULL},
};

-static struct dvb_usb_device_properties *s421;
static const struct dvb_usb_device_description d421 = {
"TeVii S421 PCI",
{&dw2102_table[TEVII_S421], NULL},
@@ -2334,6 +2330,11 @@ static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int retval = -ENOMEM;
+ struct dvb_usb_device_properties *p1100;
+ struct dvb_usb_device_properties *s660;
+ struct dvb_usb_device_properties *p7500;
+ struct dvb_usb_device_properties *s421;
+
p1100 = kmemdup(&s6x0_properties,
sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
if (!p1100)
@@ -2402,8 +2403,16 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, &t220_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
- THIS_MODULE, NULL, adapter_nr))
+ THIS_MODULE, NULL, adapter_nr)) {
+
+ /* clean up copied properties */
+ kfree(s421);
+ kfree(p7500);
+ kfree(s660);
+ kfree(p1100);
+
return 0;
+ }

retval = -ENODEV;
kfree(s421);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 6c8438311d3b..ff5e41ac4723 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3376,7 +3376,9 @@ void em28xx_free_device(struct kref *ref)
if (!dev->disconnected)
em28xx_release_resources(dev);

- kfree(dev->alt_max_pkt_size_isoc);
+ if (dev->ts == PRIMARY_TS)
+ kfree(dev->alt_max_pkt_size_isoc);
+
kfree(dev);
}
EXPORT_SYMBOL_GPL(em28xx_free_device);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index f70845e7d8c6..45b24776a695 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -655,12 +655,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS1_CAPTURE_ENABLE : 0x00,
- EM2874_TS1_CAPTURE_ENABLE);
+ EM2874_TS1_CAPTURE_ENABLE | EM2874_TS1_FILTER_ENABLE | EM2874_TS1_NULL_DISCARD);
else
rc = em28xx_write_reg_bits(dev,
EM2874_R5F_TS_ENABLE,
start ? EM2874_TS2_CAPTURE_ENABLE : 0x00,
- EM2874_TS2_CAPTURE_ENABLE);
+ EM2874_TS2_CAPTURE_ENABLE | EM2874_TS2_FILTER_ENABLE | EM2874_TS2_NULL_DISCARD);
} else {
/* FIXME: which is the best order? */
/* video registers are sampled by VREF */
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index b778d8a1983e..a73faf12f7e4 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -218,7 +218,9 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
dvb_alt = dev->dvb_alt_isoc;
}

- usb_set_interface(udev, dev->ifnum, dvb_alt);
+ if (!dev->board.has_dual_ts)
+ usb_set_interface(udev, dev->ifnum, dvb_alt);
+
rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
if (rc < 0)
return rc;
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 31112f622b88..475e5b3790ed 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
}
- } else {
+ } else if (pdata) {
for (i = 0; i < pdata->num_sub_devices; i++) {
pdata->sub_devices[i].dev.parent = dev;
ret = platform_device_register(&pdata->sub_devices[i]);
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 36dcd98977d6..4f545fdc6ebc 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -776,6 +776,13 @@ static int rave_sp_probe(struct serdev_device *serdev)
return ret;

serdev_device_set_baudrate(serdev, baud);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret) {
+ dev_err(dev, "Failed to set parity\n");
+ return ret;
+ }

ret = rave_sp_get_status(sp);
if (ret) {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 47012c0899cd..7a30546880a4 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -209,14 +209,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
* The TSC_ADC_SS controller design assumes the OCP clock is
* at least 6x faster than the ADC clock.
*/
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get TSC fck\n");
err = PTR_ERR(clk);
goto err_disable_clk;
}
clock_rate = clk_get_rate(clk);
- clk_put(clk);
tscadc->clk_div = clock_rate / ADC_CLK;

/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 7b2dddcdd46d..42f7a12894d6 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
goto scif_bind_exit;
}
} else {
- pn = scif_get_new_port();
- if (!pn) {
- ret = -ENOSPC;
+ ret = scif_get_new_port();
+ if (ret < 0)
goto scif_bind_exit;
- }
+ pn = ret;
}

ep->state = SCIFEP_BOUND;
@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
err = -EISCONN;
break;
case SCIFEP_UNBOUND:
- ep->port.port = scif_get_new_port();
- if (!ep->port.port) {
- err = -ENOSPC;
- } else {
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- }
+ err = scif_get_new_port();
+ if (err < 0)
+ break;
+ ep->port.port = err;
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
/* Fall through */
case SCIFEP_BOUND:
/*
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5ec3f5a43718..14a5e9da32bd 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}

/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* get reference of pdev for request_firmware
*/
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index b01d15ec4c56..3e3e6a8f1abc 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -2668,8 +2668,8 @@ static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
return subop && instr_idx < subop->ninstrs;
}

-static int nand_subop_get_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
if (instr_idx)
return 0;
@@ -2688,12 +2688,12 @@ static int nand_subop_get_start_off(const struct nand_subop *subop,
*
* Given an address instruction, returns the offset of the first cycle to issue.
*/
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;

return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2710,14 +2710,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
*
* Given an address instruction, returns the number of address cycle to issue.
*/
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off, end_off;

- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;

start_off = nand_subop_get_addr_start_off(subop, instr_idx);

@@ -2742,12 +2742,12 @@ EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
*
* Given a data instruction, returns the offset to start from.
*/
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;

return nand_subop_get_start_off(subop, instr_idx);
}
@@ -2764,14 +2764,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
*
* Returns the length of the chunk of data to send/receive.
*/
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int instr_idx)
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
int start_off = 0, end_off;

- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
- !nand_instr_is_data(&subop->instrs[instr_idx]))
- return -EINVAL;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;

start_off = nand_subop_get_data_start_off(subop, instr_idx);

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 82ac1d10f239..b4253d0e056b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3196,7 +3196,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)

on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
- mvneta_port_up(pp);

netdev_update_features(dev);

diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0c5b68e7da51..9b3167054843 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -22,7 +22,7 @@
#include <linux/mdio-mux.h>
#include <linux/delay.h>

-#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_PARAM_OFFSET 0x23c
#define MDIO_PARAM_MIIM_CYCLE 29
#define MDIO_PARAM_INTERNAL_SEL 25
#define MDIO_PARAM_BUS_ID 22
@@ -30,20 +30,22 @@
#define MDIO_PARAM_PHY_ID 16
#define MDIO_PARAM_PHY_DATA 0

-#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_OFFSET 0x240
#define MDIO_READ_DATA_MASK 0xffff
-#define MDIO_ADDR_OFFSET 0x08
+#define MDIO_ADDR_OFFSET 0x244

-#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_OFFSET 0x248
#define MDIO_CTRL_WRITE_OP 0x1
#define MDIO_CTRL_READ_OP 0x2

-#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_OFFSET 0x24c
#define MDIO_STAT_DONE 1

#define BUS_MAX_ADDR 32
#define EXT_BUS_START_ADDR 16

+#define MDIO_REG_ADDR_SPACE_SIZE 0x250
+
struct iproc_mdiomux_desc {
void *mux_handle;
void __iomem *base;
@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->dev = &pdev->dev;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res->start & 0xfff) {
+ /* For backward compatibility in case the
+ * base address is specified with an offset.
+ */
+ dev_info(&pdev->dev, "fix base address in dt-blob\n");
+ res->start &= ~0xfff;
+ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
+ }
md->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(md->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 836e0a47b94a..747c6951b5c1 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3085,6 +3085,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;

+ /* The firmware ignores the "radar" flag of the channel
+ * and actively scans using Probe Requests on
+ * "Radar detection"/DFS channels that are not marked
+ * as "available".
+ */
+ ch->passive |= ch->chan_radar;
+
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc571..21eb3a598a86 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1584,6 +1584,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);

ath10k_wmi_put_host_mem_chunks(ar, chunks);

diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 3e1e340cd834..1cb93d09b8a9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1670,6 +1670,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
} __packed;

struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e60bea4604e4..fcd9d5eeae72 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr;
+ u16 ctl = NO_CTL;

if (!chan)
return;

+ if (!test)
+ ctl = ath9k_regd_get_ctl(reg, chan);
+
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);

- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(reg, chan),
+ ah->eep_ops->set_txpower(ah, chan, ctl,
get_antenna_gain(ah, chan), new_pwr, test);
}

diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7fdb152be0bb..a249ee747dc9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];

- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_STATUS_EOSP)) {
ieee80211_tx_status(hw, skb);
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8520523b91b4..d8d8443c1c93 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1003,6 +1003,10 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;

+ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return 0;
+
/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
iwl_pcie_conf_msix_hw(trans_pcie);

diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..a2a98087eb41 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1539,18 +1539,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,

iwl_pcie_enable_rx_wake(trans, true);

- /*
- * Reconfigure IVAR table in case of MSIX or reset ict table in
- * MSI mode since HW reset erased it.
- * Also enables interrupts - none will happen as
- * the device doesn't know we're waking it up, only when
- * the opmode actually tells it after this call.
- */
- iwl_pcie_conf_msix_hw(trans_pcie);
- if (!trans_pcie->msix_enabled)
- iwl_pcie_reset_ict(trans);
- iwl_enable_interrupts(trans);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1556,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return ret;
}

+ /*
+ * Reconfigure IVAR table in case of MSIX or reset ict table in
+ * MSI mode since HW reset erased it.
+ * Also enables interrupts - none will happen as
+ * the device doesn't know we're waking it up, only when
+ * the opmode actually tells it after this call.
+ */
+ iwl_pcie_conf_msix_hw(trans_pcie);
+ if (!trans_pcie->msix_enabled)
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+
iwl_pcie_set_pwr(trans, false);

if (!reset) {
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
- u8 beacon)
+ u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));

@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}

+ if (beacon || probe_rsp)
+ status->boottime_ns = ktime_get_boot_ns();
+
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;

- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
+ ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);

seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index cf0aa7cee5b0..a939e8d31735 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>

+#include "../pci.h"
+
/* register offsets and bit positions */

/*
@@ -130,7 +132,7 @@ struct mobiveil_pcie {
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
- void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
+ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
struct irq_domain *intx_domain;
raw_spinlock_t intx_mask_lock;
int irq;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 47cd0c037433..f96af1467984 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -14,6 +14,8 @@
#include <linux/poll.h>
#include <linux/wait.h>

+#include <linux/nospec.h>
+
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
default:
if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
return -EINVAL;
+ p.port = array_index_nospec(p.port,
+ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
break;
}
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index d6d183e9db17..b5903fffb3d0 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -216,10 +216,8 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
}

/* we will reallocate later */
- pctrl->functions = devm_kcalloc(&pdev->dev,
- max_functions,
- sizeof(*pctrl->functions),
- GFP_KERNEL);
+ pctrl->functions = kcalloc(max_functions,
+ sizeof(*pctrl->functions), GFP_KERNEL);
if (!pctrl->functions)
return -ENOMEM;

@@ -257,8 +255,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
function++;
}

- if (!found)
+ if (!found) {
+ kfree(pctrl->functions);
return -EINVAL;
+ }

if (!function->groups) {
function->groups =
@@ -267,8 +267,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
sizeof(char *),
GFP_KERNEL);

- if (!function->groups)
+ if (!function->groups) {
+ kfree(pctrl->functions);
return -ENOMEM;
+ }
}

groups = function->groups;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 1c6bb15579e1..b04edc22dad7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -383,7 +383,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;

- if (group > pctldev->num_groups)
+ if (group >= pctldev->num_groups)
return;

seq_puts(s, "\n");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 04ae139671c8..b91db89eb924 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -552,7 +552,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) {
regval = readl(regs + i);
- if (!(regval & PIN_IRQ_PENDING))
+ if (!(regval & PIN_IRQ_PENDING) ||
+ !(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
irq = irq_find_mapping(gc->irq.domain, irqnr + i);
generic_handle_irq(irq);
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index fc12badf3805..d84fab616abf 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -232,6 +232,8 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
tps->strobes = devm_kcalloc(&pdev->dev,
TPS65217_NUM_REGULATOR, sizeof(u8),
GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;

platform_set_drvdata(pdev, tps);

diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index b714a543a91d..8122807db380 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/rpmsg.h>
#include <linux/of_device.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>

#include "rpmsg_internal.h"
@@ -449,6 +450,10 @@ static int rpmsg_dev_probe(struct device *dev)
struct rpmsg_endpoint *ept = NULL;
int err;

+ err = dev_pm_domain_attach(dev, true);
+ if (err)
+ goto out;
+
if (rpdrv->callback) {
strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
chinfo.src = rpdev->src;
@@ -490,6 +495,8 @@ static int rpmsg_dev_remove(struct device *dev)

rpdrv->remove(rpdev);

+ dev_pm_domain_detach(dev, true);
+
if (rpdev->ept)
rpmsg_destroy_ept(rpdev->ept);

diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 99ba4a770406..27521fc3ef5a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}

@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}

@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);

/* Initialize the card */
- if (twa_reset_sequence(tw_dev, 0))
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENOMEM;
goto out_iounmap;
+ }

/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index cf9f2a09b47d..40c1e6e64f58 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}

@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}

@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ retval = -ENOMEM;
goto out_iounmap;
}

diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index f6179e3d6953..961ea6f7def8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ retval = -ENOMEM;
goto out_free_device_extension;
}

@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ retval = -ENOMEM;
goto out_release_mem_region;
}

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 20b249a649dd..902004dc8dc7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -672,7 +672,7 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */

uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 76a5a99605aa..d723fd1d7b26 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2687,7 +2687,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *oldrport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
- struct lpfc_nodelist *prev_ndlp;
+ struct lpfc_nodelist *prev_ndlp = NULL;

lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2736,23 +2736,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
+ /* New remoteport record does not guarantee valid
+ * host private memory area.
+ */
+ prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
- /* Same remoteport. Just reuse. */
+ /* Same remoteport - ndlp should match.
+ * Just reuse.
+ */
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
"remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p x%x x%06x\n",
+ "Data: x%x x%x %p %p x%x x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
+ prev_ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
return 0;
}
- prev_ndlp = rport->ndlp;

/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
@@ -2786,13 +2792,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Remoteport %p WWNN 0x%llx, "
+ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p\n",
- lport, remote_port,
+ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
- ndlp);
+ ndlp, prev_ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ec550ee0108e..75d34def2361 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1074,9 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
case PDS_PLOGI_COMPLETE:
case PDS_PRLI_PENDING:
case PDS_PRLI2_PENDING:
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
- __func__, __LINE__, fcport->port_name);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* Set discovery state back to GNL for a relogin attempt */
+ if (qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) {
+ fcport->disc_state = DSC_GNL;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
return;
case PDS_LOGO_PENDING:
case PDS_PORT_UNAVAILABLE:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1027b0cb7fa3..6dc1b1bd8069 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -982,8 +982,9 @@ void qlt_free_session_done(struct work_struct *work)

logo.id = sess->d_id;
logo.cmd_count = 0;
+ if (!own)
+ qlt_send_first_logo(vha, &logo);
sess->send_els_logo = 0;
- qlt_send_first_logo(vha, &logo);
}

if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 731ca0d8520a..9f3c263756a8 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
}
break;

+ case T268_BUF_TYPE_REQ_MIRROR:
+ case T268_BUF_TYPE_RSP_MIRROR:
+ /*
+ * Mirror pointers are not implemented in the
+ * driver; instead, shadow pointers are used by
+ * the driver. Skip these entries.
+ */
+ qla27xx_skip_entry(ent, buf);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ee5081ba5313..1fc87a3260cc 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -316,6 +316,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;

se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -352,7 +353,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}

- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -361,7 +362,7 @@ void __transport_register_session(

list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d8dc3d22051f..b8dc5efc606b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1745,9 +1745,11 @@ static int tcmu_configure_device(struct se_device *dev)

info = &udev->uio_info;

+ mutex_lock(&udev->cmdr_lock);
udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
sizeof(unsigned long),
GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
if (!udev->data_bitmap) {
ret = -ENOMEM;
goto err_bitmap_alloc;
@@ -1957,7 +1959,7 @@ static match_table_t tokens = {
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_err, NULL}
};

@@ -1985,13 +1987,48 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
return 0;
}

+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token, tmpval;
+ int ret = 0, token;

opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -2044,37 +2081,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_err("kstrtoint() failed for nl_reply_supported=\n");
break;
case Opt_max_data_area_mb:
- if (dev->export_count) {
- pr_err("Unable to set max_data_area_mb while exports exist\n");
- ret = -EINVAL;
- break;
- }
-
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoint(arg_p, 0, &tmpval);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoint() failed for max_data_area_mb=\n");
- break;
- }
-
- if (tmpval <= 0) {
- pr_err("Invalid max_data_area %d\n", tmpval);
- ret = -EINVAL;
- break;
- }
-
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- tmpval,
- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
- }
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
default:
break;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 45fb284d4c11..e77e63070e99 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -598,7 +598,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
enr_bits |= 3 << (i * 8);
}

- if (enr_bits)
+ if (common->base && enr_bits)
rcar_thermal_common_write(common, ENR, enr_bits);

dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 11278836ed12..0bd47007c57f 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -142,6 +142,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)

INIT_LIST_HEAD(&hwmon->tz_list);
strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
+ strreplace(hwmon->type, '-', '_');
hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
hwmon, NULL, NULL);
if (IS_ERR(hwmon->device)) {
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index bdd17d2aaafd..b121d8f8f3d7 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1881,7 +1881,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
ByteIO_t UPCIRingInd = 0;

if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
- pci_enable_device(dev))
+ pci_enable_device(dev) || i >= NUM_BOARDS)
return 0;

rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index f68c1121fa7c..6c58ad1abd7e 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -622,6 +622,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
ssize_t retval;
s32 irq_on;

+ if (count != sizeof(s32))
+ return -EINVAL;
+
+ if (copy_from_user(&irq_on, buf, count))
+ return -EFAULT;
+
mutex_lock(&idev->info_lock);
if (!idev->info) {
retval = -EINVAL;
@@ -633,21 +639,11 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
goto out;
}

- if (count != sizeof(s32)) {
- retval = -EINVAL;
- goto out;
- }
-
if (!idev->info->irqcontrol) {
retval = -ENOSYS;
goto out;
}

- if (copy_from_user(&irq_on, buf, count)) {
- retval = -EFAULT;
- goto out;
- }
-
retval = idev->info->irqcontrol(idev->info, irq_on);

out:
@@ -955,8 +951,6 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_uio_dev_add_attributes;

- info->uio_dev = idev;
-
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
@@ -972,6 +966,7 @@ int __uio_register_device(struct module *owner,
goto err_request_irq;
}

+ info->uio_dev = idev;
return 0;

err_request_irq:
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 9400a9f6318a..5057b9f0f846 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/file.h>
+#include <linux/magic.h>

/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -124,7 +125,8 @@ struct autofs_sb_info {

static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
{
- return (struct autofs_sb_info *)(sb->s_fs_info);
+ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
}

static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index b51980fc274e..846c052569dd 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -10,7 +10,6 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
-#include <linux/magic.h>

#include "autofs_i.h"

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53cac20650d8..4ab0bccfa281 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5935,7 +5935,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
* root: the root of the parent directory
* rsv: block reservation
* items: the number of items that we need do reservation
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
*
* This function is used to reserve the space for snapshot/subvolume
* creation and deletion. Those operations are different with the
@@ -5945,10 +5945,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
* the space reservation mechanism in start_transaction().
*/
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv,
- int items,
+ struct btrfs_block_rsv *rsv, int items,
bool use_global_rsv)
{
+ u64 qgroup_num_bytes = 0;
u64 num_bytes;
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5956,12 +5956,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,

if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
/* One for parent inode, two for dir entries */
- num_bytes = 3 * fs_info->nodesize;
- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+ qgroup_num_bytes = 3 * fs_info->nodesize;
+ ret = btrfs_qgroup_reserve_meta_prealloc(root,
+ qgroup_num_bytes, true);
if (ret)
return ret;
- } else {
- num_bytes = 0;
}

num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5973,8 +5972,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
if (ret == -ENOSPC && use_global_rsv)
ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);

- if (ret && num_bytes)
- btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+ if (ret && qgroup_num_bytes)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);

return ret;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b077544b5232..f3d6be0c657b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3463,6 +3463,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,

same_lock_start = min_t(u64, loff, dst_loff);
same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+ } else {
+ /*
+ * If the source and destination inodes are different, the
+ * source's range end offset matches the source's i_size, that
+ * i_size is not a multiple of the sector size, and the
+ * destination range does not go past the destination's i_size,
+ * we must round down the length to the nearest sector size
+ * multiple. If we don't do this adjustment we end replacing
+ * with zeroes the bytes in the range that starts at the
+ * deduplication range's end offset and ends at the next sector
+ * size multiple.
+ */
+ if (loff + olen == i_size_read(src) &&
+ dst_loff + len < i_size_read(dst)) {
+ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+ len = round_down(i_size_read(src), sz) - loff;
+ olen = len;
+ }
}

again:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9d02563b2147..44043f809a3c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2523,7 +2523,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
if (tcon == NULL)
return -ENOMEM;

- snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);

/* cannot fail */
nls_codepage = load_nls_default();
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9051b9dfd590..d279fa5472db 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -469,6 +469,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
oparms.create_options = CREATE_NOT_DIR;
+ if (backup_cred(cifs_sb))
+ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ee6c4a952ce9..5ecbc99f46e4 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -626,7 +626,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;

@@ -775,7 +778,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_EA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;

@@ -854,7 +860,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_WRITE_EA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;

@@ -1460,7 +1469,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = fid;
oparms.reconnect = false;

@@ -1735,7 +1747,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
oparms.fid = &fid;
oparms.reconnect = false;

@@ -3463,7 +3478,7 @@ struct smb_version_values smb21_values = {
struct smb_version_values smb3any_values = {
.version_string = SMB3ANY_VERSION_STRING,
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3484,7 +3499,7 @@ struct smb_version_values smb3any_values = {
struct smb_version_values smbdefault_values = {
.version_string = SMBDEFAULT_VERSION_STRING,
.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3505,7 +3520,7 @@ struct smb_version_values smbdefault_values = {
struct smb_version_values smb30_values = {
.version_string = SMB30_VERSION_STRING,
.protocol_id = SMB30_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3526,7 +3541,7 @@ struct smb_version_values smb30_values = {
struct smb_version_values smb302_values = {
.version_string = SMB302_VERSION_STRING,
.protocol_id = SMB302_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3548,7 +3563,7 @@ struct smb_version_values smb302_values = {
struct smb_version_values smb311_values = {
.version_string = SMB311_VERSION_STRING,
.protocol_id = SMB311_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 44e511a35559..82be1dfeca33 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2179,6 +2179,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
*oplock == SMB2_OPLOCK_LEVEL_NONE)
req->RequestedOplockLevel = *oplock;
+ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+ (oparms->create_options & CREATE_NOT_FILE))
+ req->RequestedOplockLevel = *oplock; /* no srv lease support */
else {
rc = add_lease_context(server, iov, &n_iov,
oparms->fid->lease_key, oplock);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4d8b1de83143..b6f2dc8163e1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1680,18 +1680,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
goto enospc;
}
}
spin_unlock(&sbi->stat_lock);

- if (unlikely(release))
+ if (unlikely(release)) {
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
+ }
f2fs_i_blocks_write(inode, *count, true, true);
return 0;

enospc:
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
return -ENOSPC;
}
@@ -1954,8 +1956,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct page *page = find_lock_page(mapping, index);
+ struct page *page;

+ if (!for_write)
+ page = find_get_page_flags(mapping, index,
+ FGP_LOCK | FGP_ACCESSED);
+ else
+ page = find_lock_page(mapping, index);
if (page)
return page;

@@ -2812,7 +2819,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3ffa341cf586..4c9f9bcbd2d9 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1882,7 +1882,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
- int ret;
+ int ret = 0;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9093be6e7a7d..37ab2d10a872 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -986,7 +986,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
goto next;

sum = page_address(sum_page);
- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+ "type [%d, %d] in SSA and SIT",
+ segno, type, GET_SUM_TYPE((&sum->footer)));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto next;
+ }

/*
* this is to avoid deadlock:
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 043830be5662..2bcb2d36f024 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -130,6 +130,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;

+ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(dn);
+ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+ f2fs_msg(fio.sbi->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dn->inode->i_ino, dn->data_blkaddr);
+ return -EINVAL;
+ }
+
f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

f2fs_do_read_inline_data(page, dn->inode_page);
@@ -363,6 +373,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
if (err)
goto out;

+ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(&dn);
+ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dir->i_ino, dn.data_blkaddr);
+ err = -EINVAL;
+ goto out;
+ }
+
f2fs_wait_on_page_writeback(page, DATA, true);

dentry_blk = page_address(page);
@@ -477,6 +498,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
return 0;
recover:
lock_page(ipage);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index f121c864f4c0..cf0f944fcaea 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -197,6 +197,16 @@ static bool sanity_check_inode(struct inode *inode)
__func__, inode->i_ino);
return false;
}
+
+ if (f2fs_has_extra_attr(inode) &&
+ !f2fs_sb_has_extra_attr(sbi->sb)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) is with extra_attr, "
+ "but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
return true;
}

@@ -249,6 +259,11 @@ static int do_read_inode(struct inode *inode)

get_inline_info(inode, ri);

+ if (!sanity_check_inode(inode)) {
+ f2fs_put_page(node_page, 1);
+ return -EINVAL;
+ }
+
fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
le16_to_cpu(ri->i_extra_isize) : 0;

@@ -330,10 +345,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
- if (!sanity_check_inode(inode)) {
- ret = -EINVAL;
- goto bad_inode;
- }
make_now:
if (ino == F2FS_NODE_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_node_aops;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 10643b11bd59..52ed02b0327c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1633,7 +1633,9 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
!is_cold_node(page)))
continue;
lock_node:
- if (!trylock_page(page))
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ lock_page(page);
+ else if (!trylock_page(page))
continue;

if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1968,7 +1970,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}

-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1986,7 +1988,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
break;

blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+ if (blk_addr == NEW_ADDR)
+ return -EINVAL;
+
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
} else {
@@ -1995,6 +2000,8 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
+
+ return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2050,11 +2057,11 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
up_read(&nm_i->nat_tree_lock);
}

-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- int i = 0;
+ int i = 0, ret;
nid_t nid = nm_i->next_scan_nid;

if (unlikely(nid >= nm_i->max_nid))
@@ -2062,17 +2069,17 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,

/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;

if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
- return;
+ return 0;

if (!mount) {
/* try to find free nids in free_nid_bitmap */
scan_free_nid_bits(sbi);

if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
}

/* readahead nat pages to be scanned */
@@ -2086,8 +2093,16 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);

- scan_nat_page(sbi, page, nid);
+ ret = scan_nat_page(sbi, page, nid);
f2fs_put_page(page, 1);
+
+ if (ret) {
+ up_read(&nm_i->nat_tree_lock);
+ f2fs_bug_on(sbi, !mount);
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "NAT is corrupt, run fsck to fix it");
+ return -EINVAL;
+ }
}

nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2108,13 +2123,19 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,

f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
+
+ return 0;
}

-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
+ int ret;
+
mutex_lock(&NM_I(sbi)->build_lock);
- __f2fs_build_free_nids(sbi, sync, mount);
+ ret = __f2fs_build_free_nids(sbi, sync, mount);
mutex_unlock(&NM_I(sbi)->build_lock);
+
+ return ret;
}

/*
@@ -2801,8 +2822,7 @@ int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
/* load free nid status from nat_bits table */
load_free_nid_bitmap(sbi);

- f2fs_build_free_nids(sbi, true, true);
- return 0;
+ return f2fs_build_free_nids(sbi, true, true);
}

void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 38f25f0b193a..ad70e62c5da4 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -241,8 +241,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
struct page *page = NULL;
block_t blkaddr;
unsigned int loop_cnt = 0;
- unsigned int free_blocks = sbi->user_block_count -
- valid_user_blocks(sbi);
+ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+ valid_user_blocks(sbi);
int err = 0;

/* get node pages in the current segment */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9efce174c51a..43fecd5eb252 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1643,21 +1643,30 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;

mutex_lock(&dirty_i->seglist_lock);

while (1) {
int i;
+
+ if (need_align && end != -1)
+ end--;
start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
if (start >= MAIN_SEGS(sbi))
break;
end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
start + 1);

- for (i = start; i < end; i++)
- clear_bit(i, prefree_map);
+ if (need_align) {
+ start = rounddown(start, sbi->segs_per_sec);
+ end = roundup(end, sbi->segs_per_sec);
+ }

- dirty_i->nr_dirty[PRE] -= end - start;
+ for (i = start; i < end; i++) {
+ if (test_and_clear_bit(i, prefree_map))
+ dirty_i->nr_dirty[PRE]--;
+ }

if (!test_opt(sbi, DISCARD))
continue;
@@ -2437,6 +2446,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;

if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
@@ -2454,6 +2464,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+ if (need_align) {
+ start_segno = rounddown(start_segno, sbi->segs_per_sec);
+ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+ }

cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index f18fc82fbe99..38c549d77a80 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -448,6 +448,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;

+ if (IS_CURSEC(sbi, secno))
+ goto skip_free;
next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
@@ -455,6 +457,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
+skip_free:
spin_unlock(&free_i->segmap_lock);
}

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3995e926ba3a..128d489acebb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2229,9 +2229,9 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return 1;
}

- if (secs_per_zone > total_sections) {
+ if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_msg(sb, KERN_INFO,
- "Wrong secs_per_zone (%u > %u)",
+ "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
return 1;
}
@@ -2282,12 +2282,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
+ unsigned int sit_segs, nat_segs;
+ unsigned int sit_bitmap_size, nat_bitmap_size;
+ unsigned int log_blocks_per_seg;
int i;

total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+ fsmeta += sit_segs;
+ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+ fsmeta += nat_segs;
fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

@@ -2318,6 +2323,18 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
return 1;
}

+ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+
+ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong bitmap size: sit: %u, nat:%u",
+ sit_bitmap_size, nat_bitmap_size);
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 2e7e611deaef..bca1236fd6fa 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/compiler.h>
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
@@ -286,8 +287,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
a->struct_type == GC_THREAD);

- if (gc_entry)
- down_read(&sbi->sb->s_umount);
+ if (gc_entry) {
+ if (!down_read_trylock(&sbi->sb->s_umount))
+ return -EAGAIN;
+ }
ret = __sbi_store(a, sbi, buf, count);
if (gc_entry)
up_read(&sbi->sb->s_umount);
@@ -516,7 +519,8 @@ static struct kobject f2fs_feat = {
.kset = &f2fs_kset,
};

-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -543,7 +547,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
return 0;
}

-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -567,7 +572,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
return 0;
}

-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 5d57e818d0c3..6d049dfddb14 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -215,9 +215,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
{
u32 oldseq, newseq;

- /* Is the stateid still not initialised? */
+ /* Is the stateid not initialised? */
if (!pnfs_layout_is_valid(lo))
- return NFS4ERR_DELAY;
+ return NFS4ERR_NOMATCHING_LAYOUT;

/* Mismatched stateid? */
if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a813979b5be0..cb905c0e606c 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -883,16 +883,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)

if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
goto out_invalidcred;
+ }
}

cps.minorversion = hdr_arg.minorversion;
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
return rpc_system_err;
-
+ }
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(nops, rqstp, &xdr_in,
rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 979631411a0e..d7124fb12041 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1127,7 +1127,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
nfs_server_copy_userdata(server, parent_server);

/* Get a client representation */
-#ifdef CONFIG_SUNRPC_XPRT_RDMA
+#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
rpc_set_port(data->addr, NFS_RDMA_PORT);
error = nfs4_set_client(server, data->hostname,
data->addr,
@@ -1139,7 +1139,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
parent_client->cl_net);
if (!error)
goto init_server;
-#endif /* CONFIG_SUNRPC_XPRT_RDMA */
+#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */

rpc_set_port(data->addr, NFS_PORT);
error = nfs4_set_client(server, data->hostname,
@@ -1153,7 +1153,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;

-#ifdef CONFIG_SUNRPC_XPRT_RDMA
+#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
init_server:
#endif
error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 773bcb1d4044..5482dd6ae9ef 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -520,6 +520,7 @@ struct hid_input {
const char *name;
bool registered;
struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
};

enum hid_type {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 22651e124071..a590419e46c5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -340,7 +340,7 @@ struct kioctx_table;
struct mm_struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
- u32 seqnum;
+ u64 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};

diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 3e8ec3b8a39c..87c635d6c773 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -986,14 +986,14 @@ struct nand_subop {
unsigned int last_instr_end_off;
};

-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int op_id);
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);

/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
#ifdef CONFIG_SWAP
SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index a5b3aa8d281f..a09b28f76460 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}

-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}

#endif /* __LINUX_VMACACHE_H */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 7363f18e65a5..813282cc8af6 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -902,13 +902,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}

static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}

/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f80afc674f02..517907b082df 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -608,15 +608,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
bool bringup = st->bringup;
enum cpuhp_state state;

+ if (WARN_ON_ONCE(!st->should_run))
+ return;
+
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();

- if (WARN_ON_ONCE(!st->should_run))
- return;
-
cpuhp_lock_acquire(bringup);

if (st->single) {
@@ -928,7 +928,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
if (ret) {
st->target = prev_state;
- undo_cpu_down(cpu, st);
+ if (st->state < prev_state)
+ undo_cpu_down(cpu, st);
break;
}
}
@@ -981,7 +982,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f89a78e2792b..443941aa784e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -129,19 +129,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
spin_unlock_irqrestore(&watchdog_lock, *flags);
}

+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
/*
* Interval: 0.5sec Threshold: 0.0625s
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

+static void clocksource_watchdog_work(struct work_struct *work)
+{
+ /*
+ * We cannot directly run clocksource_watchdog_kthread() here, because
+ * clocksource_select() calls timekeeping_notify() which uses
+ * stop_machine(). One cannot use stop_machine() from a workqueue() due
+ * lock inversions wrt CPU hotplug.
+ *
+ * Also, we only ever run this work once or twice during the lifetime
+ * of the kernel, so there is no point in creating a more permanent
+ * kthread for this.
+ *
+ * If kthread_run fails the next watchdog scan over the
+ * watchdog_list will find the unstable clock again.
+ */
+ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
static void __clocksource_unstable(struct clocksource *cs)
{
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;

/*
- * If the clocksource is registered clocksource_watchdog_work() will
+ * If the clocksource is registered clocksource_watchdog_kthread() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
@@ -152,7 +173,7 @@ static void __clocksource_unstable(struct clocksource *cs)
if (cs->mark_unstable)
cs->mark_unstable(cs);

- /* kick clocksource_watchdog_work() */
+ /* kick clocksource_watchdog_kthread() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -162,7 +183,7 @@ static void __clocksource_unstable(struct clocksource *cs)
* @cs: clocksource to be marked unstable
*
* This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -387,9 +408,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
}
}

-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
@@ -414,12 +433,13 @@ static int __clocksource_watchdog_work(void)
return select;
}

-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
{
mutex_lock(&clocksource_mutex);
- if (__clocksource_watchdog_work())
+ if (__clocksource_watchdog_kthread())
clocksource_select();
mutex_unlock(&clocksource_mutex);
+ return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -438,7 +458,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

@@ -672,7 +692,7 @@ static int __init clocksource_done_booting(void)
/*
* Run the watchdog first to eliminate unstable clock sources
*/
- __clocksource_watchdog_work();
+ __clocksource_watchdog_kthread();
clocksource_select();
mutex_unlock(&clocksource_mutex);
return 0;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index cc2d23e6ff61..786f8c014e7e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)

raw_spin_lock_irq(&base->lock);

+ /*
+ * timer_base::must_forward_clk must be cleared before running
+ * timers so that any timer functions that call mod_timer() will
+ * not try to forward the base. Idle tracking / clock forwarding
+ * logic is only used with BASE_STD timers.
+ *
+ * The must_forward_clk flag is cleared unconditionally also for
+ * the deferrable base. The deferrable base is not affected by idle
+ * tracking and never forwarded, so clearing the flag is a NOOP.
+ *
+ * The fact that the deferrable base is never forwarded can cause
+ * large variations in granularity for deferrable timers, but they
+ * can be deferred for long periods due to idle anyway.
+ */
+ base->must_forward_clk = false;
+
while (time_after_eq(jiffies, base->clk)) {

levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

- /*
- * must_forward_clk must be cleared before running timers so that any
- * timer functions that call mod_timer will not try to forward the
- * base. idle trcking / clock forwarding logic is only used with
- * BASE_STD timers.
- *
- * The deferrable base does not do idle tracking at all, so we do
- * not forward it. This can result in very large variations in
- * granularity for deferrable timers, but they can be deferred for
- * long periods due to idle.
- */
- base->must_forward_clk = false;
-
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
diff --git a/mm/debug.c b/mm/debug.c
index 38c926520c97..bd10aad8539a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+ pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",

- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7deb49f69e27..785252397e35 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1341,7 +1341,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
if (__PageMovable(page))
return pfn;
if (PageHuge(page)) {
- if (page_huge_active(page))
+ if (hugepage_migration_supported(page_hstate(page)) &&
+ page_huge_active(page))
return pfn;
else
pfn = round_up(pfn + 1,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3222193c46c6..65f2e6481c99 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7649,6 +7649,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
+
+ if (!hugepage_migration_supported(page_hstate(page)))
+ goto unmovable;
+
iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
continue;
}
diff --git a/mm/vmacache.c b/mm/vmacache.c
index db7596eb6132..f1729617dc85 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -7,44 +7,6 @@
#include <linux/mm.h>
#include <linux/vmacache.h>

-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3bba8f4b08a9..253975cce943 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;

- strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->name, req->name, sizeof(hid->name));

snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 2589a6b78aa1..013fdb6fa07a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1786,7 +1786,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
- (!prio || itr->app.priority == prio))
+ ((prio == -1) || itr->app.priority == prio))
return itr;
}

@@ -1821,7 +1821,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;

spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);

@@ -1849,7 +1850,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)

spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ itr = dcb_app_lookup(new, dev->ifindex, -1);
+ if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -1882,7 +1884,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;

spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);

diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 932985ca4e66..3f80a5ca4050 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1612,6 +1612,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
(ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_data(hdr->frame_control)) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 20a171ac4bb2..16849969c138 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3910,7 +3910,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)

list_for_each_codec(codec, bus) {
/* FIXME: maybe a better way needed for forced reset */
- cancel_delayed_work_sync(&codec->jackpoll_work);
+ if (current_work() != &codec->jackpoll_work.work)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
#ifdef CONFIG_PM
if (hda_codec_is_power_on(codec)) {
hda_call_codec_suspend(codec);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f6af3e1c2b93..d14b05f68d6d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6530,6 +6530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 5feae9666822..55d6c9488d8e 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1165,6 +1165,9 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
snd_pcm_sframes_t codec_delay = 0;
int i;

+ /* clearing the previous total delay */
+ runtime->delay = 0;
+
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;

@@ -1176,6 +1179,8 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
offset = component->driver->ops->pointer(substream);
break;
}
+ /* base delay if assigned in pointer callback */
+ delay = runtime->delay;

if (cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f5a3b402589e..67b042738ed7 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -905,8 +905,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
-perf_include_dir = lib/include/perf
-perf_examples_dir = lib/examples/perf
+perf_include_dir = lib/perf/include
+perf_examples_dir = lib/perf/examples
sharedir = $(prefix)/share
template_dir = share/perf-core/templates
STRACE_GROUPS_DIR = share/perf-core/strace/groups
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 6a8738f7ead3..eab66e3b0a19 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
" s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";

+ if (!he)
+ return 0;
+
/* Display compact version first. */
c2c.symbol_full = false;

diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index d215714f48df..21bf7f5a3cf5 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

+#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
+#endif

extern const char *input_name;
extern bool perf_host, perf_guest;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 94fce4f537e9..0d5504751cc5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -848,6 +848,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
}
}

+static bool is_dummy_event(struct perf_evsel *evsel)
+{
+ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
+ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1086,6 +1092,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
else
perf_evsel__reset_sample_bit(evsel, PERIOD);
}
+
+ /*
+ * For initial_delay, a dummy event is added implicitly.
+ * The software event will trigger -EOPNOTSUPP error out,
+ * if BRANCH_STACK bit is set.
+ */
+ if (opts->initial_delay && is_dummy_event(evsel))
+ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
}

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index b53596ad601b..2e7fd8227969 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
if (get_nfit_res(pmem->phys_addr + offset)) {
struct page *page;

- *kaddr = pmem->virt_addr + offset;
+ if (kaddr)
+ *kaddr = pmem->virt_addr + offset;
page = vmalloc_to_page(pmem->virt_addr + offset);
- *pfn = page_to_pfn_t(page);
+ if (pfn)
+ *pfn = page_to_pfn_t(page);
pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
__func__, pmem, pgoff, page_to_pfn(page));

return 1;
}

- *kaddr = pmem->virt_addr + offset;
- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ if (kaddr)
+ *kaddr = pmem->virt_addr + offset;
+ if (pfn)
+ *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

/*
* If badblocks are present, limit known good range to the
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 41106d9d5cc7..f9c856c8e472 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6997,7 +6997,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
@@ -7020,7 +7020,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
@@ -7042,7 +7042,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 70952bd98ff9..13147a1f5731 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action connmark",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -41,7 +41,7 @@
"cmdUnderTest": "$TC actions add action connmark pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action connmark drop index 100",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 100",
- "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -89,7 +89,7 @@
"cmdUnderTest": "$TC actions add action connmark pipe index 455",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 455",
- "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action connmark reclassify index 7",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -137,7 +137,7 @@
"cmdUnderTest": "$TC actions add action connmark continue index 17",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action connmark jump 10 index 17",
"expExitCode": "0",
"verifyCmd": "$TC actions list action connmark",
- "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 100 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -209,7 +209,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 65536 reclassify index 21",
"expExitCode": "255",
"verifyCmd": "$TC actions get action connmark index 1",
- "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
"matchCount": "0",
"teardown": [
"$TC actions flush action connmark"
@@ -233,7 +233,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 655 unsupp_arg pass index 2",
"expExitCode": "255",
"verifyCmd": "$TC actions get action connmark index 2",
- "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
"matchCount": "0",
"teardown": [
"$TC actions flush action connmark"
@@ -258,7 +258,7 @@
"cmdUnderTest": "$TC actions replace action connmark zone 555 reclassify index 555",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 555",
- "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
+ "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
@@ -282,7 +282,7 @@
"cmdUnderTest": "$TC actions add action connmark zone 555 pipe index 5 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action connmark index 5",
- "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action connmark"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index 6e4edfae1799..db49fd0f8445 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -44,7 +44,8 @@
"matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
"matchCount": "1",
"teardown": [
- "$TC actions flush action mirred"
+ "$TC actions flush action mirred",
+ "$TC actions flush action gact"
]
},
{
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index c2b95a22959b..fd8c88463928 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1831,13 +1831,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
unsigned long end = hva + PAGE_SIZE;
+ kvm_pfn_t pfn = pte_pfn(pte);
pte_t stage2_pte;

if (!kvm->arch.pgd)
return;

trace_kvm_set_spte_hva(hva);
- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+ /*
+ * We've moved a page around, probably through CoW, so let's treat it
+ * just like a translation fault and clean the cache to the PoC.
+ */
+ clean_dcache_guest_page(pfn, PAGE_SIZE);
+ stage2_pte = pfn_pte(pfn, PAGE_S2);
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

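[Editor's note - not part of the patch] The virt/kvm/arm/mmu.c hunk carries its own rationale in the added comment: when the host replaces the page backing a guest mapping (typically after copy-on-write), the new page's contents may still be dirty in the data cache above the Point of Coherency, so a guest performing non-cacheable accesses could read stale memory. The fix therefore cleans the D-cache for the new pfn before the stage-2 PTE is installed, mirroring what the translation-fault path already does. The C sketch below only models that ordering with hypothetical stand-in helpers (clean_dcache_to_poc, install_stage2_pte); it is not kernel code.

/*
 * Illustration only: hypothetical stand-ins modelling the ordering change
 * in kvm_set_spte_hva(). The helpers below do not exist in the kernel;
 * they just make the before/after difference explicit.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t kvm_pfn_t;

static void clean_dcache_to_poc(kvm_pfn_t pfn, size_t size)
{
	/* Stand-in for the real cache maintenance: push any dirty cache
	 * lines for this page out to the Point of Coherency. */
	printf("clean D-cache: pfn=%llu size=%zu\n",
	       (unsigned long long)pfn, size);
}

static void install_stage2_pte(kvm_pfn_t pfn)
{
	/* Stand-in for building the PTE and updating the stage-2 tables. */
	printf("map stage-2: pfn=%llu\n", (unsigned long long)pfn);
}

/* Old behaviour: map the moved page without cleaning it first. */
static void set_spte_old(kvm_pfn_t new_pfn)
{
	install_stage2_pte(new_pfn);	/* guest may observe stale memory */
}

/* Patched behaviour: clean first, as the translation-fault path does. */
static void set_spte_new(kvm_pfn_t new_pfn)
{
	clean_dcache_to_poc(new_pfn, 4096);
	install_stage2_pte(new_pfn);
}

int main(void)
{
	kvm_pfn_t pfn = 42;	/* arbitrary pfn for the demo */

	puts("before the patch:");
	set_spte_old(pfn);
	puts("after the patch:");
	set_spte_new(pfn);
	return 0;
}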