Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ahci_da850.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 31
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 6
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 19
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 9
-rw-r--r--  drivers/input/joystick/xpad.c | 24
-rw-r--r--  drivers/input/mouse/synaptics.c | 35
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 8
-rw-r--r--  drivers/iommu/intel-svm.c | 9
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 5
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm.c | 12
-rw-r--r--  drivers/misc/mic/scif/scif_dma.c | 11
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 12
-rw-r--r--  drivers/mmc/core/block.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 19
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 92
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 15
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 139
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 60
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 16
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 8
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 7
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 6
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 1
-rw-r--r--  drivers/nvme/host/pci.c | 22
-rw-r--r--  drivers/nvme/host/rdma.c | 8
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 1
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 14
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 8
82 files changed, 531 insertions, 465 deletions
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
+ if (!res) {
+ rc = -ENODEV;
goto disable_resources;
+ }
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
- if (!pwrdn_reg)
+ if (!pwrdn_reg) {
+ rc = -ENOMEM;
goto disable_resources;
+ }
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa7dd4394c02..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
+ if (!ata_id_has_trusted(dev->id))
+ return;
+
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..2adb8599be93 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
+ bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&ring->inflight) > 0)
- return -EBUSY;
+ if (atomic_read(&ring->inflight) > 0) {
+ busy = true;
+ continue;
+ }
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
+ if (busy)
+ return -EBUSY;
+
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 6558a3ed57a7..e1cde6b80027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}
/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, address, address);
- if (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- amdgpu_mn_invalidate_node(node, address, address);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
- .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 2d51a2269fc6..5131bfb94f06 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
- u8 reg = msg->reg[0] & 0x7f;
+ u8 reg = msg->reg[1] & 0x7f;
- if (msg->reg[0] & 0x80)
+ if (msg->reg[1] & 0x80)
ctx->xdevcap[reg] = msg->ret;
else
ctx->devcap[reg] = msg->ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61e06f0e8cd3..625ba24f143f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
}
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ return drm_atomic_helper_commit(dev, state, false);
+}
+
+
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = vmw_kms_atomic_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 57248bccadbc..2b98a173136f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -256,7 +256,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct dw_i2c_dev *dev;
u32 acpi_speed, ht = 0;
struct resource *mem;
- int irq, ret;
+ int i, irq, ret;
+ const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -297,9 +298,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
- /* Some broken DSTDs use 1MiHz instead of 1MHz */
- if (acpi_speed == 1048576)
- acpi_speed = 1000000;
+ /*
+ * Some DSTDs use a non standard speed, round down to the lowest
+ * standard speed.
+ */
+ for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed < supported_speeds[i])
+ break;
+ }
+ acpi_speed = supported_speeds[i - 1];
+
/*
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
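(For readers skimming the i2c-designware change above: a minimal standalone sketch of the round-down logic, assuming the same table of standard I2C speeds. The helper name is illustrative and not part of the driver.)

#include <stddef.h>

/* Standard I2C bus speeds in Hz; index 0 acts as a lower bound. */
static const unsigned int std_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };

/* Round a firmware-reported bus speed down to the nearest standard speed. */
static unsigned int round_down_to_std_speed(unsigned int speed)
{
	size_t i;

	for (i = 1; i < sizeof(std_speeds) / sizeof(std_speeds[0]); i++) {
		if (speed < std_speeds[i])
			break;
	}
	return std_speeds[i - 1];
}

/* Example: round_down_to_std_speed(1048576) returns 1000000. */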
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index e98e44e584a4..22ffcb73c185 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -341,8 +341,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
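(A hedged, standalone sketch of the length check added in the i2c-ismt hunk above; the function name is illustrative, while the buffer layout mirrors the descriptor byte count and the SMBus length byte used in the patch.)

#include <errno.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy an SMBus block read out of a DMA buffer only when the descriptor's
 * receive byte count matches the length byte in the buffer (length + 1
 * total bytes); otherwise reject the transfer instead of over-copying.
 */
static int copy_smbus_block(uint8_t *block, const uint8_t *dma_buffer,
			    unsigned int rxbytes)
{
	if (rxbytes != (unsigned int)dma_buffer[0] + 1)
		return -EMSGSIZE;

	memcpy(block, dma_buffer, rxbytes);
	return 0;
}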
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8c4ec564e495..55e8f5ed8b3c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
return 0;
}
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
- address + PAGE_SIZE,
- invalidate_page_trampoline, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
-}
-
static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
u64 end, void *cookie)
{
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
static const struct mmu_notifier_ops ib_umem_notifiers = {
.release = ib_umem_notifier_release,
- .invalidate_page = ib_umem_notifier_invalidate_page,
.invalidate_range_start = ib_umem_notifier_invalidate_range_start,
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index ccbf52c8ff6f..e4b56a0dd6d0 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
- unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
- .invalidate_page = mmu_notifier_page,
.invalidate_range_start = mmu_notifier_range_start,
};
@@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
- struct mm_struct *mm, unsigned long addr)
-{
- mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 298a6ba51411..ca0e19ae7a90 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = {
};
/*
- * A rumble packet is required for some PowerA pads to start
+ * A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
-static const u8 xboxone_zerorumble_init[] = {
+static const u8 xboxone_rumblebegin_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x1D, 0x1D, 0xFF, 0x00, 0x00
+};
+
+/*
+ * A rumble packet with zero FF intensity will immediately
+ * terminate the rumbling required to init PowerA pads.
+ * This should happen fast enough that the motors don't
+ * spin up to enough speed to actually vibrate the gamepad.
+ */
+static const u8 xboxone_rumbleend_init[] = {
0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00
};
@@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
- XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
};
struct xpad_output_packet {
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 16c30460ef04..5af0b7d200bc 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse,
}
}
+static bool synaptics_has_agm(struct synaptics_data *priv)
+{
+ return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c));
+}
+
static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
{
static u8 param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
int error;
- if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)))
- return 0;
-
error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL);
if (error)
return error;
@@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
if (error)
return error;
- /* Advanced gesture mode also sends multi finger data */
- priv->info.capabilities |= BIT(1);
-
return 0;
}
@@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse)
if (error)
return error;
- if (priv->absolute_mode) {
+ if (priv->absolute_mode && synaptics_has_agm(priv)) {
error = synaptics_set_advanced_gesture_mode(psmouse);
if (error) {
psmouse_err(psmouse,
@@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[],
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
- if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) &&
- hw->w == 2) {
+ if (synaptics_has_agm(priv) && hw->w == 2) {
synaptics_parse_agm(buf, priv, hw);
return 1;
}
@@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
synaptics_report_mt_data(psmouse, sgm, num_fingers);
}
+static bool synaptics_has_multifinger(struct synaptics_data *priv)
+{
+ if (SYN_CAP_MULTIFINGER(priv->info.capabilities))
+ return true;
+
+ /* Advanced gesture mode also sends multi finger data */
+ return synaptics_has_agm(priv);
+}
+
/*
* called for each full received packet from the touchpad
*/
@@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
if (SYN_CAP_EXTENDED(info->capabilities)) {
switch (hw.w) {
case 0 ... 1:
- if (SYN_CAP_MULTIFINGER(info->capabilities))
+ if (synaptics_has_multifinger(priv))
num_fingers = hw.w + 2;
break;
case 2:
@@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
}
@@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse,
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6629c472eafd..dccf5b76eff2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
return 0;
}
-static void mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- __mn_flush_page(mn, address);
-}
-
static void mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
- .invalidate_page = mn_invalidate_page,
.invalidate_range = mn_invalidate_range,
};
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f167c0d84ebf..f620dccec8ee 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
intel_flush_svm_range(svm, address, 1, 1, 0);
}
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address)
-{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
- intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
.change_pte = intel_change_pte,
- .invalidate_page = intel_invalidate_page,
.invalidate_range = intel_invalidate_range,
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6ab1d3afec02..48ee1bad473f 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -1020,8 +1020,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e8ab5bb3575..d24e4b05f5da 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (queue_dying) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
- return DM_MAPIO_REQUEUE;
}
return DM_MAPIO_DELAY_REQUEUE;
}
@@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error)
case BLK_STS_TARGET:
case BLK_STS_NEXUS:
case BLK_STS_MEDIUM:
- case BLK_STS_RESOURCE:
return 1;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2edbcc2d7d3f..d669fddd9290 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -27,16 +27,6 @@
#define DM_MSG_PREFIX "core"
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md,
}
/* drop the extra reference count */
- dec_pending(ci.io, error);
+ dec_pending(ci.io, errno_to_blk_status(error));
}
/*-----------------------------------------------------------------
* CRUD END
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 64d5760d069a..63d6246d6dff 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
schedule_work(&scif_info.misc_work);
}
-static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
-}
-
static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
.release = scif_mmu_notifier_release,
.clear_flush_young = NULL,
- .invalidate_page = scif_mmu_notifier_invalidate_page,
.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index e936d43895d2..9918eda0e05f 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
}
-static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address)
-{
- struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
- ms_notifier);
-
- STAT(mmu_invalidate_page);
- gru_flush_tlb_range(gms, address, PAGE_SIZE);
- gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
-}
-
static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
@@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops gru_mmuops = {
- .invalidate_page = gru_invalidate_page,
.invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end,
.release = gru_release,
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 80d1ec693d2d..8bd7aba811e9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1213,7 +1213,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret);
+ blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1718,9 +1718,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
if (err)
req_pending = old_req_pending;
else
- req_pending = blk_end_request(req, 0, blocks << 9);
+ req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
} else {
- req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
+ req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
}
return req_pending;
}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index bc1781bb070b..c580af05b033 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = host->pwr;
+
+ sdhci_set_power_noreg(host, mode, vdd);
+
+ if (host->pwr == pwr)
+ return;
+
+ if (host->pwr == 0)
+ vdd = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
.set_clock = sdhci_set_clock,
+ .set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
.reset = xenon_reset,
.set_uhs_signaling = xenon_set_uhs_signaling,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 648f91b58d1e..9b6ce7c3f6c3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
.type = BCM7445_DEVICE_ID,
.core_reg_align = 0,
.reg_offsets = bcm_sf2_7445_reg_offsets,
+ .num_cfp_rules = 256,
};
static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
.type = BCM7278_DEVICE_ID,
.core_reg_align = 1,
.reg_offsets = bcm_sf2_7278_reg_offsets,
+ .num_cfp_rules = 128,
};
static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->type = data->type;
priv->reg_offsets = data->reg_offsets;
priv->core_reg_align = data->core_reg_align;
+ priv->num_cfp_rules = data->num_cfp_rules;
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 7d3030e04f11..7f9125eef3df 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
u32 type;
const u16 *reg_offsets;
unsigned int core_reg_align;
+ unsigned int num_cfp_rules;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 2fb32d67065f..8a1da7e67707 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
{
u32 reg;
- WARN_ON(addr >= CFP_NUM_RULES);
+ WARN_ON(addr >= priv->num_cfp_rules);
reg = core_readl(priv, CORE_CFP_ACC);
reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
/* Entry #0 is reserved */
- return CFP_NUM_RULES - 1;
+ return priv->num_cfp_rules - 1;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
if (!(reg & OP_STR_DONE))
break;
- } while (index < CFP_NUM_RULES);
+ } while (index < priv->num_cfp_rules);
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
nfc->rule_cnt = bitmap_weight(priv->cfp.used,
- CFP_NUM_RULES) - 1;
+ priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 1d307f2def2d..6e253d913fe2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
-static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
int ret;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
- return 0;
+ return;
if (!IS_ENABLED(CONFIG_MDIO_XGENE))
- return 0;
+ return;
ret = xgene_enet_phy_connect(pdata->ndev);
if (!ret)
pdata->mdio_driver = true;
- return 0;
+ return;
}
static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- ret = xgene_enet_check_phy_handle(pdata);
- if (ret)
- return ret;
-
xgene_enet_gpiod_get(pdata);
pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
+ xgene_enet_check_phy_handle(pdata);
+
ret = xgene_enet_init_hw(pdata);
if (ret)
- goto err;
+ goto err2;
link_state = pdata->mac_ops->link_state;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
spin_lock_init(&pdata->stats_lock);
ret = xgene_extd_stats_init(pdata);
if (ret)
- goto err2;
+ goto err1;
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
- goto err2;
+ goto err1;
}
return 0;
-err2:
+err1:
/*
* If necessary, free_netdev() will call netif_napi_del() and undo
* the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
*/
+ xgene_enet_delete_desc_rings(pdata);
+
+err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
-err1:
- xgene_enet_delete_desc_rings(pdata);
err:
free_netdev(ndev);
return ret;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index fce0fd3f23ff..bf9b3f020e10 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+ int (*hw_get_link_status)(struct aq_hw_s *self);
int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 9ee1c5016784..6ac9e2602d6d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
else
cfg->vecs = 1U;
+ cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
- struct aq_hw_link_status_s link_status;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
- err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+ err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
if (err < 0)
goto err_exit;
- self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
-
- if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
- if (link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
- AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
- AQ_NIC_LINK_DOWN);
- netif_carrier_on(self->ndev);
- } else {
- netif_carrier_off(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
- }
+ self->link_status = self->aq_hw->aq_link_status;
- self->link_status = link_status;
+ self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+
+ if (self->link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ } else {
+ netif_carrier_off(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
}
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
{
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
unsigned int tc = 0U;
- unsigned int trys = AQ_CFG_LOCK_TRYS;
int err = NETDEV_TX_OK;
bool is_nic_in_bad_state;
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
goto err_exit;
}
- do {
- if (spin_trylock(&ring->header.lock)) {
- frags = aq_nic_map_skb(self, skb, ring);
-
- if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(
- self->aq_hw,
- ring, frags);
- if (err >= 0) {
- if (aq_ring_avail_dx(ring) <
- AQ_CFG_SKB_FRAGS_MAX + 1)
- aq_nic_ndev_queue_stop(
- self,
- ring->idx);
-
- ++ring->stats.tx.packets;
- ring->stats.tx.bytes += skb->len;
- }
- } else {
- err = NETDEV_TX_BUSY;
- }
+ frags = aq_nic_map_skb(self, skb, ring);
- spin_unlock(&ring->header.lock);
- break;
- }
- } while (--trys);
+ if (likely(frags)) {
+ err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+ ring,
+ frags);
+ if (err >= 0) {
+ if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+ aq_nic_ndev_queue_stop(self, ring->idx);
- if (!trys) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
err = NETDEV_TX_BUSY;
- goto err_exit;
}
err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
++self->mc_list.count;
+
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+ break;
}
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+ /* Number of filters is too big: atlantic does not support this.
+ * Force all multi filter to support this.
+ * With this we disable all UC filters and setup "all pass"
+ * multicast mask
+ */
+ self->packet_filter |= IFF_ALLMULTI;
+ self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+ return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ } else {
+ return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
+ }
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
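(The multicast handling above follows a common pattern: program individual filters while they fit, and fall back to an all-multicast mode once the list reaches the hardware limit. A hedged sketch under assumed names; the filter depth and struct layout are illustrative, not the driver's.)

#include <stdbool.h>
#include <string.h>

#define MC_ADDRESS_MAX 32 /* assumed hardware multicast filter depth */

struct mc_list {
	unsigned char ar[MC_ADDRESS_MAX][6];
	unsigned int count;
};

/*
 * Copy at most MC_ADDRESS_MAX addresses into the hardware list. Returns
 * true when the source list hits the limit, in which case the caller
 * should enable an "all multicast" packet filter instead of programming
 * individual entries (mirroring the conservative check in the hunk above).
 */
static bool collect_mc_addrs(struct mc_list *dst,
			     const unsigned char (*src)[6], unsigned int n)
{
	unsigned int i;

	dst->count = 0;
	for (i = 0; i < n && i < MC_ADDRESS_MAX; i++) {
		memcpy(dst->ar[i], src[i], 6);
		dst->count++;
	}
	return n >= MC_ADDRESS_MAX;
}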
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 9a0817938eca..ec5579fb8268 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
- spin_lock_init(&self->header.lock);
return 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index f6012b34abe6..e12bcdfb874a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
struct aq_obj_s {
- spinlock_t lock; /* spinlock for nic/rings processing */
atomic_t flags;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ad5b4d4dac7f..fee446af748f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
#define AQ_VEC_RX_ID 1
static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
{
struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
if (!self) {
err = -EINVAL;
- } else if (spin_trylock(&self->header.lock)) {
+ } else {
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
self->aq_hw_ops->hw_irq_enable(self->aq_hw,
1U << self->aq_ring_param.vec_idx);
}
-
-err_exit:
- spin_unlock(&self->header.lock);
}
-
+err_exit:
return work_done;
}
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- spin_lock_init(&self->header.lock);
-
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index faeb4935ef3e..c5a02df7a48b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1bceb7358e5c..21784cc39dab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+
+ /* Checksum offload workaround for small packets */
+ if (rxd_wb->pkt_len <= 60) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
+ }
}
is_err &= ~0x18U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 8d6d8f5804da..4f5ec9a0fbfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
aq_hw_read_reg(self, 0x18U));
+
+ if (err < 0)
+ pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+ AQ_CFG_DRV_NAME,
+ aq_hw_caps->fw_ver_expected,
+ aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
err_exit:;
}
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
if (!link_speed_mask) {
link_status->mbps = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index a66aee51ab5b..e0360a6b2202 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
enum hal_atl_utils_fw_state_e state);
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
- struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dc3052751bc1..c28fa5a8734c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -597,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
- dev_kfree_skb_any(cb->skb);
+ dev_consume_skb_any(cb->skb);
cb->skb = NULL;
dma_unmap_addr_set(cb, dma_addr, 0);
}
@@ -1346,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..f20b3d2a4c23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
- }
- return rc;
#endif
}
@@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (BNXT_PF(bp)) {
+ memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_adr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+#endif
+ }
+ return rc;
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
-
+ rc = bnxt_init_mac_addr(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+ rc = -EADDRNOTAVAIL;
+ goto init_err_pci_clean;
+ }
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ if (ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a981c4ee9d72..fea3f9a5fb2d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (skb) {
pkts_compl++;
bytes_compl += GENET_CB(skb)->bytes_sent;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
txbds_processed++;
@@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
cb = ring->cbs + i;
skb = bcmgenet_rx_refill(priv, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 82bf7aac6cdb..0293b41171a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
/* Copy in the new mailbox command and send it on its way ... */
- t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+ t4_record_mbox(adap, cmd, size, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
}
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
- t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ t4_record_mbox(adap, cmd, size, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 34dae51effd4..59da7ac3c108 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1863,7 +1863,6 @@ err_setup_mdio:
err_ioremap:
release_resource(priv->res);
err_req_mem:
- netif_napi_del(&priv->napi);
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 6e67d22fd0d5..1c7da16ad0ff 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
+ pdev->dev.of_node = node;
+ pdev->dev.parent = priv->dev;
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data));
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 48d21c1e09f2..4d598ca8503a 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
const char *dt_mac_addr;
const char *mac_from;
- char hw_mac_addr[ETH_ALEN];
+ char hw_mac_addr[ETH_ALEN] = {0};
u32 id;
int features;
int phy_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0039b4725405..2f26fb34d741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 cap;
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2eb54d36e16e..c1d384fca4dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_dcbx *dcbx = &priv->dcbx;
- u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
- if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
- mode |= DCB_CAP_DCBX_HOST;
- return mode;
+ return priv->dcbx.cap;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
/* set dcbx to fw controlled */
if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+ dcbx->cap &= ~DCB_CAP_DCBX_HOST;
return 0;
}
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
+ dcbx->cap = mode;
+
return 0;
}
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = (DCB_CAP_DCBX_LLD_MANAGED |
- DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_STATIC);
+ *cap = priv->dcbx.cap |
+ DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
break;
default:
*cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (!MLX5_CAP_GEN(priv->mdev, qos))
+ return;
+
if (MLX5_CAP_GEN(priv->mdev, dcbx))
mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+ priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE;
+ if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
mlx5e_ets_init(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 917fade5f5d5..f5594014715b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->mdev,
+ new_channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 57f31fa478ce..6ad7f07e7861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
+ param->cq_period_mode = params->rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 325b2c8c1c6d..7344433259fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
if (unlikely(!page))
return -ENOMEM;
- dma_info->page = page;
dma_info->addr = dma_map_page(rq->pdev, page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
put_page(page);
return -ENOMEM;
}
+ dma_info->page = page;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3c536f560dd2..7f282e8f4e7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
- dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
- ret = dst->error;
- if (ret) {
- dst_release(dst);
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
return ret;
- }
*out_ttl = ip6_dst_hoplimit(dst);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index aaa0f4ebba9a..31353e5c3c78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
return mlx5e_skb_l2_header_offset(skb);
}
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
- struct sk_buff *skb)
+static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ struct sk_buff *skb)
{
- int hlen;
+ u16 hlen;
switch (mode) {
case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
hlen = eth_get_headlen(skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
- return hlen;
+ break;
case MLX5_INLINE_MODE_IP:
/* When transport header is set to zero, it means no transport
* header. When transport header is set to 0xff's, it means
* transport header wasn't set.
*/
- if (skb_transport_offset(skb))
- return mlx5e_skb_l3_header_offset(skb);
+ if (skb_transport_offset(skb)) {
+ hlen = mlx5e_skb_l3_header_offset(skb);
+ break;
+ }
/* fall through */
case MLX5_INLINE_MODE_L2:
default:
- return mlx5e_skb_l2_header_offset(skb);
+ hlen = mlx5e_skb_l2_header_offset(skb);
}
+ return min_t(u16, hlen, skb->len);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 95b64025ce36..5bc0593bd76e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
struct mlx5_eswitch_rep *rep;
int vport;
- for (vport = 0; vport < nvports; vport++) {
+ for (vport = nvports - 1; vport >= 0; vport--) {
rep = &esw->offloads.vport_reps[vport];
if (!rep->valid)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c065132b956d..16885827367b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
- clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+ if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
if (cleanup)
@@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
@@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
int err;
dev_info(&pdev->dev, "Shutdown was called\n");
- /* Notify mlx5 clients that the kernel is being shut down */
- set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f774de6f5fcb..520f6382dfde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- /* arm_srq structs missing using identical xrc ones */
- u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
+ MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 60bf8f27cc00..c6a3e61b53bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
if (!info->linking)
break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
+ if (!info->linking)
+ break;
+ if (netdev_has_any_upper_dev(upper_dev))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5eb1606765c5..d39ffbfcc436 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_port_mc_router)
{
struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
@@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
if (!bridge_port->bridge_device->multicast_enabled)
- return 0;
+ goto out;
- return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- is_port_mc_router);
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+ if (err)
+ return err;
+
+out:
+ bridge_port->mrouter = is_port_mc_router;
+ return 0;
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 0e08404480ef..d25b5038c3a2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
+ memset(frame, 0, sizeof(struct nfp_flower_meta_two));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
- if (mask_version) {
- frame->tci = cpu_to_be16(~0);
- return;
- }
-
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- flow->key);
-
- /* Populate the tci field. */
- if (!flow_vlan->vlan_id) {
- tmp_tci = 0;
- } else {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- flow_vlan->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- flow_vlan->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ target);
+ /* Populate the tci field. */
+ if (flow_vlan->vlan_id) {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ flow_vlan->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ flow_vlan->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ frame->tci = cpu_to_be16(tmp_tci);
+ }
}
- frame->tci = cpu_to_be16(tmp_tci);
}
static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_eth_addrs *flow_mac;
-
- flow_mac = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- target);
+ struct flow_dissector_key_eth_addrs *addr;
memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
- /* Populate mac frame. */
- ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
- ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target);
+ /* Populate mac frame. */
+ ether_addr_copy(frame->mac_dst, &addr->dst[0]);
+ ether_addr_copy(frame->mac_src, &addr->src[0]);
+ }
if (mask_version)
frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ports *flow_tp;
+ struct flow_dissector_key_ports *tp;
- flow_tp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- target);
+ memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
- frame->port_src = flow_tp->src;
- frame->port_dst = flow_tp->dst;
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ tp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target);
+ frame->port_src = tp->src;
+ frame->port_dst = tp->dst;
+ }
}
static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *flow_ipv4;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv4 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
-
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
+ struct flow_dissector_key_ipv4_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- /* Populate IPv4 frame. */
- frame->reserved = 0;
- frame->ipv4_src = flow_ipv4->src;
- frame->ipv4_dst = flow_ipv4->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard TOS/TTL for now. */
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+ frame->ipv4_src = addr->src;
+ frame->ipv4_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv6_addrs *flow_ipv6;
- struct flow_dissector_key_basic *flow_basic;
-
- flow_ipv6 = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- target);
+ struct flow_dissector_key_ipv6_addrs *addr;
+ struct flow_dissector_key_basic *basic;
- flow_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
-
- /* Populate IPv6 frame. */
- frame->reserved = 0;
- frame->ipv6_src = flow_ipv6->src;
- frame->ipv6_dst = flow_ipv6->dst;
- frame->proto = flow_basic->ip_proto;
/* Wildcard LABEL/TOS/TTL for now. */
- frame->ipv6_flow_label_exthdr = 0;
- frame->tos = 0;
- frame->ttl = 0;
+ memset(frame, 0, sizeof(struct nfp_flower_ipv6));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target);
+ frame->ipv6_src = addr->src;
+ frame->ipv6_dst = addr->dst;
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target);
+ frame->proto = basic->ip_proto;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 4ad10bd5e139..74a96d6bb05c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -105,43 +105,62 @@ static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow)
{
- struct flow_dissector_key_control *mask_enc_ctl;
- struct flow_dissector_key_basic *mask_basic;
- struct flow_dissector_key_basic *key_basic;
+ struct flow_dissector_key_basic *mask_basic = NULL;
+ struct flow_dissector_key_basic *key_basic = NULL;
+ struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
- mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->mask);
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_control *mask_enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->mask);
+ /* We are expecting a tunnel. For now we ignore offloading. */
+ if (mask_enc_ctl->addr_type)
+ return -EOPNOTSUPP;
+ }
- mask_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->mask);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ mask_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->mask);
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ }
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
+ mask_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ flow->mask);
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
key_layer_two = 0;
key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
key_size = sizeof(struct nfp_flower_meta_one) +
sizeof(struct nfp_flower_in_port) +
sizeof(struct nfp_flower_mac_mpls);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
- return -EOPNOTSUPP;
-
- if (mask_basic->n_proto) {
+ if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
+ if (mask_ip && mask_ip->tos)
+ return -EOPNOTSUPP;
+ if (mask_ip && mask_ip->ttl)
+ return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
+ /* Currently we do not offload MPLS. */
+ case cpu_to_be16(ETH_P_MPLS_UC):
+ case cpu_to_be16(ETH_P_MPLS_MC):
+ return -EOPNOTSUPP;
+
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
}
}
- if (mask_basic->ip_proto) {
+ if (mask_basic && mask_basic->ip_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->ip_proto) {
case IPPROTO_TCP:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index d67969d3e484..3f199db2002e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
- mutex_lock(&pf->lock);
-
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
- err = -EINVAL;
- goto err_unlock;
+ return -EINVAL;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
- goto err_unlock;
+ return err;
}
+ mutex_lock(&pf->lock);
+
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return num_vfs;
err_sriov_disable:
- pci_disable_sriov(pdev);
-err_unlock:
mutex_unlock(&pf->lock);
+ pci_disable_sriov(pdev);
return err;
#endif
return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
+ mutex_unlock(&pf->lock);
+
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
-
- mutex_unlock(&pf->lock);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9f77ce038a4a..66a09e490cf5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -895,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_sent_queue(nd_q, txbuf->real_len);
+ skb_tx_timestamp(skb);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -903,8 +905,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (!skb->xmit_more || netif_xmit_stopped(nd_q))
nfp_net_tx_xmit_more_flush(tx_ring);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
err_unmap:
@@ -1751,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
if (likely(!meta.portid)) {
netdev = dp->netdev;
} else {
@@ -1759,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nn = netdev_priv(dp->netdev);
netdev = nfp_app_repr_get(nn->app, meta.portid);
if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
continue;
}
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
skb_reserve(skb, pkt_off);
skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 5797dbf2b507..34b985384d26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
int err;
- err = nfp_net_pf_app_start_ctrl(pf);
- if (err)
- return err;
-
err = nfp_app_start(pf->app, pf->ctrl_vnic);
if (err)
- goto err_ctrl_stop;
+ return err;
if (pf->num_vfs) {
err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
err_app_stop:
nfp_app_stop(pf->app);
-err_ctrl_stop:
- nfp_net_pf_app_stop_ctrl(pf);
return err;
}
@@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
if (pf->num_vfs)
nfp_app_sriov_disable(pf->app);
nfp_app_stop(pf->app);
- nfp_net_pf_app_stop_ctrl(pf);
}
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -559,7 +552,7 @@ err_unmap_ctrl:
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
@@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
+ struct nfp_net *nn;
int stride;
int err;
@@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_free_vnics;
- err = nfp_net_pf_app_start(pf);
+ err = nfp_net_pf_app_start_ctrl(pf);
if (err)
goto err_free_irqs;
@@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
+ err = nfp_net_pf_app_start(pf);
+ if (err)
+ goto err_clean_vnics;
+
mutex_unlock(&pf->lock);
return 0;
+err_clean_vnics:
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ if (nfp_net_is_data_vnic(nn))
+ nfp_net_pf_clean_vnic(pf, nn);
err_stop_app:
- nfp_net_pf_app_stop(pf);
+ nfp_net_pf_app_stop_ctrl(pf);
err_free_irqs:
nfp_net_pf_free_irqs(pf);
err_free_vnics:
@@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
if (list_empty(&pf->vnics))
goto out;
+ nfp_net_pf_app_stop(pf);
+
list_for_each_entry(nn, &pf->vnics, vnic_list)
if (nfp_net_is_data_vnic(nn))
nfp_net_pf_clean_vnic(pf, nn);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 28ea0af89aef..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index bd07a15d3b7c..e03fcf914690 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb_any(tx_skb->skb);
+ dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 73427e29df2a..fbd00cb0cb7d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
sizeof(*plat->mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
if (!dma_cfg)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 17d4bbaeb65c..6e359572b9f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk) {
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
&module);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index fffd6d5fc907..39c2122a4f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
static const struct of_device_id sun8i_dwmac_match[] = {
- { .compatible = "allwinner,sun8i-h3-emac",
- .data = &emac_variant_h3 },
- { .compatible = "allwinner,sun8i-v3s-emac",
- .data = &emac_variant_v3s },
- { .compatible = "allwinner,sun8i-a83t-emac",
- .data = &emac_variant_a83t },
- { .compatible = "allwinner,sun50i-a64-emac",
- .data = &emac_variant_a64 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 56ba411421f0..38d1cc557c11 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
if (of_machine_is_compatible("ti,dra7"))
return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
- dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ dev_info(dev, "incompatible machine/device type for reading mac address\n");
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 0d78727f1a14..d91cbc6c3ca4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- rtnl_lock();
+ /* if changes are happening, come back later */
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+ return;
+ }
+
net_device = rtnl_dereference(ndev_ctx->nvdev);
if (!net_device)
goto out_unlock;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5e1ab1160856..98e4deaa3a6a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
module_exit(macsec_exit);
MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5068c582d502..d0626bf5c540 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1790f7fec125..2f742ae5b92e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
+ const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+
if (!fmt) {
dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
} else {
va_list ap;
dev_info(&phydev->mdio.dev, ATTACHED_FMT,
- phydev->drv->name, phydev_name(phydev),
+ drv_name, phydev_name(phydev),
phydev->irq);
va_start(ap, fmt);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8f572b9f3625..9c80e80c5493 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_noarp_info,
},
+ /* u-blox TOBY-L4 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 98f17b05c68b..b06169ea60dc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index fa315d84e98e..a1ea9ef97ed9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 351c4423125a..942736d3fa75 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
struct iwl_rb_allocator *rba_p =
container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
def_rxq = trans_pcie->rxq;
- if (!rba->alloc_wq)
- rba->alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
}
cancel_work_sync(&rba->rx_alloc);
- if (rba->alloc_wq) {
- destroy_workqueue(rba->alloc_wq);
- rba->alloc_wq = NULL;
- }
iwl_pcie_free_rbs_pool(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f95eec52508e..3927bbf04f72 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
+ if (trans_pcie->rba.alloc_wq) {
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+ trans_pcie->rba.alloc_wq = NULL;
+ }
+
if (trans_pcie->msix_enabled) {
for (i = 0; i < trans_pcie->alloc_vecs; i++) {
irq_set_affinity_hint(
@@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
#ifdef CONFIG_IWLWIFI_PCIE_RTPM
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 08f0477f78d9..9915d83a4a30 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 925467b31a33..ea892e732268 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -109,6 +109,7 @@ struct nvme_dev {
/* host memory buffer support: */
u64 host_mem_size;
u32 nr_host_mem_descs;
+ dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
};
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
- size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+ u64 dma_addr = dev->host_mem_descs_dma;
struct nvme_command c;
- u64 dma_addr;
int ret;
- dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, dma_addr))
- return -ENOMEM;
-
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
"failed to set host mem (err %d, flags %#x).\n",
ret, bits);
}
- dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
return ret;
}
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
kfree(dev->host_mem_desc_bufs);
dev->host_mem_desc_bufs = NULL;
- kfree(dev->host_mem_descs);
+ dma_free_coherent(dev->dev,
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
}
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
struct nvme_host_mem_buf_desc *descs;
u32 chunk_size, max_entries, len;
+ dma_addr_t descs_dma;
int i = 0;
void **bufs;
u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ retry:
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
max_entries = tmp;
- descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+ descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
if (!descs)
goto out;
@@ -1661,6 +1659,7 @@ retry:
dev->nr_host_mem_descs = i;
dev->host_mem_size = size;
dev->host_mem_descs = descs;
+ dev->host_mem_descs_dma = descs_dma;
dev->host_mem_desc_bufs = bufs;
return 0;
@@ -1674,7 +1673,8 @@ out_free_bufs:
kfree(bufs);
out_free_descs:
- kfree(descs);
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
out:
/* try a smaller chunk size if we failed early */
if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index da04df1af231..a03299d77922 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
int nr;
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ /*
+ * Align the MR to a 4K page size to match the ctrl page size and
+ * the block virtual boundary.
+ */
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
if (nr < count) {
if (nr < 0)
return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
goto out_cleanup_queue;
ctrl->ctrl.max_hw_sectors =
- (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+ (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
error = nvme_init_identify(&ctrl->ctrl);
if (error)
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index ba6ac83a6c25..5ccfdc80d0ec 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);
if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
- ccw->cda = (__u32) (addr_t) (iter->ch_ccw +
+ ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index da5bdbdcce52..f838bd73befa 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4945,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
if (ipr_is_vset_device(res)) {
sdev->scsi_level = SCSI_SPC_3;
+ sdev->no_report_opcodes = 1;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index eb07f1de8afa..59c18ca4cda9 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
/* If a SRR times out, simply free resources */
if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
- goto out_free;
+ goto out_put;
/* Normalize response data into struct fc_frame */
mp_req = &(srr_req->mp_req);
@@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
- goto out_free;
+ goto out_put;
}
/* Copy frame header from firmware into fp */
@@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
}
fc_frame_free(fp);
-out_free:
+out_put:
/* Put reference for original command since SRR completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
kfree(cb_arg);
}
@@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
/* If a REC times out, free resources */
if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
- goto out_free;
+ goto out_put;
/* Normalize response data into struct fc_frame */
mp_req = &(rec_req->mp_req);
@@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
if (!fp) {
QEDF_ERR(&(qedf->dbg_ctx),
"fc_frame_alloc failure.\n");
- goto out_free;
+ goto out_put;
}
/* Copy frame header from firmware into fp */
@@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
out_free_frame:
fc_frame_free(fp);
-out_free:
+out_put:
/* Put reference for original command since REC completed */
kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
kfree(cb_arg);
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d7ff71e0c85c..84e782d8e7c3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1021,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
rinfo[val].req_state = srp->done + 1;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f3bf8f4e2d6c..82360594fa8e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
mutex_unlock(&priv->lock);
}
-static void mn_invl_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
-}
-
static void mn_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn,
static const struct mmu_notifier_ops gntdev_mmu_ops = {
.release = mn_release,
- .invalidate_page = mn_invl_page,
.invalidate_range_start = mn_invl_range_start,
};