author     Boris Brezillon <boris.brezillon@bootlin.com>  2018-12-18 19:59:16 +0100
committer  Boris Brezillon <boris.brezillon@bootlin.com>  2018-12-18 19:59:16 +0100
commit     ccec4a4a4f27b22e51ec6a143319db49b7570581 (patch)
tree       f08ea185955126ee6828dd5618ef24b6caec1ec4
parent     7677ea0e8843e1a45e35253c0c5e22db11a99a62 (diff)
parent     732774437ae01d9882e60314e303898e63c7f038 (diff)
Merge tag 'nand/for-4.21' of git://git.infradead.org/linux-mtd into mtd/next
NAND core changes:
- kernel-doc miscellaneous fixes.
- Third batch of fixes/cleanup to the raw NAND core impacting various
  controller drivers (ams-delta, marvell, fsmc, denali, tegra, vf610):
  * No longer passing mtd_info objects to internal functions
  * Reorganizing code to avoid forward declarations
  * Dropping useless test in nand_legacy_set_defaults()
  * Moving nand_exec_op() to internal.h
  * Adding nand_[de]select_target() helpers
  * Passing the CS line to be selected in struct nand_operation
  * Making ->select_chip() optional when ->exec_op() is implemented
  * Deprecating the ->select_chip() hook
  * Moving the ->exec_op() method to nand_controller_ops
  * Moving ->setup_data_interface() to nand_controller_ops
  * Deprecating the dummy_controller field
  * Fixing JEDEC detection
  * Providing a helper for polling GPIO R/B pin

Raw NAND chip driver changes:
- Macronix:
  * Flagging 1.8V AC chips with a broken GET_FEATURES(TIMINGS)

Raw NAND controller driver changes:
- Ams-delta:
  * Fixing the error path
  * SPDX tag added
  * May be compiled with COMPILE_TEST=y
  * Conversion to ->exec_op() interface
  * Dropping .IOADDR_R/W use
  * Use GPIO API for data I/O
- Denali:
  * Removing denali_reset_banks()
  * Removing ->dev_ready() hook
  * Including <linux/bits.h> instead of <linux/bitops.h>
  * Changes to comply with the above fixes/cleanup done in the core
- FSMC:
  * Adding an SPDX tag to replace the license text
  * Making conversion from chip to fsmc consistent
  * Fixing unchecked return value in fsmc_read_page_hwecc
  * Changes to comply with the above fixes/cleanup done in the core
- Marvell:
  * Preventing timeouts on a loaded machine (fix)
  * Changes to comply with the above fixes/cleanup done in the core
- OMAP2:
  * Pass the parent of pdev to dma_request_chan() (fix)
- R852:
  * Use generic DMA API
- sh_flctl:
  * Converting to SPDX identifiers
- Sunxi:
  * Write pageprog related opcodes to the right register: WCMD_SET (fix)
- Tegra:
  * Stop implementing ->select_chip()
- VF610:
  * Adding an SPDX tag to replace the license text
  * Changes to comply with the above fixes/cleanup done in the core
- Various trivial/spelling/coding style fixes.

SPI-NAND driver changes:
- Removing the deprecated mt29f_spinand driver from staging.
- Adding support for:
  * Toshiba TC58CVG2S0H
  * GigaDevice GD5FxGQ4xA
  * Winbond W25N01GV
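To make the core-side rework above concrete, the following is a minimal, hypothetical sketch (not taken from this merge) of the shape a raw NAND controller driver takes once ->exec_op() and ->setup_data_interface() live in nand_controller_ops, ->select_chip() is left unimplemented, and the CS line to address travels in struct nand_operation. The my_nfc_* names are invented for illustration; field and constant names follow include/linux/mtd/rawnand.h as of this series and should be checked against the header in this tree.

/*
 * Hypothetical controller driver skeleton after the 4.21 raw NAND rework.
 * No ->select_chip(): the target CS comes from op->cs, and both hooks
 * below are registered through nand_controller_ops.
 */
#include <linux/mtd/rawnand.h>

static int my_nfc_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	unsigned int i;

	if (check_only)
		return 0;	/* every instruction type is handled below */

	/*
	 * The die to address is no longer chosen by a ->select_chip() call:
	 * assert the CS line identified by op->cs on the controller here.
	 */

	for (i = 0; i < op->ninstrs; i++) {
		const struct nand_op_instr *instr = &op->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/* write instr->ctx.cmd.opcode to the command register */
			break;
		case NAND_OP_ADDR_INSTR:
			/* issue instr->ctx.addr.naddrs cycles from
			 * instr->ctx.addr.addrs */
			break;
		case NAND_OP_DATA_IN_INSTR:
			/* read instr->ctx.data.len bytes into
			 * instr->ctx.data.buf.in */
			break;
		case NAND_OP_DATA_OUT_INSTR:
			/* write instr->ctx.data.len bytes from
			 * instr->ctx.data.buf.out */
			break;
		case NAND_OP_WAITRDY_INSTR:
			/* wait for R/B within instr->ctx.waitrdy.timeout_ms,
			 * e.g. with the new GPIO R/B polling helper when
			 * the ready pin is a plain GPIO */
			break;
		}
	}

	return 0;
}

static int my_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	/* program controller timings from conf->timings.sdr, or just ack */
	return 0;
}

static const struct nand_controller_ops my_nfc_controller_ops = {
	.exec_op = my_nfc_exec_op,
	.setup_data_interface = my_nfc_setup_data_interface,
};

With this shape, the driver never implements the deprecated ->select_chip() hook and has no use for the dummy_controller field, which is exactly the direction the deprecation notes above describe; the ams-delta, tegra and vf610 conversions in this pull follow the same pattern.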
-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-trigger-pattern4
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-omap.txt8
-rw-r--r--Documentation/i2c/busses/i2c-nvidia-gpu18
-rw-r--r--Documentation/x86/x86_64/mm.txt34
-rw-r--r--Documentation/x86/zero-page.txt2
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/termios.h8
-rw-r--r--arch/alpha/include/uapi/asm/ioctls.h5
-rw-r--r--arch/alpha/include/uapi/asm/termbits.h17
-rw-r--r--arch/arm/boot/dts/imx53-ppd.dts2
-rw-r--r--arch/arm/boot/dts/imx6sll.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi7
-rw-r--r--arch/arm/boot/dts/vf610m4-colibri.dts4
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/pgtable-2level.h2
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c22
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi3
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-condor.dts47
-rw-r--r--arch/arm64/include/asm/processor.h8
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h4
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c2
-rw-r--r--arch/mips/mm/dma-noncoherent.c2
-rw-r--r--arch/nds32/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/boot/compressed/Makefile16
-rw-r--r--arch/s390/configs/debug_defconfig14
-rw-r--r--arch/s390/configs/performance_defconfig13
-rw-r--r--arch/s390/defconfig79
-rw-r--r--arch/s390/include/asm/mmu_context.h5
-rw-r--r--arch/s390/include/asm/pgalloc.h6
-rw-r--r--arch/s390/include/asm/pgtable.h18
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/tlb.h6
-rw-r--r--arch/s390/kernel/entry.S6
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c33
-rw-r--r--arch/s390/kernel/vdso32/Makefile6
-rw-r--r--arch/s390/kernel/vdso64/Makefile6
-rw-r--r--arch/s390/kernel/vmlinux.lds.S4
-rw-r--r--arch/s390/mm/pgalloc.c1
-rw-r--r--arch/s390/numa/numa.c1
-rw-r--r--arch/um/drivers/ubd_kern.c12
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/include/asm/mce.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/include/asm/page_64_types.h12
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h4
-rw-r--r--arch/x86/include/asm/qspinlock.h13
-rw-r--r--arch/x86/include/asm/xen/page.h35
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c6
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c11
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/ldt.c59
-rw-r--r--arch/x86/kernel/vsmp_64.c84
-rw-r--r--arch/x86/xen/mmu_pv.c6
-rw-r--r--arch/x86/xen/p2m.c3
-rw-r--r--arch/x86/xen/spinlock.c14
-rw-r--r--block/bio.c1
-rw-r--r--block/blk-lib.c26
-rw-r--r--block/blk-merge.c5
-rw-r--r--block/blk.h12
-rw-r--r--drivers/acpi/nfit/mce.c8
-rw-r--r--drivers/ata/sata_rcar.c6
-rw-r--r--drivers/block/xen-blkfront.c1
-rw-r--r--drivers/clk/clk-fixed-factor.c1
-rw-r--r--drivers/clk/meson/axg.c13
-rw-r--r--drivers/clk/meson/gxbb.c12
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c2
-rw-r--r--drivers/clocksource/i8253.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c79
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h4
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c115
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h10
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h36
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h20
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c17
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c18
-rw-r--r--drivers/gpu/drm/i915/intel_display.c19
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c5
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c3
-rw-r--r--drivers/hid/hid-alps.c18
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c19
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/usbhid/hiddev.c18
-rw-r--r--drivers/hwmon/hwmon.c8
-rw-r--r--drivers/hwmon/ibmpowernv.c7
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c368
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c15
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c27
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c10
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c263
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/raw/denali.c59
-rw-r--r--drivers/mtd/nand/raw/denali.h2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c303
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c29
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/raw/internals.h33
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c8
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.c2
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c2
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c51
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c16
-rw-r--r--drivers/mtd/nand/raw/nand_base.c770
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c285
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c8
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c35
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c7
-rw-r--r--drivers/mtd/nand/raw/nandsim.c2
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/mtd/nand/raw/omap2.c2
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/raw/r852.c30
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c7
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c21
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c4
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c32
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c98
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c2
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c148
-rw-r--r--drivers/mtd/nand/spi/toshiba.c137
-rw-r--r--drivers/mtd/nand/spi/winbond.c8
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c6
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c61
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c21
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h1
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c15
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c86
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c69
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/fddi/defza.c7
-rw-r--r--drivers/net/fddi/defza.h3
-rw-r--r--drivers/net/phy/broadcom.c18
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/usb/smsc95xx.c9
-rw-r--r--drivers/nvme/host/core.c4
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/rdma.c19
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/of/of_numa.c9
-rw-r--r--drivers/s390/net/qeth_core.h27
-rw-r--r--drivers/s390/net/qeth_core_main.c172
-rw-r--r--drivers/s390/net/qeth_core_mpc.h4
-rw-r--r--drivers/s390/net/qeth_l2_main.c39
-rw-r--r--drivers/s390/net/qeth_l3_main.c207
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/mt29f_spinand/Kconfig16
-rw-r--r--drivers/staging/mt29f_spinand/Makefile1
-rw-r--r--drivers/staging/mt29f_spinand/TODO13
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c980
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h106
-rw-r--r--drivers/tty/serial/sh-sci.c8
-rw-r--r--drivers/tty/tty_baudrate.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/typec/ucsi/Kconfig10
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c307
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/privcmd-buf.c22
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c63
-rw-r--r--fs/btrfs/free-space-cache.c22
-rw-r--r--fs/btrfs/inode.c37
-rw-r--r--fs/btrfs/ioctl.c14
-rw-r--r--fs/btrfs/super.c6
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/btrfs/tree-log.c17
-rw-r--r--fs/ceph/file.c11
-rw-r--r--fs/ceph/mds_client.c12
-rw-r--r--fs/ceph/quota.c3
-rw-r--r--fs/ext4/inode.c5
-rw-r--r--fs/ext4/namei.c5
-rw-r--r--fs/ext4/resize.c28
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/ext4/xattr.c27
-rw-r--r--fs/namespace.c22
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c11
-rw-r--r--fs/xfs/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_message.c2
-rw-r--r--include/asm-generic/4level-fixup.h2
-rw-r--r--include/asm-generic/5level-fixup.h2
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h2
-rw-r--r--include/asm-generic/pgtable-nop4d.h2
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/pgtable.h16
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/compiler-gcc.h12
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/compiler_attributes.h14
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/rawnand.h158
-rw-r--r--include/linux/mtd/sh_flctl.h16
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h39
-rw-r--r--include/uapi/linux/kfd_ioctl.h18
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/netfilter_bridge.h4
-rw-r--r--include/uapi/linux/sctp.h3
-rw-r--r--include/xen/xen-ops.h12
-rw-r--r--kernel/bpf/core.c4
-rw-r--r--kernel/bpf/syscall.c35
-rw-r--r--kernel/resource.c19
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/fair.c4
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/trace/trace_probe.c2
-rw-r--r--kernel/user_namespace.c12
-rw-r--r--lib/raid6/test/Makefile4
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c1
-rw-r--r--net/ipv4/inet_fragment.c29
-rw-r--r--net/ipv4/ip_fragment.c12
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv6/af_inet6.c5
-rw-r--r--net/ipv6/anycast.c80
-rw-r--r--net/ipv6/ip6_fib.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c13
-rw-r--r--net/netfilter/ipset/ip_set_core.c43
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c8
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c13
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c11
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c15
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c11
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c47
-rw-r--r--net/netfilter/nft_compat.c21
-rw-r--r--net/netfilter/nft_numgen.c127
-rw-r--r--net/netfilter/nft_osf.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c20
-rw-r--r--net/openvswitch/conntrack.c3
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_event.c18
-rw-r--r--net/rxrpc/output.c35
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/cls_flower.c14
-rw-r--r--net/sched/sch_netem.c9
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/tipc/link.c11
-rwxr-xr-xscripts/kconfig/merge_config.sh7
-rwxr-xr-xscripts/package/builddeb6
-rwxr-xr-xscripts/package/mkdebian7
-rwxr-xr-xscripts/package/mkspec11
-rwxr-xr-xscripts/setlocalversion2
-rw-r--r--sound/pci/hda/thinkpad_helper.c4
-rw-r--r--tools/arch/arm64/include/asm/barrier.h133
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/builtin-record.c7
-rw-r--r--tools/perf/builtin-stat.c28
-rw-r--r--tools/perf/builtin-top.c3
-rw-r--r--tools/perf/builtin-trace.c34
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c131
-rw-r--r--tools/perf/jvmti/jvmti_agent.c49
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py493
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling1
-rw-r--r--tools/perf/util/evlist.c27
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c5
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h1
-rw-r--r--tools/perf/util/intel-pt.c16
-rw-r--r--tools/perf/util/pmu.c2
400 files changed, 5319 insertions, 4018 deletions
diff --git a/.mailmap b/.mailmap
index a95c436130b5..b4b0b0b768dd 100644
--- a/.mailmap
+++ b/.mailmap
@@ -160,6 +160,7 @@ Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
+Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
index fb3d1e03b881..1e5d172e0646 100644
--- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -37,8 +37,8 @@ Description:
0-| / \/ \/
+---0----1----2----3----4----5----6------------> time (s)
- 2. To make the LED go instantly from one brigntess value to another,
- we should use use zero-time lengths (the brightness must be same as
+ 2. To make the LED go instantly from one brightness value to another,
+ we should use zero-time lengths (the brightness must be same as
the previous tuple's). So the format should be:
"brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
brightness_2 0 ...". For example:
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index f5e0f82fd503..58c4256d37a3 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -27,7 +27,7 @@ SoCs:
compatible = "renesas,r8a77470"
- RZ/G2M (R8A774A1)
compatible = "renesas,r8a774a1"
- - RZ/G2E (RA8774C0)
+ - RZ/G2E (R8A774C0)
compatible = "renesas,r8a774c0"
- R-Car M1A (R8A77781)
compatible = "renesas,r8a7778"
diff --git a/Documentation/devicetree/bindings/i2c/i2c-omap.txt b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
index 7e49839d4124..4b90ba9f31b7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-omap.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-omap.txt
@@ -1,8 +1,12 @@
I2C for OMAP platforms
Required properties :
-- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
- or "ti,omap4-i2c"
+- compatible : Must be
+ "ti,omap2420-i2c" for OMAP2420 SoCs
+ "ti,omap2430-i2c" for OMAP2430 SoCs
+ "ti,omap3-i2c" for OMAP3 SoCs
+ "ti,omap4-i2c" for OMAP4+ SoCs
+ "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
- #address-cells = <1>;
- #size-cells = <0>;
diff --git a/Documentation/i2c/busses/i2c-nvidia-gpu b/Documentation/i2c/busses/i2c-nvidia-gpu
new file mode 100644
index 000000000000..31884d2b2eb5
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-nvidia-gpu
@@ -0,0 +1,18 @@
+Kernel driver i2c-nvidia-gpu
+
+Datasheet: not publicly available.
+
+Authors:
+ Ajay Gupta <ajayg@nvidia.com>
+
+Description
+-----------
+
+i2c-nvidia-gpu is a driver for I2C controller included in NVIDIA Turing
+and later GPUs and it is used to communicate with Type-C controller on GPUs.
+
+If your 'lspci -v' listing shows something like the following,
+
+01:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
+
+then this driver should support the I2C controller of your GPU.
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 73aaaa3da436..804f9426ed17 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
____________________________________________________________|___________________________________________________________
| | | |
ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120 TB | ffffc7ffffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 | -56 TB | ffffc8ffffffffff | 1 TB | ... unused hole
+ ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | LDT remap for PTI
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
- | Identical layout to the 47-bit one from here on:
+ | Identical layout to the 56-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
@@ -83,7 +84,7 @@ Notes:
__________________|____________|__________________|_________|___________________________________________________________
| | | |
0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
- | | | | virtual memory addresses up to the -128 TB
+ | | | | virtual memory addresses up to the -64 PB
| | | | starting offset of kernel mappings.
__________________|____________|__________________|_________|___________________________________________________________
|
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
____________________________________________________________|___________________________________________________________
| | | |
ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 | -60 PB | ff8fffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 | -28 PB | ff9fffffffffffff | 4 PB | LDT remap for PTI
+ ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
| Identical layout to the 47-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 97b7adbceda4..68aed077f7b6 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -25,7 +25,7 @@ Offset Proto Name Meaning
0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
140/080 ALL edid_info Video mode setup (struct edid_info)
1C0/020 ALL efi_info EFI 32 information (struct efi_info)
-1E0/004 ALL alk_mem_k Alternative mem check, in KB
+1E0/004 ALL alt_mem_k Alternative mem check, in KB
1E4/004 ALL scratch Scratch field for the kernel setup code
1E8/001 ALL e820_entries Number of entries in e820_table (below)
1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
diff --git a/MAINTAINERS b/MAINTAINERS
index db9153774c6b..a22b0c23f419 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6607,9 +6607,9 @@ F: arch/*/include/asm/suspend*.h
HID CORE LAYER
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-input@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: drivers/hid/
F: include/linux/hid*
@@ -6861,6 +6861,13 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-core-acpi.c
+I2C CONTROLLER DRIVER FOR NVIDIA GPU
+M: Ajay Gupta <ajayg@nvidia.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-nvidia-gpu
+F: drivers/i2c/busses/i2c-nvidia-gpu.c
+
I2C MUXES
M: Peter Rosin <peda@axentia.se>
L: linux-i2c@vger.kernel.org
@@ -8367,7 +8374,7 @@ F: drivers/media/dvb-frontends/lgdt3305.*
LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
M: Viresh Kumar <vireshk@kernel.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: include/linux/pata_arasan_cf_data.h
F: drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8391,7 @@ F: drivers/ata/ata_generic.c
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
@@ -8403,7 +8410,7 @@ F: include/linux/ahci_platform.h
LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
M: Mikael Pettersson <mikpelinux@gmail.com>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/sata_promise.*
@@ -10784,6 +10791,14 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
+OMAP I2C DRIVER
+M: Vignesh R <vigneshr@ti.com>
+L: linux-omap@vger.kernel.org
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F: drivers/i2c/busses/i2c-omap.c
+
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -15436,9 +15451,9 @@ F: include/linux/usb/gadget*
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-usb@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
diff --git a/Makefile b/Makefile
index 9fce8b91c15f..2f36db897895 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 20
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = "People's Front"
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index 6a8c53dec57e..b7c77bb1bfd2 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -73,9 +73,15 @@
})
#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
+ copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) \
+ copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+ copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
copy_to_user(u, k, sizeof(struct termios))
#endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index 1e9121c9b3c7..971311605288 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -32,6 +32,11 @@
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index de6c8360fbe3..4575ba34a0ea 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -26,6 +26,19 @@ struct termios {
speed_t c_ospeed; /* output speed */
};
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
/* Alpha has matching termios and ktermios */
struct ktermios {
@@ -152,6 +165,7 @@ struct ktermios {
#define B3000000 00034
#define B3500000 00035
#define B4000000 00036
+#define BOTHER 00037
#define CSIZE 00001400
#define CS5 00000000
@@ -169,6 +183,9 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define CIBAUD 07600000
+#define IBSHIFT 16
+
/* c_lflag bits */
#define ISIG 0x00000080
#define ICANON 0x00000100
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
index b560ff88459b..5ff9a179c83c 100644
--- a/arch/arm/boot/dts/imx53-ppd.dts
+++ b/arch/arm/boot/dts/imx53-ppd.dts
@@ -55,7 +55,7 @@
};
chosen {
- stdout-path = "&uart1:115200n8";
+ stdout-path = "serial0:115200n8";
};
memory@70000000 {
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index ed9a980bce85..beefa1b2049d 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -740,7 +740,7 @@
i2c1: i2c@21a0000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
+ compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
reg = <0x021a0000 0x4000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_I2C1>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 53b3408b5fab..7d7d679945d2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -117,7 +117,9 @@
regulator-name = "enet_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-always-on;
};
reg_pcie_gpio: regulator-pcie-gpio {
@@ -180,6 +182,7 @@
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
phy-handle = <&ethphy1>;
+ phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
status = "okay";
mdio {
@@ -373,6 +376,8 @@
MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
+ /* phy reset */
+ MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
>;
};
diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts
index 41ec66a96990..ca6249558760 100644
--- a/arch/arm/boot/dts/vf610m4-colibri.dts
+++ b/arch/arm/boot/dts/vf610m4-colibri.dts
@@ -50,8 +50,8 @@
compatible = "fsl,vf610m4";
chosen {
- bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
- stdout-path = "&uart2";
+ bootargs = "clk_ignore_unused init=/linuxrc rw";
+ stdout-path = "serial2:115200";
};
memory@8c000000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 1c7616815a86..63af6234c1b6 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8a9af0..12659ce5c1f3 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,7 +10,7 @@
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Hardware-wise, we have a two level page table structure, where the first
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 3d191fd52910..d594f60c3224 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -296,23 +296,13 @@ struct modem_private_data {
static struct modem_private_data modem_priv;
-static struct resource ams_delta_nand_resources[] = {
- [0] = {
- .start = OMAP1_MPUIO_BASE,
- .end = OMAP1_MPUIO_BASE +
- OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
static struct platform_device ams_delta_nand_device = {
.name = "ams-delta-nand",
.id = -1,
- .num_resources = ARRAY_SIZE(ams_delta_nand_resources),
- .resource = ams_delta_nand_resources,
};
-#define OMAP_GPIO_LABEL "gpio-0-15"
+#define OMAP_GPIO_LABEL "gpio-0-15"
+#define OMAP_MPUIO_LABEL "mpuio"
static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
.table = {
@@ -324,6 +314,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 1, "data", 1, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 2, "data", 2, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 3, "data", 3, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 4, "data", 4, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 5, "data", 5, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 6, "data", 6, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 7, "data", 7, 0),
{ },
},
};
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6fe52819e014..339eb17c9808 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 8253a1a9e985..fef7351e9f67 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -139,6 +139,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
@@ -154,6 +155,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
@@ -169,6 +171,7 @@
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index b5f2273caca4..a79c8d369e0b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -652,7 +652,7 @@
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x35>, <&dmac1 0x34>,
<&dmac2 0x35>, <&dmac2 0x34>;
- dma-names = "tx", "rx";
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 518>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
index fe2e2c051cc9..5a7012be0d6a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
@@ -15,7 +15,7 @@
aliases {
serial0 = &scif0;
- ethernet0 = &avb;
+ ethernet0 = &gether;
};
chosen {
@@ -97,23 +97,6 @@
};
};
-&avb {
- pinctrl-0 = <&avb_pins>;
- pinctrl-names = "default";
-
- phy-mode = "rgmii-id";
- phy-handle = <&phy0>;
- renesas,no-ether-link;
- status = "okay";
-
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
- };
-};
-
&canfd {
pinctrl-0 = <&canfd0_pins>;
pinctrl-names = "default";
@@ -139,6 +122,23 @@
clock-frequency = <32768>;
};
+&gether {
+ pinctrl-0 = <&gether_pins>;
+ pinctrl-names = "default";
+
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+ renesas,no-ether-link;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&i2c0 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
@@ -236,16 +236,17 @@
};
&pfc {
- avb_pins: avb {
- groups = "avb_mdio", "avb_rgmii";
- function = "avb";
- };
-
canfd0_pins: canfd0 {
groups = "canfd0_data_a";
function = "canfd0";
};
+ gether_pins: gether {
+ groups = "gether_mdio_a", "gether_rgmii",
+ "gether_txcrefclk", "gether_txcrefclk_mega";
+ function = "gether";
+ };
+
i2c0_pins: i2c0 {
groups = "i2c0";
function = "i2c0";
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3e2091708b8e..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,6 +24,14 @@
#define KERNEL_DS UL(-1)
#define USER_DS (TASK_SIZE_64 - 1)
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN 0
+
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9d9582cac6c4..9b432d9fcada 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);
-
- memblock_allow_resize();
}
void __init bootmem_init(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 394b8d554def..d1d6601b385d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -659,6 +659,8 @@ void __init paging_init(void)
memblock_free(__pa_symbol(init_pg_dir),
__pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+ memblock_allow_resize();
}
/*
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 6181e4134483..fe3ddd73a0cc 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -55,12 +55,12 @@
*/
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE 16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE 512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#else
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index f64ebb9c9a41..e14b6621c933 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -63,7 +63,7 @@ extern int mem_init_done;
#include <asm-generic/4level-fixup.h>
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 75108ec669eb..6c79e8a16a26 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
void (*cvmx_override_ipd_port_setup) (int ipd_port);
/* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
/**
* Return the number of interfaces the chip has. Each interface
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index e6c9485cadcf..cb38461391cb 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size,
void *ret;
ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
- if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
dma_cache_wback_inv((unsigned long) ret, size);
ret = (void *)UNCAC_ADDR(ret);
}
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index d3e19a55cf53..9f52db930c00 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -4,7 +4,7 @@
#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
#include <asm-generic/sizes.h>
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index b941ac7d4e70..c7bb74e22436 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD 0
#endif
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0b33577932c3..e21053e5e0da 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
-STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384)
+STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 593039620487..b1bdd15e3429 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -22,10 +22,10 @@ OBJCOPYFLAGS :=
OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
$(call if_changed,ld)
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
$(obj)/info.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
$(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 259d1698ac50..c69cb04b7a59 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -64,6 +64,8 @@ CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
@@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
@@ -461,6 +465,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 37fd60c20e22..32f539dc9c19 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -82,9 +84,11 @@ CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
@@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
@@ -458,6 +462,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7cb6a52f727d..4d58a92b5d97 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_NR_CPUS=256
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -60,9 +65,6 @@ CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
@@ -157,33 +161,6 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
@@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
@@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
-CONFIG_ZCRYPT_MULTIDEVNODES=y
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
@@ -247,4 +224,30 @@ CONFIG_CRC7=m
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
-CONFIG_CMM=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index dbd689d556ce..ccbb53e22024 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce_limit = STACK_TOP_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
- /* pgd_alloc() did not account this pud */
- mm_inc_nr_puds(mm);
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
@@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk,
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not account this pmd */
- mm_inc_nr_pmds(mm);
- mm_inc_nr_puds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f0f9bcf94c03..5ee733720a57 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
- if (mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(mm))
return _SEGMENT_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(mm))
return _REGION3_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(mm))
return _REGION2_ENTRY_EMPTY;
return _REGION1_ENTRY_EMPTY;
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 411d435e7a7d..063732414dfb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr)
_REGION_ENTRY_PROTECT | \
_REGION_ENTRY_NOEXEC)
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION1_SIZE;
+}
+#define mm_p4d_folded(mm) mm_p4d_folded(mm)
+
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION2_SIZE;
+}
+#define mm_pud_folded(mm) mm_pud_folded(mm)
+
+static inline bool mm_pmd_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION3_SIZE;
+}
+#define mm_pmd_folded(mm) mm_pmd_folded(mm)
+
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
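
The mm_p4d/pud/pmd_folded() additions above pair an arch static inline with a same-name #define so that generic page-table code can tell the architecture provided its own version and skip the fallback. A minimal stand-alone sketch of that pattern, with made-up constants and user-space output, purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* toy stand-ins for the kernel types/constants (illustrative values only) */
    struct mm_struct { unsigned long asce_limit; };
    #define REGION3_SIZE (1UL << 42)

    /* arch helper plus same-name macro, mirroring the pattern above */
    static inline bool mm_pmd_folded(struct mm_struct *mm)
    {
            return mm->asce_limit <= REGION3_SIZE;
    }
    #define mm_pmd_folded(mm) mm_pmd_folded(mm)

    /* generic fallback: only used when an arch did not define the macro */
    #ifndef mm_pmd_folded
    #define mm_pmd_folded(mm) false
    #endif

    int main(void)
    {
            struct mm_struct mm = { .asce_limit = 1UL << 41 };
            printf("pmd folded: %d\n", mm_pmd_folded(&mm)); /* 1: three-level address space */
            return 0;
    }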
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 302795c47c06..81038ab357ce 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
-static __no_sanitize_address_or_inline unsigned short stap(void)
+static __no_kasan_or_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
+static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 27248f42a03c..ce4e17c9aad6 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,7 +14,7 @@
* General size of kernel stacks
*/
#ifdef CONFIG_KASAN
-#define THREAD_SIZE_ORDER 3
+#define THREAD_SIZE_ORDER 4
#else
#define THREAD_SIZE_ORDER 2
#endif
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 457b7ba0fbb6..b31c779cf581 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(tlb->mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(tlb->mm))
return;
tlb_remove_table(tlb, p4d);
}
@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(tlb->mm))
return;
tlb_remove_table(tlb, pud);
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 724fba4d09d2..39191a0feed1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -236,10 +236,10 @@ ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lghi %r4,__TASK_stack
lghi %r1,__TASK_thread
- lg %r5,0(%r4,%r3) # start of kernel stack of next
+ llill %r5,STACK_INIT
stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
+ lg %r15,0(%r4,%r3) # start of kernel stack of next
+ agr %r15,%r5 # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cc085e2d2ce9..74091fd3101e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -373,7 +373,7 @@ static int __hw_perf_event_init(struct perf_event *event)
return -ENOENT;
if (ev > PERF_CPUM_CF_MAX_CTR)
- return -EINVAL;
+ return -ENOENT;
/* Obtain the counter set to which the specified counter belongs */
set = get_counter_set(ev);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7bf604ff50a1..bfabeb1889cc 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
-static struct attribute *cpumsf_pmu_events_attr[] = {
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- NULL,
- NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should also
+ * be turned off for diagnostic sampling.
+ *
+ * During initialization of the device driver, check the authorization
+ * level for diagnostic sampling and install the attribute
+ * file for diagnostic sampling if necessary.
+ *
+ * For now install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add another entry for the final NULL pointer.
+ */
+enum {
+ SF_CYCLES_BASIC_ATTR_IDX = 0,
+ SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+ SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+ [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};
PMU_FORMAT_ATTR(event, "config:0-63");
@@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void)
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
- cpumsf_pmu_events_attr[1] =
+ /* Sampling of diagnostic data authorized,
+ * install event into attribute list of PMU device.
+ */
+ cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
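
The cpumsf attribute rework above sizes the events array with enum indices and leaves the trailing slot NULL, so the list stays terminated while the diagnostic entry can be filled in later once authorization is known. A rough user-space sketch of that idiom, with hypothetical names rather than kernel code:

    #include <stdio.h>

    enum {
            BASIC_IDX = 0,
            DIAG_IDX,
            ATTR_MAX
    };

    /* extra slot stays NULL and terminates the list */
    static const char *events_attr[ATTR_MAX + 1] = {
            [BASIC_IDX] = "SF_CYCLES_BASIC",
    };

    int main(void)
    {
            int diag_authorized = 1;        /* stand-in for the si.ad check */

            if (diag_authorized)
                    events_attr[DIAG_IDX] = "SF_CYCLES_BASIC_DIAG";

            for (const char **p = events_attr; *p; p++)
                    printf("%s\n", *p);
            return 0;
    }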
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index eb8aebea3ea7..e76309fbbcb3 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index a22b2cf86eec..f849ac61c5da 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21eb7407d51b..8429ab079715 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -154,14 +154,14 @@ SECTIONS
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
*/
- .vmlinux.info 0 : {
+ .vmlinux.info 0 (INFO) : {
QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
- }
+ } :NONE
/* Debugging sections. */
STABS_DEBUG
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 76d89ee8b428..814f26520aa2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
mm->context.asce_limit = _REGION1_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm_inc_nr_puds(mm);
} else {
crst_table_init(table, _REGION1_ENTRY_EMPTY);
pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index ae0d9e889534..d31bde0870d8 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
{
return mode->distance ? mode->distance(a, b) : 0;
}
+EXPORT_SYMBOL(__node_distance);
int numa_debug_enabled;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 74c002ddc0ce..28c40624bcb6 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
io_req->fds[0] = dev->cow.fd;
else
io_req->fds[0] = dev->fd;
+ io_req->error = 0;
if (req_op(req) == REQ_OP_FLUSH) {
io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
io_req->cow_offset = -1;
io_req->offset = off;
io_req->length = bvec->bv_len;
- io_req->error = 0;
io_req->sector_mask = 0;
-
io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
io_req->offsets[0] = 0;
io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
+ struct ubd *ubd_dev = hctx->queue->queuedata;
struct request *req = bd->rq;
int ret = 0;
blk_mq_start_request(req);
+ spin_lock_irq(&ubd_dev->lock);
+
if (req_op(req) == REQ_OP_FLUSH) {
ret = ubd_queue_one_vec(hctx, req, 0, NULL);
} else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
out:
- if (ret < 0) {
+ spin_unlock_irq(&ubd_dev->lock);
+
+ if (ret < 0)
blk_mq_requeue_request(req, true);
- }
+
return BLK_STS_OK;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba7e3464ee92..9d734f3c8234 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -525,7 +525,6 @@ config X86_VSMP
bool "ScaleMP vSMP"
select HYPERVISOR_GUEST
select PARAVIRT
- select PARAVIRT_XXL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
depends on SMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b562e464009..88398fdf8129 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif
-# Speed up the build
-KBUILD_CFLAGS += -pipe
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -239,7 +237,7 @@ archheaders:
archmacros:
$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
export ASM_MACRO_FLAGS
KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4da9b1c58d28..c1a812bd5a27 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d6271cce198..1d0a7778e163 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
: "cc");
}
#endif
- return hv_status;
+ return hv_status;
}
/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index cd0cf1c568b4..8f657286d599 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -33,12 +33,14 @@
/*
* Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
*/
-#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define __PAGE_OFFSET page_offset_base
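
The new bases are the most negative canonical address plus 17 PGD slots: one reserved for the PTI LDT remap and 16 for a hypervisor. A quick stand-alone check of that arithmetic (illustrative only; the canonical start addresses are written out by hand):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pgdir_l4 = 1UL << 39;  /* 512 GiB per PGD slot, 4-level paging */
            unsigned long pgdir_l5 = 1UL << 48;  /* 256 TiB per PGD slot, 5-level paging */

            /* most negative address + 17 slots (1 LDT + 16 hypervisor) */
            printf("L4 page offset: 0x%016lx\n", 0xffff800000000000UL + 17 * pgdir_l4);
            printf("L5 page offset: 0x%016lx\n", 0xff00000000000000UL + 17 * pgdir_l5);
            return 0;
    }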
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 04edd2d58211..84bd9bdc1987 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
*/
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
-#define LDT_PGD_ENTRY_L4 -3UL
-#define LDT_PGD_ENTRY_L5 -112UL
-#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 87623c6b13db..bd5ac6cc37db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -13,12 +13,15 @@
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
- u32 val = 0;
-
- if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
- "I", _Q_PENDING_OFFSET))
- val |= _Q_PENDING_VAL;
+ u32 val;
+ /*
+ * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+ * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+ * statement expression, which GCC doesn't like.
+ */
+ val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+ "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
return val;
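
The rewritten helper avoids the if() by scaling the boolean test-and-set result directly: multiplying a 0/1 value by the pending-bit value yields either 0 or the flag, with no branch and no statement expression. A trivial sketch of that trick with illustrative constants:

    #include <stdio.h>

    #define PENDING_VAL 0x100U  /* illustrative stand-in for _Q_PENDING_VAL */

    static unsigned int fold_pending(int was_already_set)
    {
            /* 0 or 1 scaled to 0 or PENDING_VAL, no conditional needed */
            return (unsigned int)was_already_set * PENDING_VAL;
    }

    int main(void)
    {
            printf("not set -> 0x%x\n", fold_pending(0)); /* 0x0   */
            printf("set     -> 0x%x\n", fold_pending(1)); /* 0x100 */
            return 0;
    }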
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 123e669bf363..790ce08e41f2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -9,7 +9,7 @@
#include <linux/mm.h>
#include <linux/device.h>
-#include <linux/uaccess.h>
+#include <asm/extable.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
*/
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
- return __put_user(val, (unsigned long __user *)addr);
+ int ret = 0;
+
+ asm volatile("1: mov %[val], %[ptr]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [ptr] "=m" (*addr)
+ : [val] "r" (val));
+
+ return ret;
}
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+ unsigned long *val)
{
- return __get_user(*val, (unsigned long __user *)addr);
+ int ret = 0;
+ unsigned long rval = ~0ul;
+
+ asm volatile("1: mov %[ptr], %[rval]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [rval] "+r" (rval)
+ : [ptr] "m" (*addr));
+ *val = rval;
+
+ return ret;
}
#ifdef CONFIG_XEN_PV
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c66d2fc8f81..36d2696c9563 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
* be somewhat complicated (e.g. segment offset would require an instruction
* parser). So only support physical addresses up to page granuality for now.
*/
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_ADDRV))
return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
return 1;
}
+EXPORT_SYMBOL_GPL(mce_usable_address);
bool mce_is_memory_error(struct mce *m)
{
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
return true;
}
+EXPORT_SYMBOL_GPL(mce_is_correctable);
static bool cec_add_mce(struct mce *m)
{
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 1c72f3819eb1..e81a2db42df7 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
+#include <linux/i8253.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
if (efi_enabled(EFI_BOOT))
x86_platform.get_nmi_reason = hv_get_nmi_reason;
+ /*
+ * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+ * counter register during PIT shutdown restarts the PIT. So it
+ * continues to interrupt @18.2 HZ. Setting i8253_clear_counter
+ * to false tells pit_shutdown() not to zero the counter so that
+ * the PIT really is shutdown. Generation 2 VMs don't have a PIT,
+ * and setting this value has no effect.
+ */
+ i8253_clear_counter_on_shutdown = false;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Setup the hook to get control post apic initialization.
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d9ab49bed8af..0eda91f8eeac 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
{
unsigned long long ns;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ab18e0884dc6..6135ae8ce036 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
*/
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
unsigned long va;
bool is_vmalloc;
spinlock_t *ptl;
- pgd_t *pgd;
- int i;
+ int i, nr_pages;
if (!static_cpu_has(X86_FEATURE_PTI))
return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
/* Check if the current mappings are sane */
sanity_check_ldt_mapping(mm);
- /*
- * Did we already have the top level entry allocated? We can't
- * use pgd_none() for this because it doens't do anything on
- * 4-level page table kernels.
- */
- pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
is_vmalloc = is_vmalloc_addr(ldt->entries);
- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
unsigned long offset = i << PAGE_SHIFT;
const void *src = (char *)ldt->entries + offset;
unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
/* Propagate LDT mapping to the user page-table */
map_ldt_struct_to_user(mm);
- va = (unsigned long)ldt_slot_va(slot);
- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
ldt->slot = slot;
return 0;
}
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+ unsigned long va;
+ int i, nr_pages;
+
+ if (!ldt)
+ return;
+
+ /* LDT map/unmap is only required for PTI */
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long offset = i << PAGE_SHIFT;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+ ptep = get_locked_pte(mm, va, &ptl);
+ pte_clear(mm, va, ptep);
+ pte_unmap_unlock(ptep, ptl);
+ }
+
+ va = (unsigned long)ldt_slot_va(ldt->slot);
+ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
#else /* !CONFIG_PAGE_TABLE_ISOLATION */
static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
return 0;
}
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
}
install_ldt(mm, new_ldt);
+ unmap_ldt_struct(mm, old_ldt);
free_ldt_struct(old_ldt);
error = 0;
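
Both map_ldt_struct() and the new unmap_ldt_struct() derive the number of pages to walk from the same expression, DIV_ROUND_UP(nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE). A small stand-alone check of that arithmetic, assuming the usual x86 constants (8-byte descriptors, 4 KiB pages):

    #include <stdio.h>

    #define LDT_ENTRY_SIZE 8U
    #define PAGE_SIZE      4096U
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int sizes[] = { 1, 512, 513, 8192 };  /* 8192 = LDT_ENTRIES max */

            for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%4u entries -> %2u page(s)\n", sizes[i],
                           DIV_ROUND_UP(sizes[i] * LDT_ENTRY_SIZE, PAGE_SIZE));
            return 0;
    }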
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 1eae5af491c2..891a75dbc131 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,65 +26,8 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
- unsigned long flags = native_save_fl();
-
- if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
- flags &= ~X86_EFLAGS_IF;
- return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
- if (flags & X86_EFLAGS_IF)
- flags &= ~X86_EFLAGS_AC;
- else
- flags |= X86_EFLAGS_AC;
- native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
- unsigned long addr, unsigned len)
-{
- switch (type) {
- case PARAVIRT_PATCH(irq.irq_enable):
- case PARAVIRT_PATCH(irq.irq_disable):
- case PARAVIRT_PATCH(irq.save_fl):
- case PARAVIRT_PATCH(irq.restore_fl):
- return paravirt_patch_default(type, ibuf, addr, len);
- default:
- return native_patch(type, ibuf, addr, len);
- }
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
{
void __iomem *address;
unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
}
#endif
- if (cap & ctl & (1 << 4)) {
- /* Setup irq ops and turn on vSMP IRQ fastpath handling */
- pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
- pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
- pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
- pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
- pv_ops.init.patch = vsmp_patch;
- ctl &= ~(1 << 4);
- }
writel(ctl, address + 4);
ctl = readl(address + 4);
pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
early_iounmap(address, 8);
}
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
static int is_vsmp = -1;
static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
{
return 0;
}
+static void __init set_vsmp_ctl(void)
+{
+}
#endif
static void __init vsmp_cap_cpus(void)
{
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
void __iomem *address;
unsigned int cfg, topology, node_shift, maxcpus;
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
vsmp_cap_cpus();
- set_vsmp_pv_ops();
+ set_vsmp_ctl();
return;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 0d7b3ae4960b..a5d7ed125337 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
init_top_pgt[0] = __pgd(0);
/* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt */
+ /* L4[273] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_top_pgt);
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
addr[0] = (unsigned long)pgd;
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
- * Both L4[272][0] and L4[511][510] have entries that point to the same
+ /* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
+ * Both L4[273][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b06731705529..055e37e43541 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
/*
* The interface requires atomic updates on p2m elements.
- * xen_safe_write_ulong() is using __put_user which does an atomic
- * store via asm().
+ * xen_safe_write_ulong() is using an atomic store via asm().
*/
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/atomic.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
*/
static void xen_qlock_wait(u8 *byte, u8 val)
{
- unsigned long flags;
int irq = __this_cpu_read(lock_kicker_irq);
+ atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */
if (irq == -1 || in_nmi())
return;
- /* Guard against reentry. */
- local_irq_save(flags);
+ /* Detect reentry. */
+ atomic_inc(nest_cnt);
- /* If irq pending already clear it. */
- if (xen_test_irq_pending(irq)) {
+ /* If an irq is already pending and there is no nested call, clear it. */
+ if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
xen_clear_irq_pending(irq);
} else if (READ_ONCE(*byte) == val) {
/* Block until irq becomes pending (or a spurious wakeup) */
xen_poll_irq(irq);
}
- local_irq_restore(flags);
+ atomic_dec(nest_cnt);
}
static irqreturn_t dummy_handler(int irq, void *dev_id)
diff --git a/block/bio.c b/block/bio.c
index d5368a445561..a50d59236b19 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1260,6 +1260,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (ret)
goto cleanup;
} else {
+ zero_fill_bio(bio);
iov_iter_advance(iter, bio->bi_iter.bi_size);
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 76f867ea9a9b..e8b3bb9bf375 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -51,16 +51,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- while (nr_sects) {
- unsigned int req_sects = nr_sects;
- sector_t end_sect;
-
- if (!req_sects)
- goto fail;
- if (req_sects > UINT_MAX >> 9)
- req_sects = UINT_MAX >> 9;
+ if (!nr_sects)
+ return -EINVAL;
- end_sect = sector + req_sects;
+ while (nr_sects) {
+ unsigned int req_sects = min_t(unsigned int, nr_sects,
+ bio_allowed_max_sectors(q));
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
@@ -68,8 +64,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
+ sector += req_sects;
nr_sects -= req_sects;
- sector = end_sect;
/*
* We can loop for a long time in here, if someone does
@@ -82,14 +78,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*biop = bio;
return 0;
-
-fail:
- if (bio) {
- submit_bio_wait(bio);
- bio_put(bio);
- }
- *biop = NULL;
- return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
@@ -161,7 +149,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
return -EOPNOTSUPP;
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
- max_write_same_sectors = UINT_MAX >> 9;
+ max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
bio = blk_next_bio(bio, 1, gfp_mask);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6b5ad275ed56..e7696c47489a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
bio_get_first_bvec(prev_rq->bio, &pb);
else
bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
+ if (pb.bv_offset & queue_virt_boundary(q))
return true;
/*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ max_discard_sectors = min(q->limits.max_discard_sectors,
+ bio_allowed_max_sectors(q));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
diff --git a/block/blk.h b/block/blk.h
index a1841b8ff129..0089fefdf771 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
static inline bool __bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
- return offset ||
+ return (offset & queue_virt_boundary(q)) ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
@@ -396,6 +396,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
}
/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it has to be aligned to the logical
+ * block size, which is the minimum unit accepted by hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+ return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
+/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);
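
The new bio_allowed_max_sectors() helper caps a discard/write-same bio at the largest sector count whose byte size still fits in the 32-bit bvec_iter.bi_size while staying logical-block aligned. A minimal user-space sketch of that arithmetic (a hypothetical demo, not part of the patch):

    #include <stdio.h>
    #include <limits.h>

    static unsigned int allowed_max_sectors(unsigned int logical_block_size)
    {
            /* round_down(UINT_MAX, lbs): drop the remainder, then convert
             * bytes to 512-byte sectors */
            return (UINT_MAX - (UINT_MAX % logical_block_size)) >> 9;
    }

    int main(void)
    {
            printf("lbs=512  -> %u sectors\n", allowed_max_sectors(512));   /* 8388607 */
            printf("lbs=4096 -> %u sectors\n", allowed_max_sectors(4096));  /* 8388600 */
            return 0;
    }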
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e9626bf6ca29..d6c1b10f6c25 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
struct acpi_nfit_desc *acpi_desc;
struct nfit_spa *nfit_spa;
- /* We only care about memory errors */
- if (!mce_is_memory_error(mce))
+ /* We only care about uncorrectable memory errors */
+ if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+ return NOTIFY_DONE;
+
+ /* Verify the address reported in the MCE is valid. */
+ if (!mce_usable_address(mce))
return NOTIFY_DONE;
/*
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232245d..4b1ff5bc256a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <source@cogentembedded.com>
* Copyright (C) 2013-2015 Cogent Embedded, Inc.
* Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 56452cabce5b..0ed4b200fa58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
return -ENOMEM;
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index ef0ca9414f37..ff83e899df71 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node);
clk_unregister_fixed_factor(clk);
return 0;
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c981159b02c0..792735d7e46e 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+ * Until the following conditions are met, we need this clock to
+ * be marked as critical:
+ * a) The SCPI generic driver claims and enables all the clocks
+ * it needs
+ * b) CCF has a clock hand-off mechanism to make sure the
+ * clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 9309cfaaa464..4ada9668fd49 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+ * This clock, as fdiv2, is used by the SCPI FW and is required
+ * by the platform to operate correctly.
+ * Until the following conditions are met, we need this clock to
+ * be marked as critical:
+ * a) The SCPI generic driver claims and enables all the clocks
+ * it needs
+ * b) CCF has a clock hand-off mechanism to make sure the
+ * clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index e4ca6a45f313..ef1b267cb058 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "cxo",
- .parent_names = (const char *[]){ "xo_board" },
+ .parent_names = (const char *[]){ "xo-board" },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 9c38895542f4..d4350bb10b83 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,6 +20,13 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
+
+ if (i8253_clear_counter_on_shutdown) {
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
raw_spin_unlock(&i8253_lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0102cfc8efb..104b2e0d893b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
#ifdef CONFIG_DRM_AMDGPU_SI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 943dbf3c5da1..8de55f7f1a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default */
+uint amdgpu_dc_feature_mask = 0;
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
#endif
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override the enabled display features. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 2d4473557b0d..d13fc4fcb517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b0df6dc9a775..c1262f62cd9f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1524,13 +1527,6 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
- /*
- * PWM interperts 0 as 100% rather than 0% because of HW
- * limitation for level 0.So limiting minimum brightness level
- * to 1.
- */
- if (bd->props.brightness < 1)
- return 1;
if (dc_link_set_backlight_level(dm->backlight_link,
bd->props.brightness, 0, 0))
return 0;
@@ -2707,18 +2703,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_connector = &aconnector->base;
if (!aconnector->dc_sink) {
- /*
- * Create dc_sink when necessary to MST
- * Don't apply fake_sink to MST
- */
- if (aconnector->mst_port) {
- dm_dp_mst_dc_sink_create(drm_connector);
- return stream;
+ if (!aconnector->mst_port) {
+ sink = create_fake_sink(aconnector);
+ if (!sink)
+ return stream;
}
-
- sink = create_fake_sink(aconnector);
- if (!sink)
- return stream;
} else {
sink = aconnector->dc_sink;
}
@@ -3308,7 +3297,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
+ .destroy = drm_primary_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 978b34a5011c..924a38a1fc44 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -160,8 +160,6 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
-
- bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 03601d717fed..d02c32a1039c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct dc_sink *dc_sink;
- struct dc_sink_init_data init_params = {
- .link = aconnector->dc_link,
- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
- /* FIXME none of this is safe. we shouldn't touch aconnector here in
- * atomic_check
- */
-
- /*
- * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
- */
- if (!aconnector->port || !aconnector->port->aux.ddc.algo)
- return;
-
- ASSERT(aconnector->edid);
-
- dc_sink = dc_link_add_remote_sink(
- aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
- &init_params);
-
- dc_sink->priv = aconnector;
- aconnector->dc_sink = dc_sink;
-
- if (aconnector->dc_sink)
- amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
-}
-
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->base.helper_private;
- struct drm_encoder *enc_master =
- connector_funcs->best_encoder(&connector->base);
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->mst_port == master
- && !aconnector->port) {
- DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- aconnector->port = port;
- drm_connector_set_path_property(connector, pathprop);
-
- drm_connector_list_iter_end(&conn_iter);
- aconnector->mst_connected = true;
- return &aconnector->base;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- aconnector->mst_connected = true;
-
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink = NULL;
}
- aconnector->mst_connected = false;
+ drm_connector_unregister(connector);
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ drm_connector_put(connector);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
drm_connector_register(connector);
-
- if (aconnector->mst_connected)
- dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 8cf51da26657..2da851b40042 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fb04a4ad141f..5da2186b3615 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
@@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting(
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 199527171100..b57fa61b3034 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -169,6 +169,7 @@ struct link_training_settings;
struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
+ bool fbc_support;
};
enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b75ede5f84f7..b459867a05b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
if (events->force_trigger)
value |= 0x1;
- value |= 0x84;
+ if (num_pipes) {
+ struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+ if (dc->fbc_compressor)
+ value |= 0x84;
+ }
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index e3624ca24574..7c9fd9052ee2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1362,7 +1362,8 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- dc->fbc_compressor = dce110_compressor_create(ctx);
+ if (dc->config.fbc_support)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 2083c308007c..470d7b89071a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
PP_AVFS_MASK = 0x40000,
};
+enum DC_FEATURE_MASK {
+ DC_FBC_MASK = 0x1,
+};
+
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index d2e7c0fa96c2..8eb0bb241210 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
struct atom_common_table_header table_header;
uint8_t smuip_min_ver;
uint8_t smuip_max_ver;
- uint8_t smu_rsd1;
+ uint8_t waflclk_ss_mode;
uint8_t gpuclk_ss_mode;
uint16_t sclk_ss_percentage;
uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
uint32_t syspll3_1_vco_freq_10khz;
uint32_t bootup_fclk_10khz;
uint32_t bootup_waflclk_10khz;
- uint32_t reserved[3];
+ uint32_t smu_info_caps;
+ uint16_t waflclk_ss_percentage; // in unit of 0.001%
+ uint16_t smuinitoffset;
+ uint32_t reserved;
};
/*
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 57143d51e3ee..99861f32b1f9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
+ data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+ 1);
+
+ return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFclkGfxClkRatio,
+ data->registry_data.fclk_gfxclk_ratio);
+}
+
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
@@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to enable all smu features!",
return result);
+ result = vega20_notify_smc_display_change(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to notify smc display change!",
+ return result);
+
+ result = vega20_send_clock_ratio(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to send clock ratio!",
+ return result);
+
/* Initialize UVD/VCE powergating state */
vega20_init_powergate_state(hwmgr);
@@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return ret;
}
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
- bool has_disp)
-{
- struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DPM_UCLK].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
-
- return 0;
-}
-
int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
@@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
struct pp_display_clock_request clock_req;
int ret = 0;
- if ((hwmgr->display_config->num_display > 1) &&
- !hwmgr->display_config->multi_monitor_in_sync &&
- !hwmgr->display_config->nb_pstate_switch_disable)
- vega20_notify_smc_display_change(hwmgr, false);
- else
- vega20_notify_smc_display_change(hwmgr, true);
-
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 56fe6a0d42e8..25faaa5c5b10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -328,6 +328,7 @@ struct vega20_registry_data {
uint8_t disable_auto_wattman;
uint32_t auto_wattman_debug;
uint32_t auto_wattman_sample_period;
+ uint32_t fclk_gfxclk_ratio;
uint8_t auto_wattman_threshold;
uint8_t log_avfs_param;
uint8_t enable_enginess;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 45d64a81e945..4f63a736ea0e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -105,7 +105,8 @@
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
#define PPSMC_MSG_WaflTest 0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E
+// Unused ID 0x4F to 0x50
#define PPSMC_MSG_AllowGfxOff 0x51
#define PPSMC_MSG_DisallowGfxOff 0x52
#define PPSMC_MSG_GetPptLimit 0x53
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index e7c3ed6c9a2e..9b476368aa31 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
* If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
- if (fence_completed(gpu, submit->out_fence->seqno))
+ if (dma_fence_is_signaled(submit->out_fence))
return;
/*
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 94529aa82339..aef487dd8731 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
return frm;
}
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
- struct decon_context *ctx = crtc->ctx;
-
- return decon_get_frame_count(ctx, false);
-}
-
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
- .get_vblank_counter = decon_get_vblank_counter,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
int ret;
ctx->drm_dev = drm_dev;
- drm_dev->max_vblank_count = 0xffffffff;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
ctx->configs[win].pixel_formats = decon_formats;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index eea90251808f..2696289ecc78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->get_vblank_counter)
- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
- return 0;
-}
-
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index ec9604f1272b..5e61e707f955 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
void (*disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 07af7758066d..d81e62ae286a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -14,6 +14,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
+ struct drm_device *drm = encoder->dev;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(encoder->dev, connector,
- &exynos_dsi_connector_funcs,
+ ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
+ if (!drm->registered)
+ return 0;
+ connector->funcs->reset(connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+ drm_connector_register(connector);
return 0;
}
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel) {
+ if (IS_ERR(dsi->panel)) {
+ dsi->panel = NULL;
+ } else {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 918dd2c82209..01d182289efa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
struct drm_fb_helper *helper;
int ret;
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ if (!dev->mode_config.num_crtc)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..58e166effa45 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
- mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
- mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_entry e, m;
dma_addr_t dma_addr;
int ret;
+ struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+ bool partial_update = false;
if (bytes != 4 && bytes != 8)
return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+ e.type = GTT_TYPE_GGTT_PTE;
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
- * write, we assume the two 4 bytes writes are consecutive.
- * Otherwise, we abort and report error
+ * write, save the first 4 bytes in a list and update the virtual
+ * PTE. Only update the shadow PTE when the second 4 bytes arrive.
*/
if (bytes < info->gtt_entry_size) {
- if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
- /* the first partial part*/
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
- return 0;
- } else if ((g_gtt_index ==
- (ggtt_mm->ggtt_mm.last_partial_off >>
- info->gtt_entry_size_shift)) &&
- (off != ggtt_mm->ggtt_mm.last_partial_off)) {
- /* the second partial part */
-
- int last_off = ggtt_mm->ggtt_mm.last_partial_off &
- (info->gtt_entry_size - 1);
-
- memcpy((void *)&e.val64 + last_off,
- (void *)&ggtt_mm->ggtt_mm.last_partial_data +
- last_off, bytes);
-
- ggtt_mm->ggtt_mm.last_partial_off = -1UL;
- } else {
- int last_offset;
-
- gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
- ggtt_mm->ggtt_mm.last_partial_off, off,
- bytes, info->gtt_entry_size);
-
- /* set host ggtt entry to scratch page and clear
- * virtual ggtt entry as not present for last
- * partially write offset
- */
- last_offset = ggtt_mm->ggtt_mm.last_partial_off &
- (~(info->gtt_entry_size - 1));
-
- ggtt_get_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate_pte(vgpu, &m);
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- ops->clear_present(&m);
- ggtt_set_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate(gvt->dev_priv);
-
- ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
- ops->clear_present(&e);
- ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ bool found = false;
+
+ list_for_each_entry_safe(pos, n,
+ &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ if (g_gtt_index == pos->offset >>
+ info->gtt_entry_size_shift) {
+ if (off != pos->offset) {
+ /* the second partial part */
+ int last_off = pos->offset &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&pos->data + last_off,
+ bytes);
+
+ list_del(&pos->list);
+ kfree(pos);
+ found = true;
+ break;
+ }
+
+ /* update of the first partial part */
+ pos->data = e.val64;
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+ }
+ }
- return 0;
+ if (!found) {
+ /* the first partial part */
+ partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+ if (!partial_pte)
+ return -ENOMEM;
+ partial_pte->offset = off;
+ partial_pte->data = e.val64;
+ list_add_tail(&partial_pte->list,
+ &ggtt_mm->ggtt_mm.partial_pte_list);
+ partial_update = true;
}
}
- if (ops->test_present(&e)) {
+ if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
m = e;
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
} else {
- ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
}
out:
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate(gvt->dev_priv);
- ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
intel_vgpu_reset_ggtt(vgpu, false);
+ INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
return create_scratch_page_tree(vgpu);
}
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
+ struct intel_gvt_partial_pte *pos;
+
+ list_for_each_entry(pos,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+ pos->offset, pos->data);
+ kfree(pos);
+ }
intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
vgpu->gtt.ggtt_mm = NULL;
}
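
The hunk above replaces the single last_partial_off/last_partial_data pair with a list of pending partial PTE writes that are only committed to the shadow GTT once both halves have arrived. A minimal standalone sketch of the merge idea (not part of the patch; demo_* names are made up):

#include <stdint.h>
#include <assert.h>

/* Combine one 4-byte half of an 8-byte GGTT entry with the value saved
 * from the earlier partial write; off is the byte offset of this half. */
static uint64_t demo_merge_half(uint64_t saved, unsigned int off, uint32_t half)
{
	unsigned int shift = (off & 7) * 8;	/* 0 for the low dword, 32 for the high */

	saved &= ~((uint64_t)0xffffffff << shift);
	saved |= (uint64_t)half << shift;
	return saved;
}

int main(void)
{
	uint64_t e = demo_merge_half(0, 0, 0x11223344);	/* first 4-byte write */

	e = demo_merge_half(e, 4, 0x55667788);		/* second 4-byte write */
	assert(e == 0x5566778811223344ULL);
	return 0;
}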
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..d8cb04cc946d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -35,7 +35,6 @@
#define _GVT_GTT_H_
#define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
+struct intel_gvt_partial_pte {
+ unsigned long offset;
+ u64 data;
+ struct list_head list;
+};
+
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
- unsigned long last_partial_off;
- u64 last_partial_data;
+ struct list_head partial_pte_list;
} ggtt_mm;
};
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f50f67909a..aa280bb07125 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
return 0;
}
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
return 0;
}
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
- MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
- MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
MMIO_D(RC6_CTX_BASE, D_BXT);
MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 10e63eea5492..36a5147cd01e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 44e2c0f5ec50..ffdbbac4400e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
-
/*
* If any of the channel is single rank channel, worst case output
* will be same as if single rank memory, so consider single rank
@@ -1193,8 +1191,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
- dram_info->is_16gb_dimm = true;
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
@@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
return -EINVAL;
}
- dram_info->valid_dimm = true;
dram_info->valid = true;
return 0;
}
@@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
int ret;
dram_info->valid = false;
- dram_info->valid_dimm = false;
- dram_info->is_16gb_dimm = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8624b4bdc242..9102571e9692 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1948,7 +1948,6 @@ struct drm_i915_private {
struct dram_info {
bool valid;
- bool valid_dimm;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 09187286d346..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
* any non-page-aligned or non-canonical addresses.
*/
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
- entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+ entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
return -EINVAL;
/* pad_to_size was once a reserved field, so sanitize it */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56c7f8637311..47c302543799 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1757,7 +1757,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
if (i == 4)
continue;
- seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
pde, pte,
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7e2af5f4f39b..28039290655c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,13 +42,15 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
@@ -659,20 +661,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT(0)
-#define PIN_MAPPABLE BIT(1)
-#define PIN_ZONE_4G BIT(2)
-#define PIN_NONFAULT BIT(3)
-#define PIN_NOEVICT BIT(4)
-
-#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT(8)
-
-#define PIN_HIGH BIT(9)
-#define PIN_OFFSET_BIAS BIT(10)
-#define PIN_OFFSET_FIXED BIT(11)
+#define PIN_NONBLOCK BIT_ULL(0)
+#define PIN_MAPPABLE BIT_ULL(1)
+#define PIN_ZONE_4G BIT_ULL(2)
+#define PIN_NONFAULT BIT_ULL(3)
+#define PIN_NOEVICT BIT_ULL(4)
+
+#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT_ULL(8)
+
+#define PIN_HIGH BIT_ULL(9)
+#define PIN_OFFSET_BIAS BIT_ULL(10)
+#define PIN_OFFSET_FIXED BIT_ULL(11)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
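
The BIT() to BIT_ULL() conversion and the new I915_GTT_PAGE_MASK definition above rely on -size being the page mask when size is a power of two evaluated at 64-bit width. A quick standalone check of that identity (illustrative only, not part of the patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t page = UINT64_C(1) << 12;	/* I915_GTT_PAGE_SIZE_4K as a 64-bit value */

	/* -page == ~(page - 1), i.e. 0xfffffffffffff000 across all 64 bits */
	assert((uint64_t)-page == ~(page - 1));
	assert((UINT64_C(0xdeadbeefcafe) & -page) == UINT64_C(0xdeadbeefc000));
	return 0;
}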
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7c491ea3d052..e31c27e45734 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2095,8 +2095,12 @@ enum i915_power_well_id {
/* ICL PHY DFLEX registers */
#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
-#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -4593,12 +4597,12 @@ enum {
#define DRM_DIP_ENABLE (1 << 28)
#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 26)
-#define VSC_SELECT_SHIFT 26
-#define VSC_DIP_HW_HEA_DATA (0 << 26)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
-#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
#define VDIP_ENABLE_PPS (1 << 24)
/* Panel power sequencing */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 769f3f586661..ee3ca2de983b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -144,6 +144,9 @@ static const struct {
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
static const struct {
int sample_rate;
int clock;
@@ -164,6 +167,20 @@ static const struct {
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
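
The 593.407/594 MHz rows added to the HDMI N/CTS table above can be sanity-checked against the usual relation fs = f_TMDS * N / (128 * CTS). A small standalone check of one of the new rows (illustrative only):

#include <stdio.h>

int main(void)
{
	/* 48 kHz at 594 MHz TMDS from the new table: N = 6144, CTS = 594000 */
	unsigned long long tmds = 594000000ULL, n = 6144, cts = 594000;

	printf("fs = %llu Hz\n", tmds * n / (128 * cts));	/* prints fs = 48000 Hz */
	return 0;
}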
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 29075c763428..8d74276029e6 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Limiting to 99% as a temporary workaround. See
- * intel_min_cdclk() for details.
- */
- return 2 * max_cdclk_freq * 99 / 100;
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9741cc419e1b..23d8008a93bb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12768,17 +12768,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!new_crtc_state->active) {
- /*
- * Make sure we don't call initial_watermarks
- * for ILK-style watermark updates.
- *
- * No clue what this is supposed to achieve.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
- }
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->active &&
+ !HAS_GMCH_DISPLAY(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(new_crtc_state));
}
}
@@ -14646,7 +14641,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
fb->height < SKL_MIN_YUV_420_SRC_H ||
(fb->width % 4) != 0 || (fb->height % 4) != 0)) {
DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
- return -EINVAL;
+ goto err;
}
for (i = 0; i < fb->format->num_planes; i++) {
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index cdf19553ffac..5d5336fbe7b0 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
-}
+ dev_priv->lpe_audio.irq = -1;
+ dev_priv->lpe_audio.platdev = NULL;
+}
/**
* intel_lpe_audio_notify() - notify lpe audio event
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db9b8328275..245f0022bcfd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2881,8 +2881,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
* any underrun. If not able to get Dimm info assume 16GB dimm
* to avoid any underrun.
*/
- if (!dev_priv->dram_info.valid_dimm ||
- dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.is_16gb_dimm)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 8d03f64eabd7..5c22f2c8d4cf 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
err = igt_check_page_sizes(vma);
if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
- pr_err("page_sizes.gtt=%u, expected %lu\n",
+ pr_err("page_sizes.gtt=%u, expected %llu\n",
vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8e2e269db97e..127d81513671 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != offset ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
offset, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index af7dcb6da351..e7eb0d1e17be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index bf068da6b12e..f4a22689eb54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
@@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c78cd35a1294..f949287d926c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -491,7 +491,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ if (tcon->panel)
+ sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -555,7 +556,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
struct drm_panel *panel = tcon->panel;
struct drm_connector *connector = panel->connector;
struct drm_display_info display_info = connector->display_info;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf2a18571d48..a132c37d7334 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
+ /* notify if GPU has already been bound */
+ if (ops->gpu_bound)
+ ops->gpu_bound(pdev, id);
}
mutex_unlock(&vgasr_mutex);
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index aec253b44156..3cd7229b6e54 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -660,6 +660,20 @@ exit:
return ret;
}
+static int alps_sp_open(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ return hid_hw_open(hid);
+}
+
+static void alps_sp_close(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ hid_hw_close(hid);
+}
+
static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct alps_dev *data = hid_get_drvdata(hdev);
@@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input2->id.version = input->id.version;
input2->dev.parent = input->dev.parent;
+ input_set_drvdata(input2, hdev);
+ input2->open = alps_sp_open;
+ input2->close = alps_sp_close;
+
__set_bit(EV_KEY, input2->evbit);
data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
for (i = 0; i < data->sp_btn_cnt; i++)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index dc6d6477e961..a1fa2fc8c9b5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -359,6 +359,9 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
u32 value;
int ret;
+ if (!IS_ENABLED(CONFIG_ASUS_WMI))
+ return false;
+
ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f63489c882bb..c0d668944dbe 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -927,6 +927,9 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define I2C_VENDOR_ID_RAYDIUM 0x2386
+#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
+
#define USB_VENDOR_ID_RAZER 0x1532
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 52c3b01917e7..8237dd86fb17 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -107,7 +107,6 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4aab96cf0818..3cde7c1b9c33 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,7 @@
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
+#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
/* flags */
#define I2C_HID_STARTED 0
@@ -158,6 +159,8 @@ struct i2c_hid {
bool irq_wake_enabled;
struct mutex reset_lock;
+
+ unsigned long sleep_delay;
};
static const struct i2c_hid_quirks {
@@ -172,6 +175,8 @@ static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
I2C_HID_QUIRK_NO_RUNTIME_PM },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
+ I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
{ 0, 0 }
};
@@ -387,6 +392,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
+ unsigned long now, delay;
i2c_hid_dbg(ihid, "%s\n", __func__);
@@ -404,9 +410,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
goto set_pwr_exit;
}
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_ON) {
+ now = jiffies;
+ if (time_after(ihid->sleep_delay, now)) {
+ delay = jiffies_to_usecs(ihid->sleep_delay - now);
+ usleep_range(delay, delay + 1);
+ }
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_SLEEP)
+ ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
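
The Raydium quirk added above enforces a minimum 20 ms gap between a SLEEP command and the next power-on using jiffies arithmetic. A reduced sketch of that pattern using the same kernel helpers (demo_* names are illustrative, not the driver code):

#include <linux/jiffies.h>
#include <linux/delay.h>

static unsigned long demo_wake_not_before;

/* Called after putting the device to sleep: earliest time a wake may follow. */
static void demo_after_sleep(void)
{
	demo_wake_not_before = jiffies + msecs_to_jiffies(20);
}

/* Called before powering the device back on: wait out any remainder. */
static void demo_before_wake(void)
{
	unsigned long now = jiffies;

	if (time_after(demo_wake_not_before, now)) {
		unsigned long us = jiffies_to_usecs(demo_wake_not_before - now);

		usleep_range(us, us + 1);
	}
}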
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index cac262a912c1..89f2976f9c53 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -331,6 +331,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ .ident = "Direkt-Tek DTLAPY133-1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Mediacom Flexbook Edge 11",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 23872d08308c..a746017fac17 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (cmd == HIDIOCGCOLLECTIONINDEX) {
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->maxusage);
} else if (uref->usage_index >= field->report_count)
goto inval;
}
- if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
+ if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+ if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values >
+ field->report_count)
+ goto inval;
+
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->report_count -
+ uref_multi->num_values);
+ }
switch (cmd) {
case HIDIOCGUSAGE:
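
The array_index_nospec() calls added above follow the standard Spectre-v1 hardening pattern: after the bounds check, the index is clamped so it cannot be used out of range under speculation. A minimal sketch of the pattern (illustrative only):

#include <linux/nospec.h>

static int demo_lookup(const int *table, unsigned int idx, unsigned int len)
{
	if (idx >= len)
		return -1;
	/* After the check, clamp idx so a mispredicted branch cannot
	 * speculatively read table[] out of bounds. */
	idx = array_index_nospec(idx, len);
	return table[idx];
}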
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 975c95169884..84f61cec6319 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (info[i]->config[j] & HWMON_T_INPUT) {
err = hwmon_thermal_add_sensor(dev,
hwdev, j);
- if (err)
- goto free_device;
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+ }
}
}
}
@@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
-free_device:
- device_unregister(hdev);
free_hwmon:
kfree(hwdev);
ida_remove:
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 0ccca87f5271..293dd1c6c7b3 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%s\n", sdata->label);
}
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
{
int cpu;
@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
return -ENOENT;
}
-static void __init make_sensor_label(struct device_node *np,
- struct sensor_data *sdata,
- const char *label)
+static void make_sensor_label(struct device_node *np,
+ struct sensor_data *sdata, const char *label)
{
u32 id;
size_t n;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 56ccb1ea7da5..f2c681971201 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
This driver can also be built as a module. If so, the module
will be called i2c-nforce2-s4985.
+config I2C_NVIDIA_GPU
+ tristate "NVIDIA GPU I2C controller"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the
+ NVIDIA GPU I2C controller which is used to communicate with the GPU's
+ Type-C controller. This driver can also be built as a module called
+ i2c-nvidia-gpu.
+
config I2C_SIS5595
tristate "SiS 5595"
depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
config I2C_OMAP
tristate "OMAP I2C adapter"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || ARCH_K3
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 18b26af82b1c..5f0cb6915969 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644
index 000000000000..8822357bca0c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL 0x00
+#define I2C_MST_CNTL_GEN_START BIT(0)
+#define I2C_MST_CNTL_GEN_STOP BIT(1)
+#define I2C_MST_CNTL_CMD_READ (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6
+#define I2C_MST_CNTL_GEN_NACK BIT(28)
+#define I2C_MST_CNTL_STATUS GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31)
+
+#define I2C_MST_ADDR 0x04
+
+#define I2C_MST_I2C0_TIMING 0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24)
+
+#define I2C_MST_DATA 0x0c
+
+#define I2C_MST_HYBRID_PADCTL 0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15)
+
+struct gpu_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct i2c_adapter adapter;
+ struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+ u32 val;
+
+ /* enable I2C */
+ val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+ val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+ I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+ writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+ /* enable 100KHZ mode */
+ val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+ val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+ << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+ val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+ writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+ unsigned long target = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ do {
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+ break;
+ if ((val & I2C_MST_CNTL_STATUS) !=
+ I2C_MST_CNTL_STATUS_BUS_BUSY)
+ break;
+ usleep_range(500, 600);
+ } while (time_is_after_jiffies(target));
+
+ if (time_is_before_jiffies(target)) {
+ dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+ return -ETIME;
+ }
+
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ switch (val & I2C_MST_CNTL_STATUS) {
+ case I2C_MST_CNTL_STATUS_OKAY:
+ return 0;
+ case I2C_MST_CNTL_STATUS_NO_ACK:
+ return -EIO;
+ case I2C_MST_CNTL_STATUS_TIMEOUT:
+ return -ETIME;
+ default:
+ return 0;
+ }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+ int status;
+ u32 val;
+
+ val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+ (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+ I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ status = gpu_i2c_check_status(i2cd);
+ if (status < 0)
+ return status;
+
+ val = readl(i2cd->regs + I2C_MST_DATA);
+ switch (len) {
+ case 1:
+ data[0] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, data);
+ break;
+ case 3:
+ put_unaligned_be16(val >> 8, data);
+ data[2] = val;
+ break;
+ case 4:
+ put_unaligned_be32(val, data);
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+ u32 val;
+
+ writel(data, i2cd->regs + I2C_MST_DATA);
+
+ val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+ int status, status2;
+ int i, j;
+
+ /*
+ * The controller supports a maximum 4-byte read due to a known
+ * limitation of sending STOP after every read.
+ */
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD) {
+ /* program client address before starting read */
+ writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+ /* gpu_i2c_read has implicit start */
+ status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+ if (status < 0)
+ goto stop;
+ } else {
+ u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+ status = gpu_i2c_start(i2cd);
+ if (status < 0) {
+ if (i == 0)
+ return status;
+ goto stop;
+ }
+
+ status = gpu_i2c_write(i2cd, addr);
+ if (status < 0)
+ goto stop;
+
+ for (j = 0; j < msgs[i].len; j++) {
+ status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+ if (status < 0)
+ goto stop;
+ }
+ }
+ }
+ status = gpu_i2c_stop(i2cd);
+ if (status < 0)
+ return status;
+
+ return i;
+stop:
+ status2 = gpu_i2c_stop(i2cd);
+ if (status2 < 0)
+ dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+ return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+ .max_read_len = 4,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+ .master_xfer = gpu_i2c_master_xfer,
+ .functionality = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with a USB Type-C interface.
+ * We identify the cards by vendor ID and class code only, to avoid
+ * having to add a product ID for every new card that needs this driver.
+ * Currently there is no class code defined for a UCSI device over PCI,
+ * so we use the UNKNOWN class for now; this will be updated once UCSI
+ * over PCI gets a class code.
+ * There are no other NVIDIA cards with the UNKNOWN class code. Even if
+ * the driver gets loaded for an undesired card, i2c_read() (initiated
+ * from the UCSI i2c_client) or the UCSI commands will eventually time
+ * out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static const struct pci_device_id gpu_i2c_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+ struct i2c_client *ccgx_client;
+
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+ sizeof(*i2cd->gpu_ccgx_ucsi),
+ GFP_KERNEL);
+ if (!i2cd->gpu_ccgx_ucsi)
+ return -ENOMEM;
+
+ strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+ sizeof(i2cd->gpu_ccgx_ucsi->type));
+ i2cd->gpu_ccgx_ucsi->addr = 0x8;
+ i2cd->gpu_ccgx_ucsi->irq = irq;
+ ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+ if (!ccgx_client)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpu_i2c_dev *i2cd;
+ int status;
+
+ i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+ if (!i2cd)
+ return -ENOMEM;
+
+ i2cd->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, i2cd);
+
+ status = pcim_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+ return status;
+ }
+
+ pci_set_master(pdev);
+
+ i2cd->regs = pcim_iomap(pdev, 0, 0);
+ if (!i2cd->regs) {
+ dev_err(&pdev->dev, "pcim_iomap failed\n");
+ return -ENOMEM;
+ }
+
+ status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+ return status;
+ }
+
+ gpu_enable_i2c_bus(i2cd);
+
+ i2c_set_adapdata(&i2cd->adapter, i2cd);
+ i2cd->adapter.owner = THIS_MODULE;
+ strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ sizeof(i2cd->adapter.name));
+ i2cd->adapter.algo = &gpu_i2c_algorithm;
+ i2cd->adapter.quirks = &gpu_i2c_quirks;
+ i2cd->adapter.dev.parent = &pdev->dev;
+ status = i2c_add_adapter(&i2cd->adapter);
+ if (status < 0)
+ goto free_irq_vectors;
+
+ status = gpu_populate_client(i2cd, pdev->irq);
+ if (status < 0) {
+ dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+ goto del_adapter;
+ }
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+ i2c_del_adapter(&i2cd->adapter);
+ pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+ gpu_enable_i2c_bus(i2cd);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+ .name = "nvidia-gpu",
+ .id_table = gpu_i2c_ids,
+ .probe = gpu_i2c_probe,
+ .remove = gpu_i2c_remove,
+ .driver = {
+ .pm = &gpu_i2c_driver_pm,
+ },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
+MODULE_LICENSE("GPL v2");
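
The gpu_i2c_ids table in the new driver above matches by vendor and class code rather than by product ID. A standalone illustration of what that entry accepts (not part of the driver; demo_* names are made up):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool demo_matches(uint16_t vendor, uint32_t class_code)
{
	return vendor == 0x10de &&				/* PCI_VENDOR_ID_NVIDIA */
	       (class_code & 0xffffff00) == (0x0c80 << 8);	/* PCI_CLASS_SERIAL_UNKNOWN, prog-if ignored */
}

int main(void)
{
	assert(demo_matches(0x10de, 0x0c8000));		/* UCSI controller, prog-if 0x00 */
	assert(demo_matches(0x10de, 0x0c80ff));		/* prog-if does not matter */
	assert(!demo_matches(0x10de, 0x0c0330));	/* e.g. an xHCI function: no match */
	return 0;
}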
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 527f55c8c4c7..db075bc0d952 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
- ret = i2c_add_adapter(&gi2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
- return ret;
- }
-
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
pm_runtime_use_autosuspend(gi2c->se.dev);
pm_runtime_enable(gi2c->se.dev);
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ pm_runtime_disable(gi2c->se.dev);
+ return ret;
+ }
+
return 0;
}
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
- pm_runtime_disable(gi2c->se.dev);
i2c_del_adapter(&gi2c->adap);
+ pm_runtime_disable(gi2c->se.dev);
return 0;
}
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index ce7acd115dd8..1870cf87afe1 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
{
struct pattern_trig_data *data = from_timer(data, t, timer);
- mutex_lock(&data->lock);
-
for (;;) {
if (!data->is_indefinite && !data->repeat)
break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
data->curr->brightness);
mod_timer(&data->timer,
jiffies + msecs_to_jiffies(data->curr->delta_t));
-
- /* Skip the tuple with zero duration */
- pattern_trig_update_patterns(data);
+ if (!data->next->delta_t) {
+ /* Skip the tuple with zero duration */
+ pattern_trig_update_patterns(data);
+ }
/* Select next tuple */
pattern_trig_update_patterns(data);
} else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
break;
}
-
- mutex_unlock(&data->lock);
}
static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
if (res < -1 || res == 0)
return -EINVAL;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
struct pattern_trig_data *data = led_cdev->trigger_data;
int ccount, cr, offset = 0, err = 0;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index e514d57a0419..aa983422aa97 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS
+ select BCH_CONST_PARAMS if !MTD_NAND_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 784c6e1a0391..fd5fe12d7461 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
- struct mtd_info *cdev[nr];
+ struct mtd_info **cdev;
+
+ cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* We detected multiple devices. Concatenate them together.
*/
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
+ kfree(cdev);
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index c7efc31384d5..1a55d3e3d4c5 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -70,7 +70,7 @@ config MTD_NAND_GPIO
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
- depends on MACH_AMS_DELTA
+ depends on MACH_AMS_DELTA || COMPILE_TEST
default y
help
Support for NAND flash on Amstrad E3 (Delta).
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 5ba180a291eb..8312182088c1 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
*
@@ -8,10 +9,6 @@
* Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
* Partially stolen from plat_nand.c
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Overview:
* This is a device driver for the NAND flash device found on the
* Amstrad E3 (Delta).
@@ -24,18 +21,14 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-#include <mach/hardware.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
-
struct ams_delta_nand {
+ struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
struct gpio_desc *gpiod_nce;
@@ -44,7 +37,7 @@ struct ams_delta_nand {
struct gpio_desc *gpiod_nwe;
struct gpio_desc *gpiod_ale;
struct gpio_desc *gpiod_cle;
- void __iomem *io_base;
+ struct gpio_descs *data_gpiods;
bool data_in;
};
@@ -73,99 +66,154 @@ static const struct mtd_partition partition_info[] = {
.size = 3 * SZ_256K },
};
-static void ams_delta_io_write(struct ams_delta_nand *priv, u_char byte)
+static void ams_delta_write_commit(struct ams_delta_nand *priv)
{
- writew(byte, priv->nand_chip.legacy.IO_ADDR_W);
gpiod_set_value(priv->gpiod_nwe, 0);
ndelay(40);
gpiod_set_value(priv->gpiod_nwe, 1);
}
-static u_char ams_delta_io_read(struct ams_delta_nand *priv)
+static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+
+ gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ ams_delta_write_commit(priv);
+}
+
+static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_output_raw(data_gpiods->desc[i],
+ test_bit(i, values));
+
+ ams_delta_write_commit(priv);
+
+ priv->data_in = false;
+}
+
+static u8 ams_delta_io_read(struct ams_delta_nand *priv)
{
- u_char res;
+ u8 res;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
gpiod_set_value(priv->gpiod_nre, 0);
ndelay(40);
- res = readw(priv->nand_chip.legacy.IO_ADDR_R);
+
+ gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
gpiod_set_value(priv->gpiod_nre, 1);
+ res = values[0];
return res;
}
-static void ams_delta_dir_input(struct ams_delta_nand *priv, bool in)
+static void ams_delta_dir_input(struct ams_delta_nand *priv)
{
- writew(in ? ~0 : 0, priv->io_base + OMAP_MPUIO_IO_CNTL);
- priv->data_in = in;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_input(data_gpiods->desc[i]);
+
+ priv->data_in = true;
}
-static void ams_delta_write_buf(struct nand_chip *this, const u_char *buf,
+static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
int len)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
- int i;
+ int i = 0;
- if (priv->data_in)
- ams_delta_dir_input(priv, false);
+ if (len > 0 && priv->data_in)
+ ams_delta_dir_output(priv, buf[i++]);
- for (i = 0; i < len; i++)
- ams_delta_io_write(priv, buf[i]);
+ while (i < len)
+ ams_delta_io_write(priv, buf[i++]);
}
-static void ams_delta_read_buf(struct nand_chip *this, u_char *buf, int len)
+static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
int i;
if (!priv->data_in)
- ams_delta_dir_input(priv, true);
+ ams_delta_dir_input(priv);
for (i = 0; i < len; i++)
buf[i] = ams_delta_io_read(priv);
}
-static u_char ams_delta_read_byte(struct nand_chip *this)
+static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
{
- u_char res;
-
- ams_delta_read_buf(this, &res, 1);
-
- return res;
+ gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
}
-/*
- * Command control function
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 7
- * NAND_ALE: bit 2 -> bit 6
- */
-static void ams_delta_hwcontrol(struct nand_chip *this, int cmd,
- unsigned int ctrl)
+static int ams_delta_exec_op(struct nand_chip *this,
+ const struct nand_operation *op, bool check_only)
{
struct ams_delta_nand *priv = nand_get_controller_data(this);
-
- if (ctrl & NAND_CTRL_CHANGE) {
- gpiod_set_value(priv->gpiod_nce, !(ctrl & NAND_NCE));
- gpiod_set_value(priv->gpiod_cle, !!(ctrl & NAND_CLE));
- gpiod_set_value(priv->gpiod_ale, !!(ctrl & NAND_ALE));
+ const struct nand_op_instr *instr;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ ams_delta_ctrl_cs(priv, 1);
+
+ for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpiod_set_value(priv->gpiod_cle, 1);
+ ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpiod_set_value(priv->gpiod_cle, 0);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ gpiod_set_value(priv->gpiod_ale, 1);
+ ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ gpiod_set_value(priv->gpiod_ale, 0);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = priv->gpiod_rdy ?
+ nand_gpio_waitrdy(this, priv->gpiod_rdy,
+ instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(this,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (ret)
+ break;
}
- if (cmd != NAND_CMD_NONE) {
- u_char byte = cmd;
+ ams_delta_ctrl_cs(priv, 0);
- ams_delta_write_buf(this, &byte, 1);
- }
-}
-
-static int ams_delta_nand_ready(struct nand_chip *this)
-{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
-
- return gpiod_get_value(priv->gpiod_rdy);
+ return ret;
}
+static const struct nand_controller_ops ams_delta_ops = {
+ .exec_op = ams_delta_exec_op,
+};
/*
* Main initialization routine
@@ -175,61 +223,29 @@ static int ams_delta_init(struct platform_device *pdev)
struct ams_delta_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- void __iomem *io_base;
+ struct gpio_descs *data_gpiods;
int err = 0;
- if (!res)
- return -ENXIO;
-
/* Allocate memory for MTD device structure and private data */
priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
GFP_KERNEL);
- if (!priv) {
- pr_warn("Unable to allocate E3 NAND MTD device structure.\n");
+ if (!priv)
return -ENOMEM;
- }
+
this = &priv->nand_chip;
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
- /*
- * Don't try to request the memory region from here,
- * it should have been already requested from the
- * gpio-omap driver and requesting it again would fail.
- */
-
- io_base = ioremap(res->start, resource_size(res));
- if (io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_free;
- }
-
- priv->io_base = io_base;
nand_set_controller_data(this, priv);
- /* Set address of NAND IO lines */
- this->legacy.IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
- this->legacy.IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
- this->legacy.read_byte = ams_delta_read_byte;
- this->legacy.write_buf = ams_delta_write_buf;
- this->legacy.read_buf = ams_delta_read_buf;
- this->legacy.cmd_ctrl = ams_delta_hwcontrol;
-
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
err = PTR_ERR(priv->gpiod_rdy);
dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
- if (priv->gpiod_rdy)
- this->legacy.dev_ready = ams_delta_nand_ready;
-
- /* 25 us command delay time */
- this->legacy.chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
@@ -240,61 +256,75 @@ static int ams_delta_init(struct platform_device *pdev)
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_ale)) {
err = PTR_ERR(priv->gpiod_ale);
dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_cle)) {
err = PTR_ERR(priv->gpiod_cle);
dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
- /* Initialize data port direction to a known state */
- ams_delta_dir_input(priv, true);
+ /* Request array of data pins, initialize them as input */
+ data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data_gpiods)) {
+ err = PTR_ERR(data_gpiods);
+ dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
+ return err;
+ }
+ priv->data_gpiods = data_gpiods;
+ priv->data_in = true;
+
+ /* Initialize the NAND controller object embedded in ams_delta_nand. */
+ priv->base.ops = &ams_delta_ops;
+ nand_controller_init(&priv->base);
+ this->controller = &priv->base;
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
- goto out_mtd;
+ return err;
/* Register the partitions */
- mtd_device_register(mtd, partition_info, ARRAY_SIZE(partition_info));
+ err = mtd_device_register(mtd, partition_info,
+ ARRAY_SIZE(partition_info));
+ if (err)
+ goto err_nand_cleanup;
- goto out;
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(this);
- out_mtd:
- iounmap(io_base);
-out_free:
- out:
return err;
}
@@ -305,13 +335,10 @@ static int ams_delta_cleanup(struct platform_device *pdev)
{
struct ams_delta_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
- void __iomem *io_base = priv->io_base;
- /* Release resources, unregister device */
+ /* Unregister device */
nand_release(mtd_to_nand(mtd));
- iounmap(io_base);
-
return 0;
}
@@ -325,6 +352,6 @@ static struct platform_driver ams_delta_nand_driver = {
module_platform_driver(ams_delta_nand_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index fb33f6be7c4f..dcd3bd73e549 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1477,10 +1477,10 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
chip->legacy.write_byte = atmel_nand_write_byte;
chip->legacy.read_buf = atmel_nand_read_buf;
chip->legacy.write_buf = atmel_nand_write_buf;
- chip->select_chip = atmel_nand_select_chip;
+ chip->legacy.select_chip = atmel_nand_select_chip;
- if (nc->mck && nc->caps->ops->setup_data_interface)
- chip->setup_data_interface = atmel_nand_setup_data_interface;
+ if (!nc->mck || !nc->caps->ops->setup_data_interface)
+ chip->options |= NAND_KEEP_TIMINGS;
/* Some NANDs require a longer delay than the default one (20us). */
chip->legacy.chip_delay = 40;
@@ -1525,7 +1525,7 @@ static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
/* Overload some methods for the HSMC controller. */
chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
- chip->select_chip = atmel_hsmc_nand_select_chip;
+ chip->legacy.select_chip = atmel_hsmc_nand_select_chip;
}
static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
@@ -1908,6 +1908,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops atmel_nand_controller_ops = {
.attach_chip = atmel_nand_attach_chip,
+ .setup_data_interface = atmel_nand_setup_data_interface,
};
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 9731c1c487f6..a963002663ed 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -430,7 +430,7 @@ static int au1550nd_probe(struct platform_device *pdev)
ctx->cs = cs;
this->legacy.dev_ready = au1550_device_ready;
- this->select_chip = au1550_select_chip;
+ this->legacy.select_chip = au1550_select_chip;
this->legacy.cmdfunc = au1550_command;
/* 30 us command delay time */
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 9095a79ebc7d..a37cbfe56567 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -383,7 +383,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
u8 tbits, col_bits, col_size, row_bits, row_bsize;
u32 val;
- b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index c1a745940d12..b1c0cd6b49da 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -708,7 +708,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.legacy.read_byte = cafe_read_byte;
cafe->nand.legacy.read_buf = cafe_read_buf;
cafe->nand.legacy.write_buf = cafe_write_buf;
- cafe->nand.select_chip = cafe_select_chip;
+ cafe->nand.legacy.select_chip = cafe_select_chip;
cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
@@ -780,7 +780,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->usedma = 0;
/* Scan to find existence of the device */
- cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
+ cafe->nand.legacy.dummy_controller.ops = &cafe_nand_controller_ops;
err = nand_scan(&cafe->nand, 2);
if (err)
goto out_irq;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 80f228d23cd2..27bafa5e1ca1 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -762,7 +762,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.legacy.IO_ADDR_R = vaddr;
info->chip.legacy.IO_ADDR_W = vaddr;
info->chip.legacy.chip_delay = 0;
- info->chip.select_chip = nand_davinci_select_chip;
+ info->chip.legacy.select_chip = nand_davinci_select_chip;
/* options such as NAND_BBT_USE_FLASH */
info->chip.bbt_options = pdata->bbt_options;
@@ -801,7 +801,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
+ info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops;
ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 830ea247277b..eebac35304c6 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -204,18 +204,6 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
return denali->irq_status;
}
-static uint32_t denali_check_irq(struct denali_nand_info *denali)
-{
- unsigned long flags;
- uint32_t irq_status;
-
- spin_lock_irqsave(&denali->irq_lock, flags);
- irq_status = denali->irq_status;
- spin_unlock_irqrestore(&denali->irq_lock, flags);
-
- return irq_status;
-}
-
static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -288,8 +276,7 @@ static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
return;
/*
- * Some commands are followed by chip->legacy.dev_ready or
- * chip->legacy.waitfunc.
+ * Some commands are followed by chip->legacy.waitfunc.
* irq_status must be cleared here to catch the R/B# interrupt later.
*/
if (ctrl & NAND_CTRL_CHANGE)
@@ -298,13 +285,6 @@ static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
-static int denali_dev_ready(struct nand_chip *chip)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
-
- return !!(denali_check_irq(denali) & INTR__INT_ACT);
-}
-
static int denali_check_erased_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf,
unsigned long uncor_ecc_flags,
@@ -1065,29 +1045,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
return 0;
}
-static void denali_reset_banks(struct denali_nand_info *denali)
-{
- u32 irq_status;
- int i;
-
- for (i = 0; i < denali->max_banks; i++) {
- denali->active_bank = i;
-
- denali_reset_irq(denali);
-
- iowrite32(DEVICE_RESET__BANK(i),
- denali->reg + DEVICE_RESET);
-
- irq_status = denali_wait_for_irq(denali,
- INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
- if (!(irq_status & INTR__INT_ACT))
- break;
- }
-
- dev_dbg(denali->dev, "%d chips connected\n", i);
- denali->max_banks = i;
-}
-
static void denali_hw_init(struct denali_nand_info *denali)
{
/*
@@ -1316,6 +1273,7 @@ static void denali_detach_chip(struct nand_chip *chip)
static const struct nand_controller_ops denali_controller_ops = {
.attach_chip = denali_attach_chip,
.detach_chip = denali_detach_chip,
+ .setup_data_interface = denali_setup_data_interface,
};
int denali_init(struct denali_nand_info *denali)
@@ -1341,12 +1299,6 @@ int denali_init(struct denali_nand_info *denali)
}
denali_enable_irq(denali);
- denali_reset_banks(denali);
- if (!denali->max_banks) {
- /* Error out earlier if no chip is found for some reasons. */
- ret = -ENODEV;
- goto disable_irq;
- }
denali->active_bank = DENALI_INVALID_BANK;
@@ -1355,11 +1307,10 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- chip->select_chip = denali_select_chip;
+ chip->legacy.select_chip = denali_select_chip;
chip->legacy.read_byte = denali_read_byte;
chip->legacy.write_byte = denali_write_byte;
chip->legacy.cmd_ctrl = denali_cmd_ctrl;
- chip->legacy.dev_ready = denali_dev_ready;
chip->legacy.waitfunc = denali_waitfunc;
if (features & FEATURES__INDEX_ADDR) {
@@ -1372,9 +1323,9 @@ int denali_init(struct denali_nand_info *denali)
/* clk rate info is needed for setup_data_interface */
-	if (denali->clk_rate && denali->clk_x_rate)
-		chip->setup_data_interface = denali_setup_data_interface;
+	if (!denali->clk_rate || !denali->clk_x_rate)
+		chip->options |= NAND_KEEP_TIMINGS;
- chip->dummy_controller.ops = &denali_controller_ops;
+ chip->legacy.dummy_controller.ops = &denali_controller_ops;
ret = nand_scan(chip, denali->max_banks);
if (ret)
goto disable_irq;
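Together with the atmel hunk further up, the denali changes illustrate the new convention: ->setup_data_interface() now lives in nand_controller_ops, and a driver that cannot (or must not) reprogram timings sets NAND_KEEP_TIMINGS on the chip rather than leaving a per-chip hook unset. A condensed sketch of the registration side (structure and names are illustrative; the two ops are assumed to be defined elsewhere):

struct my_nfc {
	struct nand_controller controller;
	unsigned long clk_rate;
};

static const struct nand_controller_ops my_nfc_ops = {
	.attach_chip		= my_nfc_attach_chip,
	.setup_data_interface	= my_nfc_setup_data_interface,
};

static int my_nfc_init(struct my_nfc *nfc, struct nand_chip *chip)
{
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &my_nfc_ops;
	chip->controller = &nfc->controller;

	/* Without clock rate information the timings cannot be computed. */
	if (!nfc->clk_rate)
		chip->options |= NAND_KEEP_TIMINGS;

	return nand_scan(chip, 1);
}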
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 57a5498f58bb..25c00601b8b3 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -7,7 +7,7 @@
#ifndef __DENALI_H__
#define __DENALI_H__
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 3a4c373affab..53f57e0f007e 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1390,7 +1390,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
this->legacy.read_buf = doc2001plus_readbuf;
doc->late_init = inftl_scan_bbt;
this->legacy.cmd_ctrl = NULL;
- this->select_chip = doc2001plus_select_chip;
+ this->legacy.select_chip = doc2001plus_select_chip;
this->legacy.cmdfunc = doc2001plus_command;
this->ecc.hwctl = doc2001plus_enable_hwecc;
@@ -1568,7 +1568,7 @@ static int __init doc_probe(unsigned long physadr)
mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
nand_set_controller_data(nand, doc);
- nand->select_chip = doc200x_select_chip;
+ nand->legacy.select_chip = doc200x_select_chip;
nand->legacy.cmd_ctrl = doc200x_hwcontrol;
nand->legacy.dev_ready = doc200x_dev_ready;
nand->legacy.waitfunc = doc200x_wait;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index d6ed697fcfe6..70f0d2b450ea 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -779,7 +779,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->legacy.read_byte = fsl_elbc_read_byte;
chip->legacy.write_buf = fsl_elbc_write_buf;
chip->legacy.read_buf = fsl_elbc_read_buf;
- chip->select_chip = fsl_elbc_select_chip;
+ chip->legacy.select_chip = fsl_elbc_select_chip;
chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
chip->legacy.waitfunc = fsl_elbc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 6f4afc44381a..e65d274399f9 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -864,7 +864,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->legacy.write_buf = fsl_ifc_write_buf;
chip->legacy.read_buf = fsl_ifc_read_buf;
- chip->select_chip = fsl_ifc_select_chip;
+ chip->legacy.select_chip = fsl_ifc_select_chip;
chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
chip->legacy.waitfunc = fsl_ifc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 673c5a0c9345..5ccc28ec0985 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -170,7 +170,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
fun->chip.ecc.mode = NAND_ECC_SOFT;
fun->chip.ecc.algo = NAND_ECC_HAMMING;
if (fun->mchip_count > 1)
- fun->chip.select_chip = fun_select_chip;
+ fun->chip.legacy.select_chip = fun_select_chip;
if (fun->rnb_gpio[0] >= 0)
fun->chip.legacy.dev_ready = fun_chip_ready;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 70ac8d875218..325b4414dccc 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST Microelectronics
* Flexible Static Memory Controller (FSMC)
@@ -10,10 +11,6 @@
* Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
* Copyright © 2007 STMicroelectronics Pvt. Ltd.
* Copyright © 2009 Alessandro Rubini
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -41,15 +38,14 @@
/* fsmc controller registers for NOR flash */
#define CTRL 0x0
/* ctrl register definitions */
- #define BANK_ENABLE (1 << 0)
- #define MUXED (1 << 1)
+ #define BANK_ENABLE BIT(0)
+ #define MUXED BIT(1)
#define NOR_DEV (2 << 2)
- #define WIDTH_8 (0 << 4)
- #define WIDTH_16 (1 << 4)
- #define RSTPWRDWN (1 << 6)
- #define WPROT (1 << 7)
- #define WRT_ENABLE (1 << 12)
- #define WAIT_ENB (1 << 13)
+ #define WIDTH_16 BIT(4)
+ #define RSTPWRDWN BIT(6)
+ #define WPROT BIT(7)
+ #define WRT_ENABLE BIT(12)
+ #define WAIT_ENB BIT(13)
#define CTRL_TIM 0x4
/* ctrl_tim register definitions */
@@ -57,43 +53,35 @@
#define FSMC_NOR_BANK_SZ 0x8
#define FSMC_NOR_REG_SIZE 0x40
-#define FSMC_NOR_REG(base, bank, reg) (base + \
- FSMC_NOR_BANK_SZ * (bank) + \
- reg)
+#define FSMC_NOR_REG(base, bank, reg) ((base) + \
+ (FSMC_NOR_BANK_SZ * (bank)) + \
+ (reg))
/* fsmc controller registers for NAND flash */
#define FSMC_PC 0x00
/* pc register definitions */
- #define FSMC_RESET (1 << 0)
- #define FSMC_WAITON (1 << 1)
- #define FSMC_ENABLE (1 << 2)
- #define FSMC_DEVTYPE_NAND (1 << 3)
- #define FSMC_DEVWID_8 (0 << 4)
- #define FSMC_DEVWID_16 (1 << 4)
- #define FSMC_ECCEN (1 << 6)
- #define FSMC_ECCPLEN_512 (0 << 7)
- #define FSMC_ECCPLEN_256 (1 << 7)
- #define FSMC_TCLR_1 (1)
+ #define FSMC_RESET BIT(0)
+ #define FSMC_WAITON BIT(1)
+ #define FSMC_ENABLE BIT(2)
+ #define FSMC_DEVTYPE_NAND BIT(3)
+ #define FSMC_DEVWID_16 BIT(4)
+ #define FSMC_ECCEN BIT(6)
+ #define FSMC_ECCPLEN_256 BIT(7)
#define FSMC_TCLR_SHIFT (9)
#define FSMC_TCLR_MASK (0xF)
- #define FSMC_TAR_1 (1)
#define FSMC_TAR_SHIFT (13)
#define FSMC_TAR_MASK (0xF)
#define STS 0x04
/* sts register definitions */
- #define FSMC_CODE_RDY (1 << 15)
+ #define FSMC_CODE_RDY BIT(15)
#define COMM 0x08
/* comm register definitions */
- #define FSMC_TSET_0 0
#define FSMC_TSET_SHIFT 0
#define FSMC_TSET_MASK 0xFF
- #define FSMC_TWAIT_6 6
#define FSMC_TWAIT_SHIFT 8
#define FSMC_TWAIT_MASK 0xFF
- #define FSMC_THOLD_4 4
#define FSMC_THOLD_SHIFT 16
#define FSMC_THOLD_MASK 0xFF
- #define FSMC_THIZ_1 1
#define FSMC_THIZ_SHIFT 24
#define FSMC_THIZ_MASK 0xFF
#define ATTRIB 0x0C
@@ -106,12 +94,12 @@
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
struct fsmc_nand_timings {
- uint8_t tclr;
- uint8_t tar;
- uint8_t thiz;
- uint8_t thold;
- uint8_t twait;
- uint8_t tset;
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 thold;
+ u8 twait;
+ u8 tset;
};
enum access_mode {
@@ -122,19 +110,21 @@ enum access_mode {
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
*
+ * @base: Inherit from the nand_controller struct
* @pid: Part ID on the AMBA PrimeCell format
- * @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
- * @partitions: Partition info for a NAND Flash.
- * @nr_partitions: Total number of partition of a NAND flash.
*
* @bank: Bank number for probed device.
+ * @dev: Parent device
+ * @mode: Access mode
* @clk: Clock structure for FSMC.
*
* @read_dma_chan: DMA channel for read access
* @write_dma_chan: DMA channel for write access to NAND
* @dma_access_complete: Completion structure
*
+ * @dev_timings: NAND timings
+ *
* @data_pa: NAND Physical port for Data.
* @data_va: NAND port for Data.
* @cmd_va: NAND port for Command.
@@ -142,6 +132,7 @@ enum access_mode {
* @regs_va: Registers base address for a given bank.
*/
struct fsmc_nand_data {
+ struct nand_controller base;
u32 pid;
struct nand_chip nand;
@@ -248,9 +239,9 @@ static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
.free = fsmc_ecc4_ooblayout_free,
};
-static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
+static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
{
- return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
+ return container_of(chip, struct fsmc_nand_data, nand);
}
/*
@@ -262,8 +253,8 @@ static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
static void fsmc_nand_setup(struct fsmc_nand_data *host,
struct fsmc_nand_timings *tims)
{
- uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
- uint32_t tclr, tar, thiz, thold, twait, tset;
+ u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ u32 tclr, tar, thiz, thold, twait, tset;
tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
@@ -273,13 +264,9 @@ static void fsmc_nand_setup(struct fsmc_nand_data *host,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (host->nand.options & NAND_BUSWIDTH_16)
- writel_relaxed(value | FSMC_DEVWID_16,
- host->regs_va + FSMC_PC);
- else
- writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + FSMC_PC);
+ value |= FSMC_DEVWID_16;
- writel_relaxed(readl(host->regs_va + FSMC_PC) | tclr | tar,
- host->regs_va + FSMC_PC);
+ writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
@@ -290,7 +277,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
- uint32_t thiz, thold, twait, tset;
+ u32 thiz, thold, twait, tset;
if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
@@ -343,7 +330,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct fsmc_nand_data *host = nand_get_controller_data(nand);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
struct fsmc_nand_timings tims;
const struct nand_sdr_timings *sdrt;
int ret;
@@ -369,7 +356,7 @@ static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
*/
static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
host->regs_va + FSMC_PC);
@@ -384,18 +371,18 @@ static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
* FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
* max of 8-bits)
*/
-static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
- uint8_t *ecc)
+static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t ecc_tmp;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
break;
- else
- cond_resched();
+
+ cond_resched();
} while (!time_after_eq(jiffies, deadline));
if (time_after_eq(jiffies, deadline)) {
@@ -404,25 +391,25 @@ static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
}
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
- ecc[0] = (uint8_t) (ecc_tmp >> 0);
- ecc[1] = (uint8_t) (ecc_tmp >> 8);
- ecc[2] = (uint8_t) (ecc_tmp >> 16);
- ecc[3] = (uint8_t) (ecc_tmp >> 24);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+ ecc[3] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC2);
- ecc[4] = (uint8_t) (ecc_tmp >> 0);
- ecc[5] = (uint8_t) (ecc_tmp >> 8);
- ecc[6] = (uint8_t) (ecc_tmp >> 16);
- ecc[7] = (uint8_t) (ecc_tmp >> 24);
+ ecc[4] = ecc_tmp;
+ ecc[5] = ecc_tmp >> 8;
+ ecc[6] = ecc_tmp >> 16;
+ ecc[7] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC3);
- ecc[8] = (uint8_t) (ecc_tmp >> 0);
- ecc[9] = (uint8_t) (ecc_tmp >> 8);
- ecc[10] = (uint8_t) (ecc_tmp >> 16);
- ecc[11] = (uint8_t) (ecc_tmp >> 24);
+ ecc[8] = ecc_tmp;
+ ecc[9] = ecc_tmp >> 8;
+ ecc[10] = ecc_tmp >> 16;
+ ecc[11] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + STS);
- ecc[12] = (uint8_t) (ecc_tmp >> 16);
+ ecc[12] = ecc_tmp >> 16;
return 0;
}
@@ -432,22 +419,22 @@ static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
* FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
* max of 1-bit)
*/
-static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const uint8_t *data,
- uint8_t *ecc)
+static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t ecc_tmp;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
- ecc[0] = (uint8_t) (ecc_tmp >> 0);
- ecc[1] = (uint8_t) (ecc_tmp >> 8);
- ecc[2] = (uint8_t) (ecc_tmp >> 16);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
return 0;
}
/* Count the number of 0's in buff upto a max of max_bits */
-static int count_written_bits(uint8_t *buff, int size, int max_bits)
+static int count_written_bits(u8 *buff, int size, int max_bits)
{
int k, written_bits = 0;
@@ -468,7 +455,7 @@ static void dma_complete(void *param)
}
static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
struct dma_chan *chan;
struct dma_device *dma_dev;
@@ -519,7 +506,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
time_left =
wait_for_completion_timeout(&host->dma_access_complete,
- msecs_to_jiffies(3000));
+ msecs_to_jiffies(3000));
if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(host->dev, "wait_for_completion_timeout\n");
@@ -537,18 +524,19 @@ unmap_dma:
/*
* fsmc_write_buf - write buffer to chip
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
-static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
- IS_ALIGNED(len, sizeof(uint32_t))) {
- uint32_t *p = (uint32_t *)buf;
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
len = len >> 2;
for (i = 0; i < len; i++)
writel_relaxed(p[i], host->data_va);
@@ -560,18 +548,18 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/*
* fsmc_read_buf - read chip data into buffer
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: buffer to store date
* @len: number of bytes to read
*/
-static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
- IS_ALIGNED(len, sizeof(uint32_t))) {
- uint32_t *p = (uint32_t *)buf;
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
len = len >> 2;
for (i = 0; i < len; i++)
p[i] = readl_relaxed(host->data_va);
@@ -583,48 +571,42 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
/*
* fsmc_read_buf_dma - read chip data into buffer
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: buffer to store date
* @len: number of bytes to read
*/
-static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-
dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
/*
* fsmc_write_buf_dma - write buffer to chip
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
-static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
- int len)
+static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_select_chip(struct nand_chip *chip, int chipnr)
+static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- u32 pc;
-
- /* Support only one CS */
- if (chipnr > 0)
- return;
+ u32 pc = readl(host->regs_va + FSMC_PC);
- pc = readl(host->regs_va + FSMC_PC);
- if (chipnr < 0)
+ if (!assert)
writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
else
writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
- /* nCE line must be asserted before starting any operation */
+ /*
+ * nCE line changes must be applied before returning from this
+ * function.
+ */
mb();
}
@@ -637,14 +619,16 @@ static void fsmc_select_chip(struct nand_chip *chip, int chipnr)
static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
bool check_only)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
const struct nand_op_instr *instr = NULL;
int ret = 0;
unsigned int op_id;
int i;
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
+
+ fsmc_ce_ctrl(host, true);
+
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
@@ -671,10 +655,10 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
", force 8-bit" : "");
if (host->mode == USE_DMA_ACCESS)
- fsmc_read_buf_dma(mtd, instr->ctx.data.buf.in,
+ fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
else
- fsmc_read_buf(mtd, instr->ctx.data.buf.in,
+ fsmc_read_buf(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
@@ -684,10 +668,11 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
", force 8-bit" : "");
if (host->mode == USE_DMA_ACCESS)
- fsmc_write_buf_dma(mtd, instr->ctx.data.buf.out,
+ fsmc_write_buf_dma(host,
+ instr->ctx.data.buf.out,
instr->ctx.data.len);
else
- fsmc_write_buf(mtd, instr->ctx.data.buf.out,
+ fsmc_write_buf(host, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
@@ -701,6 +686,8 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
}
}
+ fsmc_ce_ctrl(host, false);
+
return ret;
}
@@ -717,34 +704,35 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
* After this read, fsmc hardware generates and reports error data bits(up to a
* max of 8 bits)
*/
-static int fsmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, j, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->ecc.calc_buf;
- uint8_t *ecc_code = chip->ecc.code_buf;
- int off, len, group = 0;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ int off, len, ret, group = 0;
/*
- * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
+ * ecc_oob is intentionally taken as u16. In 16bit devices, we
* end up reading 14 bytes (7 words) from oob. The local array is
* to maintain word alignment
*/
- uint16_t ecc_oob[7];
- uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ u16 ecc_oob[7];
+ u8 *oob = (u8 *)&ecc_oob[0];
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(chip, NAND_ECC_READ);
- nand_read_data_op(chip, p, eccsize, false);
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
for (j = 0; j < eccbytes;) {
struct mtd_oob_region oobregion;
- int ret;
ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
if (ret)
@@ -788,15 +776,15 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
* @calc_ecc: ecc calculated from read data
*
* calc_ecc is a 104 bit information containing maximum of 8 error
- * offset informations of 13 bits each in 512 bytes of read data.
+ * offset information of 13 bits each in 512 bytes of read data.
*/
-static int fsmc_bch8_correct_data(struct nand_chip *chip, uint8_t *dat,
- uint8_t *read_ecc, uint8_t *calc_ecc)
+static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t err_idx[8];
- uint32_t num_err, i;
- uint32_t ecc1, ecc2, ecc3, ecc4;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 err_idx[8];
+ u32 num_err, i;
+ u32 ecc1, ecc2, ecc3, ecc4;
num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
@@ -837,8 +825,8 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, uint8_t *dat,
* |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
*
* calc_ecc is a 104 bit information containing maximum of 8 error
- * offset informations of 13 bits each. calc_ecc is copied into a
- * uint64_t array and error offset indexes are populated in err_idx
+ * offset information of 13 bits each. calc_ecc is copied into a
+ * u64 array and error offset indexes are populated in err_idx
* array
*/
ecc1 = readl_relaxed(host->regs_va + ECC1);
@@ -897,11 +885,13 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
nand->options |= NAND_SKIP_BBTSCAN;
host->dev_timings = devm_kzalloc(&pdev->dev,
- sizeof(*host->dev_timings), GFP_KERNEL);
+ sizeof(*host->dev_timings),
+ GFP_KERNEL);
if (!host->dev_timings)
return -ENOMEM;
+
ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
- sizeof(*host->dev_timings));
+ sizeof(*host->dev_timings));
if (ret)
host->dev_timings = NULL;
@@ -920,7 +910,7 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
static int fsmc_nand_attach_chip(struct nand_chip *nand)
{
struct mtd_info *mtd = nand_to_mtd(nand);
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
if (AMBA_REV_BITS(host->pid) >= 8) {
switch (mtd->oobsize) {
@@ -992,6 +982,8 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
static const struct nand_controller_ops fsmc_nand_controller_ops = {
.attach_chip = fsmc_nand_attach_chip,
+ .exec_op = fsmc_exec_op,
+ .setup_data_interface = fsmc_setup_data_interface,
};
/*
@@ -1061,10 +1053,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* AMBA PrimeCell bus. However it is not a PrimeCell.
*/
for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
+ 255) << (i * 8);
+
host->pid = pid;
- dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
- "revision %02x, config %02x\n",
+
+ dev_info(&pdev->dev,
+ "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
@@ -1075,12 +1070,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/* Link all private pointers */
mtd = nand_to_mtd(&host->nand);
- nand_set_controller_data(nand, host);
nand_set_flash_node(nand, pdev->dev.of_node);
mtd->dev.parent = &pdev->dev;
- nand->exec_op = fsmc_exec_op;
- nand->select_chip = fsmc_select_chip;
/*
* Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
@@ -1106,10 +1098,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
}
- if (host->dev_timings)
+ if (host->dev_timings) {
fsmc_nand_setup(host, host->dev_timings);
- else
- nand->setup_data_interface = fsmc_setup_data_interface;
+ nand->options |= NAND_KEEP_TIMINGS;
+ }
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
@@ -1119,10 +1111,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.strength = 8;
}
+ nand_controller_init(&host->base);
+ host->base.ops = &fsmc_nand_controller_ops;
+ nand->controller = &host->base;
+
/*
* Scan to find existence of the device
*/
- nand->dummy_controller.ops = &fsmc_nand_controller_ops;
ret = nand_scan(nand, 1);
if (ret)
goto release_dma_write_chan;
@@ -1175,19 +1170,23 @@ static int fsmc_nand_remove(struct platform_device *pdev)
static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
if (host)
clk_disable_unprepare(host->clk);
+
return 0;
}
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
if (host) {
clk_prepare_enable(host->clk);
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
}
+
return 0;
}
#endif
@@ -1212,6 +1211,6 @@ static struct platform_driver fsmc_nand_driver = {
module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 94c2b7525c85..ed405c9434fe 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1549,7 +1549,7 @@ static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
@@ -1562,7 +1562,7 @@ static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
ret = nand_prog_page_op(chip, page, column, block_mark, 1);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -1610,7 +1610,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
saved_chip_number = this->current_chip;
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/*
* Loop through the first search area, looking for the NCB fingerprint.
@@ -1638,7 +1638,10 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
}
- chip->select_chip(chip, saved_chip_number);
+ if (saved_chip_number >= 0)
+ nand_select_target(chip, saved_chip_number);
+ else
+ nand_deselect_target(chip);
if (found_an_ncb_fingerprint)
dev_dbg(dev, "\tFound a fingerprint\n");
@@ -1681,7 +1684,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Select chip 0. */
saved_chip_number = this->current_chip;
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/* Loop over blocks in the first search area, erasing them. */
dev_dbg(dev, "Erasing the search area...\n");
@@ -1713,7 +1716,11 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
}
/* Deselect chip 0. */
- chip->select_chip(chip, saved_chip_number);
+ if (saved_chip_number >= 0)
+ nand_select_target(chip, saved_chip_number);
+ else
+ nand_deselect_target(chip);
+
return 0;
}
@@ -1762,10 +1769,10 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
byte = block << chip->phys_erase_shift;
/* Send the command to read the conventional block mark. */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->legacy.read_byte(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
/*
* Check if the block is marked bad. If so, we need to mark it
@@ -1882,6 +1889,7 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops gpmi_nand_controller_ops = {
.attach_chip = gpmi_nand_attach_chip,
+ .setup_data_interface = gpmi_setup_data_interface,
};
static int gpmi_nand_init(struct gpmi_nand_data *this)
@@ -1900,8 +1908,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
nand_set_controller_data(chip, this);
nand_set_flash_node(chip, this->pdev->dev.of_node);
- chip->select_chip = gpmi_select_chip;
- chip->setup_data_interface = gpmi_setup_data_interface;
+ chip->legacy.select_chip = gpmi_select_chip;
chip->legacy.cmd_ctrl = gpmi_cmd_ctrl;
chip->legacy.dev_ready = gpmi_dev_ready;
chip->legacy.read_byte = gpmi_read_byte;
@@ -1924,7 +1931,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- chip->dummy_controller.ops = &gpmi_nand_controller_ops;
+ chip->legacy.dummy_controller.ops = &gpmi_nand_controller_ops;
ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
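The gpmi hunks replace direct chip->select_chip() calls with the nand_select_target()/nand_deselect_target() helpers, which also make the "negative chip number means deselect" convention explicit at the call sites. In driver code the pattern reduces to the following (a sketch; gpmi_block_markbad() above uses it with block_mark as the buffer):

	nand_select_target(chip, chipnr);
	ret = nand_prog_page_op(chip, page, column, buf, len);
	nand_deselect_target(chip);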
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index f043938ee36b..f3f9aa160cff 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -783,7 +783,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
nand_set_controller_data(chip, host);
nand_set_flash_node(chip, np);
chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
- chip->select_chip = hisi_nfc_select_chip;
+ chip->legacy.select_chip = hisi_nfc_select_chip;
chip->legacy.read_byte = hisi_nfc_read_byte;
chip->legacy.write_buf = hisi_nfc_write_buf;
chip->legacy.read_buf = hisi_nfc_read_buf;
@@ -799,7 +799,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
return ret;
}
- chip->dummy_controller.ops = &hisi_nfc_controller_ops;
+ chip->legacy.dummy_controller.ops = &hisi_nfc_controller_ops;
ret = nand_scan(chip, max_chips);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index 04c2cf74eff3..fbf6ca015cd7 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -95,6 +95,39 @@ void nand_decode_ext_id(struct nand_chip *chip);
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
void sanitize_string(uint8_t *s, size_t len);
+static inline bool nand_has_exec_op(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->exec_op)
+ return false;
+
+ return true;
+}
+
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ if (WARN_ON(op->cs >= chip->numchips))
+ return -EINVAL;
+
+ return chip->controller->ops->exec_op(chip, op, false);
+}
+
+static inline bool nand_has_setup_data_iface(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->setup_data_interface)
+ return false;
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return false;
+
+ return true;
+}
+
/* BBT functions */
int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
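The internals.h helpers centralize the capability checks: core code now asks nand_has_exec_op()/nand_has_setup_data_iface() instead of poking at per-chip hooks, and nand_exec_op() validates op->cs before dispatching to the controller. A hedged sketch of a core-side caller (real callers normally build the instruction array with the NAND_OP_* helpers from rawnand.h):

static int sketch_issue_op(struct nand_chip *chip, unsigned int cs,
			   const struct nand_op_instr *instrs,
			   unsigned int ninstrs)
{
	struct nand_operation op = {
		.cs = cs,
		.instrs = instrs,
		.ninstrs = ninstrs,
	};

	/* nand_exec_op() itself returns -ENOTSUPP without ->exec_op(). */
	return nand_exec_op(chip, &op);
}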
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index fb59cfca11a7..f92ae5aa2a54 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -335,14 +335,14 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
goto notfound_id;
/* Retrieve the IDs from the first chip. */
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
*nand_maf_id = id[0];
*nand_dev_id = id[1];
} else {
/* Detect additional chip. */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
@@ -427,8 +427,8 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->legacy.chip_delay = 50;
chip->legacy.cmd_ctrl = jz_nand_cmd_ctrl;
- chip->select_chip = jz_nand_select_chip;
- chip->dummy_controller.ops = &jz_nand_controller_ops;
+ chip->legacy.select_chip = jz_nand_select_chip;
+ chip->legacy.dummy_controller.ops = &jz_nand_controller_ops;
if (nand->busy_gpio)
chip->legacy.dev_ready = jz_nand_dev_ready;
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c
index 731c6051d91e..7201827809e9 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/jz4780_bch.c
@@ -136,8 +136,10 @@ static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
+ /* fall through */
case 2:
dest8[1] = (val >> 8) & 0xff;
+ /* fall through */
case 1:
dest8[0] = val & 0xff;
break;
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index cdf22100ab77..22e58975f0d5 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -279,7 +279,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
chip->legacy.IO_ADDR_W = cs->base + OFFSET_DATA;
chip->legacy.chip_delay = RB_DELAY_US;
chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->select_chip = jz4780_nand_select_chip;
+ chip->legacy.select_chip = jz4780_nand_select_chip;
chip->legacy.cmd_ctrl = jz4780_nand_cmd_ctrl;
chip->ecc.mode = NAND_ECC_HW;
chip->controller = &nfc->controller;
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index abbb655fe154..086964f8d424 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -799,7 +799,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device and get the type of NAND device:
* SMALL block or LARGE block.
*/
- nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(nand_chip, 1);
if (res)
goto free_irq;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index f2f2cdbb9d04..a2c5fdc875bd 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -924,7 +924,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(chip, 1);
if (res)
goto release_dma;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 650f2b490a05..84283c6bb0ff 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -378,7 +378,7 @@ struct marvell_nfc_caps {
* @dev: Parent device (used to print error messages)
* @regs: NAND controller registers
* @core_clk: Core clock
- * @reg_clk: Regiters clock
+ * @reg_clk: Registers clock
* @complete: Completion object to wait for NAND controller events
* @assigned_cs: Bitmask describing already assigned CS lines
* @chips: List containing all the NAND chips attached to
@@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDSR);
writel_relaxed(int_mask, nfc->regs + NDSR);
+
+ return reg & int_mask;
}
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
@@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 pending;
int ret;
/* Timeout is expressed in ms */
@@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
marvell_nfc_disable_int(nfc, NDCR_RDYM);
- marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
- if (!ret) {
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+ /*
+ * In case the interrupt was not served in the required time frame,
+ * check if the ISR was not served or if something went actually wrong.
+ */
+	if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
@@ -704,7 +715,8 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
return 0;
}
-static void marvell_nfc_select_chip(struct nand_chip *chip, int die_nr)
+static void marvell_nfc_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -713,12 +725,6 @@ static void marvell_nfc_select_chip(struct nand_chip *chip, int die_nr)
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
return;
- if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
- nfc->selected_chip = NULL;
- marvell_nand->selected_die = -1;
- return;
- }
-
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
@@ -1024,13 +1030,13 @@ static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
}
ret = marvell_nfc_wait_cmdd(chip);
-
return ret;
}
static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
true, page);
}
@@ -1043,6 +1049,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
int max_bitflips = 0, ret;
u8 *raw_buf;
+ marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
page);
@@ -1079,6 +1086,7 @@ static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
/* Invalidate page cache */
chip->pagebuf = -1;
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
chip->oob_poi, true, page);
}
@@ -1142,6 +1150,7 @@ static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
true, page);
}
@@ -1152,6 +1161,7 @@ static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
{
int ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
false, page);
@@ -1175,6 +1185,7 @@ static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
memset(chip->data_buf, 0xFF, mtd->writesize);
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
chip->oob_poi, true, page);
}
@@ -1194,6 +1205,8 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
int ecc_len = lt->ecc_bytes;
int chunk;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
if (oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
@@ -1304,6 +1317,8 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
u32 failure_mask = 0;
int chunk, ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
/*
* With BCH, OOB is not fully used (and thus not read entirely), not
* expected bytes could show up at the end of the OOB buffer if not
@@ -1448,6 +1463,8 @@ static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
lt->last_spare_bytes;
int chunk;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
@@ -1559,6 +1576,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int spare_len = lt->spare_bytes;
int chunk, ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
/* Spare data will be written anyway, so clear it to avoid garbage */
if (!oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
@@ -2097,6 +2116,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip,
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ marvell_nfc_select_target(chip, op->cs);
+
if (nfc->caps->is_nfcv2)
return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
op, check_only);
@@ -2495,6 +2516,8 @@ static int marvell_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops marvell_nand_controller_ops = {
.attach_chip = marvell_nand_attach_chip,
+ .exec_op = marvell_nfc_exec_op,
+ .setup_data_interface = marvell_nfc_setup_data_interface,
};
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
@@ -2617,10 +2640,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- chip->exec_op = marvell_nfc_exec_op;
- chip->select_chip = marvell_nfc_select_chip;
if (!of_property_read_bool(np, "marvell,nand-keep-config"))
- chip->setup_data_interface = marvell_nfc_setup_data_interface;
+ chip->options |= NAND_KEEP_TIMINGS;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
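The Marvell conversion above is the reference example for the hooks that moved from struct nand_chip to nand_controller_ops in this series. As a condensed, hypothetical sketch (the my_nfc_* names are illustrative only), a controller driver now declares both hooks on its controller ops:

static int my_nfc_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	/*
	 * op->cs carries the target die, so the controller can select it
	 * here instead of relying on a prior ->select_chip() call.
	 */
	if (check_only)
		return 0;

	/* ... issue the instructions in op->instrs to the hardware ... */
	return 0;
}

static int my_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
				       const struct nand_data_interface *conf)
{
	/* Program controller timings derived from conf here. */
	return 0;
}

static const struct nand_controller_ops my_nfc_controller_ops = {
	.exec_op = my_nfc_exec_op,
	.setup_data_interface = my_nfc_setup_data_interface,
};

A driver that must preserve the timings left by the boot ROM sets chip->options |= NAND_KEEP_TIMINGS instead of simply leaving the hook unassigned, as marvell_nand does for the marvell,nand-keep-config case.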
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 86a0aabe08df..062cd1eb2861 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -697,7 +697,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->legacy.read_byte = mpc5121_nfc_read_byte;
chip->legacy.read_buf = mpc5121_nfc_read_buf;
chip->legacy.write_buf = mpc5121_nfc_write_buf;
- chip->select_chip = mpc5121_nfc_select_chip;
+ chip->legacy.select_chip = mpc5121_nfc_select_chip;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -712,7 +712,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
return retval;
}
- chip->select_chip = ads5121_select_chip;
+ chip->legacy.select_chip = ads5121_select_chip;
}
/* Enable NFC clock */
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 2bb0df1b7244..b6b4602f5132 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1288,6 +1288,7 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops mtk_nfc_controller_ops = {
.attach_chip = mtk_nfc_attach_chip,
+ .setup_data_interface = mtk_nfc_setup_data_interface,
};
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
@@ -1333,13 +1334,12 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
nand->legacy.dev_ready = mtk_nfc_dev_ready;
- nand->select_chip = mtk_nfc_select_chip;
+ nand->legacy.select_chip = mtk_nfc_select_chip;
nand->legacy.write_byte = mtk_nfc_write_byte;
nand->legacy.write_buf = mtk_nfc_write_buf;
nand->legacy.read_byte = mtk_nfc_read_byte;
nand->legacy.read_buf = mtk_nfc_read_buf;
nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;
- nand->setup_data_interface = mtk_nfc_setup_data_interface;
/* set default mode in case dt entry is missing */
nand->ecc.mode = NAND_ECC_HW;
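mtk_nand and mpc5121 above show the other half of the migration: drivers that still rely on the legacy path register their chip-select callback under chip->legacy.select_chip, since ->select_chip() on struct nand_chip is deprecated in favour of the core's nand_select_target()/nand_deselect_target(). A hypothetical sketch of the legacy-style hook (my_nfc_* names are assumptions):

/* cs == -1 means "deselect"; any other value selects that die. */
static void my_nfc_select_chip(struct nand_chip *chip, int cs)
{
	if (cs < 0) {
		/* Release the CS line. */
		return;
	}

	/* Program the controller's chip-select mux for die 'cs'. */
}

static void my_nfc_chip_setup(struct nand_chip *chip)
{
	chip->legacy.select_chip = my_nfc_select_chip;
}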
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 88bd3f6a499c..59554c187e01 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1738,8 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
return 0;
}
+static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->setup_data_interface(chip, chipnr, conf);
+}
+
static const struct nand_controller_ops mxcnd_controller_ops = {
.attach_chip = mxcnd_attach_chip,
+ .setup_data_interface = mxcnd_setup_data_interface,
};
static int mxcnd_probe(struct platform_device *pdev)
@@ -1800,7 +1809,8 @@ static int mxcnd_probe(struct platform_device *pdev)
if (err < 0)
return err;
- this->setup_data_interface = host->devtype_data->setup_data_interface;
+ if (!host->devtype_data->setup_data_interface)
+ this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1828,7 +1838,7 @@ static int mxcnd_probe(struct platform_device *pdev)
this->ecc.bytes = host->devtype_data->eccbytes;
host->eccsize = host->devtype_data->eccsize;
- this->select_chip = host->devtype_data->select_chip;
+ this->legacy.select_chip = host->devtype_data->select_chip;
this->ecc.size = 512;
mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
@@ -1881,7 +1891,7 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* Scan the NAND device */
- this->dummy_controller.ops = &mxcnd_controller_ops;
+ this->legacy.dummy_controller.ops = &mxcnd_controller_ops;
err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
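The mxc_nand change illustrates how a driver whose per-variant data may lack a timing callback adapts to the controller-ops move: it always installs a thin wrapper in nand_controller_ops and flags chips on variants without the callback with NAND_KEEP_TIMINGS. A compressed, hypothetical sketch of the same pattern (my_* names are not from this patch):

struct my_devtype_data {
	int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
				    const struct nand_data_interface *conf);
};

struct my_host {
	const struct my_devtype_data *devtype;
};

static int my_setup_data_interface(struct nand_chip *chip, int chipnr,
				   const struct nand_data_interface *conf)
{
	struct my_host *host = nand_get_controller_data(chip);

	/* Forward to the variant-specific implementation. */
	return host->devtype->setup_data_interface(chip, chipnr, conf);
}

static void my_apply_timing_policy(struct nand_chip *chip,
				   const struct my_devtype_data *devtype)
{
	/* Variants without a timing callback keep the bootloader settings. */
	if (!devtype->setup_data_interface)
		chip->options |= NAND_KEEP_TIMINGS;
}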
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 05bd0779fe9b..cca4b24d2ffa 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -45,14 +45,10 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include "internals.h"
-static int nand_get_device(struct mtd_info *mtd, int new_state);
-
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
-
/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
@@ -213,10 +209,8 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
.free = nand_ooblayout_free_lp_hamming,
};
-static int check_offs_len(struct mtd_info *mtd,
- loff_t ofs, uint64_t len)
+static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int ret = 0;
/* Start address must align on block boundary */
@@ -235,15 +229,54 @@ static int check_offs_len(struct mtd_info *mtd,
}
/**
+ * nand_select_target() - Select a NAND target (A.K.A. die)
+ * @chip: NAND chip object
+ * @cs: the CS line to select. Note that this CS id is always from the chip
+ * PoV, not the controller one
+ *
+ * Select a NAND target so that further operations executed on @chip go to the
+ * selected NAND target.
+ */
+void nand_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ /*
+ * cs should always lie between 0 and chip->numchips, when that's not
+ * the case it's a bug and the caller should be fixed.
+ */
+ if (WARN_ON(cs > chip->numchips))
+ return;
+
+ chip->cur_cs = cs;
+
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, cs);
+}
+EXPORT_SYMBOL_GPL(nand_select_target);
+
+/**
+ * nand_deselect_target() - Deselect the currently selected target
+ * @chip: NAND chip object
+ *
+ * Deselect the currently selected NAND target. The result of operations
+ * executed on @chip after the target has been deselected is undefined.
+ */
+void nand_deselect_target(struct nand_chip *chip)
+{
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, -1);
+
+ chip->cur_cs = -1;
+}
+EXPORT_SYMBOL_GPL(nand_deselect_target);
+
+/**
* nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*
* Release chip lock and wake up anyone waiting on the device.
*/
-static void nand_release_device(struct mtd_info *mtd)
+static void nand_release_device(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
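The two helpers added above replace open-coded ->select_chip() calls throughout the core (the conversions appear further down in this diff). A minimal usage sketch, with my_read_id being a hypothetical caller:

static int my_read_id(struct nand_chip *chip, unsigned int die,
		      u8 *id, unsigned int len)
{
	int ret;

	/* Route subsequent operations to the requested die... */
	nand_select_target(chip, die);
	ret = nand_readid_op(chip, 0, id, len);
	/* ...and release it once done. */
	nand_deselect_target(chip);

	return ret;
}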
@@ -289,6 +322,197 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
return 0;
}
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: NAND chip structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct nand_chip *chip, int new_state)
+{
+ spinlock_t *lock = &chip->controller->lock;
+ wait_queue_head_t *wq = &chip->controller->wq;
+ DECLARE_WAITQUEUE(wait, current);
+retry:
+ spin_lock(lock);
+
+ /* Hardware controller shared among independent devices */
+ if (!chip->controller->active)
+ chip->controller->active = chip;
+
+ if (chip->controller->active == chip && chip->state == FL_READY) {
+ chip->state = new_state;
+ spin_unlock(lock);
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ spin_unlock(lock);
+ return 0;
+ }
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(wq, &wait);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+ goto retry;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @chip: NAND chip object
+ *
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct nand_chip *chip)
+{
+ u8 status;
+ int ret;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, page, status, len;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ len = mtd_oobavail(mtd, ops);
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ nand_reset(chip, chipnr);
+
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ nand_deselect_target(chip);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
+
+ nand_deselect_target(chip);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
* @chip: NAND chip object
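The large block above also shows the mechanical conversion applied to most static helpers in this file: they now take a struct nand_chip and derive the mtd_info locally only where still needed. A hypothetical helper converted the same way would look like:

static int my_check_offs(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Reject offsets beyond the device and unaligned block addresses. */
	if (ofs >= mtd->size || (ofs & ((1ULL << chip->phys_erase_shift) - 1)))
		return -EINVAL;

	return 0;
}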
@@ -320,7 +544,7 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
do {
- res = nand_do_write_oob(mtd, ofs, &ops);
+ res = nand_do_write_oob(chip, ofs, &ops);
if (!ret)
ret = res;
@@ -344,17 +568,9 @@ int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
return nand_default_block_markbad(chip, ofs);
}
-static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
-{
- if (chip->legacy.block_bad)
- return chip->legacy.block_bad(chip, ofs);
-
- return nand_block_bad(chip, ofs);
-}
-
/**
* nand_block_markbad_lowlevel - mark a block bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* This function performs the generic NAND bad block marking steps (i.e., bad
@@ -371,9 +587,9 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
* Note that we retain the first error encountered in (2) or (3), finish the
* procedures, and dump the error in the end.
*/
-static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int res, ret = 0;
if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
@@ -386,9 +602,9 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- nand_get_device(mtd, FL_WRITING);
+ nand_get_device(chip, FL_WRITING);
ret = nand_markbad_bbm(chip, ofs);
- nand_release_device(mtd);
+ nand_release_device(chip);
}
/* Mark block bad in BBT */
@@ -405,31 +621,6 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
}
/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- *
- * Check, if the device is write protected. The function expects, that the
- * device is already selected.
- */
-static int nand_check_wp(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u8 status;
- int ret;
-
- /* Broken xD cards report WP despite being writable */
- if (chip->options & NAND_BROKEN_XD)
- return 0;
-
- /* Check the WP bit */
- ret = nand_status_op(chip, &status);
- if (ret)
- return ret;
-
- return status & NAND_STATUS_WP ? 0 : 1;
-}
-
-/**
* nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
* @mtd: MTD device structure
* @ofs: offset from device start
@@ -448,17 +639,15 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
* @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
/* Return info from the table */
if (chip->bbt)
return nand_isbad_bbt(chip, ofs, allowbbt);
@@ -489,7 +678,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
u8 status = 0;
int ret;
- if (!chip->exec_op)
+ if (!nand_has_exec_op(chip))
return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
@@ -532,65 +721,37 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
/**
- * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
+ * @chip: NAND chip structure
+ * @gpiod: GPIO descriptor of R/B pin
+ * @timeout_ms: Timeout in ms
*
- * Used when in panic, no locks are taken.
- */
-static void panic_nand_get_device(struct nand_chip *chip,
- struct mtd_info *mtd, int new_state)
-{
- /* Hardware controller shared among independent devices */
- chip->controller->active = chip;
- chip->state = new_state;
-}
-
-/**
- * nand_get_device - [GENERIC] Get chip for selected access
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * Poll the R/B GPIO pin until it becomes ready. If that does not happen
+ * within the specified timeout, -ETIMEDOUT is returned.
*
- * Get the device and lock it for exclusive access
+ * This helper is intended to be used when the controller has access to the
+ * NAND R/B pin over GPIO.
+ *
+ * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
*/
-static int
-nand_get_device(struct mtd_info *mtd, int new_state)
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- spinlock_t *lock = &chip->controller->lock;
- wait_queue_head_t *wq = &chip->controller->wq;
- DECLARE_WAITQUEUE(wait, current);
-retry:
- spin_lock(lock);
+ /* Wait until R/B pin indicates chip is ready or timeout occurs */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ if (gpiod_get_value_cansleep(gpiod))
+ return 0;
- /* Hardware controller shared among independent devices */
- if (!chip->controller->active)
- chip->controller->active = chip;
+ cond_resched();
+ } while (time_before(jiffies, timeout_ms));
- if (chip->controller->active == chip && chip->state == FL_READY) {
- chip->state = new_state;
- spin_unlock(lock);
- return 0;
- }
- if (new_state == FL_PM_SUSPENDED) {
- if (chip->controller->active->state == FL_PM_SUSPENDED) {
- chip->state = FL_PM_SUSPENDED;
- spin_unlock(lock);
- return 0;
- }
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(wq, &wait);
- spin_unlock(lock);
- schedule();
- remove_wait_queue(wq, &wait);
- goto retry;
-}
+ return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
/**
* panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: timeout
*
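nand_gpio_waitrdy() above is meant for controllers whose only view of the R/B line is a GPIO (ams-delta is converted to it elsewhere in this pull). Here is a hypothetical sketch of how an ->exec_op() implementation might service a WAITRDY instruction with it; struct my_nfc and its rb_gpio field are assumptions, not part of this patch:

struct my_nfc {
	struct nand_controller base;
	struct gpio_desc *rb_gpio;
};

static inline struct my_nfc *to_my_nfc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct my_nfc, base);
}

static int my_nfc_exec_waitrdy(struct nand_chip *chip,
			       const struct nand_op_instr *instr)
{
	struct my_nfc *nfc = to_my_nfc(chip->controller);

	/* Poll the GPIO-connected R/B pin instead of a controller status bit. */
	return nand_gpio_waitrdy(chip, nfc->rb_gpio,
				 instr->ctx.waitrdy.timeout_ms);
}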
@@ -646,7 +807,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
int ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/*
@@ -664,7 +825,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
*/
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
- ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
+ ret = chip->controller->ops->setup_data_interface(chip, chipnr,
+ &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -691,21 +853,22 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
};
int ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/* Change the mode on the chip side (if supported by the NAND chip) */
if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
return ret;
}
/* Change the mode on the controller side */
- ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
+ ret = chip->controller->ops->setup_data_interface(chip, chipnr,
+ &chip->data_interface);
if (ret)
return ret;
@@ -714,10 +877,10 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
return 0;
memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
goto err_reset_chip;
@@ -735,9 +898,9 @@ err_reset_chip:
* timing mode.
*/
nand_reset_data_interface(chip, chipnr);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_reset_op(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -760,7 +923,7 @@ static int nand_init_data_interface(struct nand_chip *chip)
{
int modes, mode, ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/*
@@ -786,7 +949,7 @@ static int nand_init_data_interface(struct nand_chip *chip)
* Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
* controller supports the requested timings.
*/
- ret = chip->setup_data_interface(chip,
+ ret = chip->controller->ops->setup_data_interface(chip,
NAND_DATA_IFACE_CHECK_ONLY,
&chip->data_interface);
if (!ret) {
@@ -867,7 +1030,7 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
@@ -910,7 +1073,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
@@ -956,7 +1119,7 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
if (mtd->writesize > 512)
return nand_lp_exec_read_page_op(chip, page,
offset_in_page, buf,
@@ -995,7 +1158,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
if (len && !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1005,7 +1168,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
@@ -1050,7 +1213,7 @@ int nand_change_read_column_op(struct nand_chip *chip,
if (mtd->writesize <= 512)
return -ENOTSUPP;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2] = {};
@@ -1061,7 +1224,7 @@ int nand_change_read_column_op(struct nand_chip *chip,
PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
@@ -1109,7 +1272,7 @@ int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
if (offset_in_oob + len > mtd->oobsize)
return -EINVAL;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return nand_read_page_op(chip, page,
mtd->writesize + offset_in_oob,
buf, len);
@@ -1143,7 +1306,7 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
int ret;
u8 status;
@@ -1222,7 +1385,7 @@ int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, false);
@@ -1249,7 +1412,7 @@ int nand_prog_page_end_op(struct nand_chip *chip)
int ret;
u8 status;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1257,7 +1420,7 @@ int nand_prog_page_end_op(struct nand_chip *chip)
PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
ret = nand_exec_op(chip, &op);
if (ret)
@@ -1308,7 +1471,7 @@ int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, true);
} else {
@@ -1356,7 +1519,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
if (mtd->writesize <= 512)
return -ENOTSUPP;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2];
@@ -1365,7 +1528,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_OUT(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
@@ -1411,7 +1574,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
if (len && !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1419,7 +1582,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
@@ -1450,7 +1613,7 @@ EXPORT_SYMBOL_GPL(nand_readid_op);
*/
int nand_status_op(struct nand_chip *chip, u8 *status)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1458,7 +1621,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status)
PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(1, status, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
if (!status)
op.ninstrs--;
@@ -1487,11 +1650,11 @@ EXPORT_SYMBOL_GPL(nand_status_op);
*/
int nand_exit_status_op(struct nand_chip *chip)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1519,7 +1682,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
int ret;
u8 status;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[3] = { page, page >> 8, page >> 16 };
@@ -1530,7 +1693,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
PSEC_TO_MSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
@@ -1578,7 +1741,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
const u8 *params = data;
int i, ret;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1588,7 +1751,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1625,7 +1788,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
u8 *params = data;
int i;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1636,7 +1799,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
data, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1651,12 +1814,12 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
unsigned int delay_ns)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
PSEC_TO_NSEC(delay_ns)),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1682,14 +1845,14 @@ static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
*/
int nand_reset_op(struct nand_chip *chip)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1719,11 +1882,11 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
if (!len || !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
@@ -1763,11 +1926,11 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
if (!len || !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_OUT(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
@@ -2225,11 +2388,12 @@ int nand_reset(struct nand_chip *chip, int chipnr)
/*
* The CS line has to be released before we can apply the new NAND
- * interface settings, hence this weird ->select_chip() dance.
+	 * interface settings, hence this weird nand_select_target() /
+	 * nand_deselect_target() dance.
*/
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_reset_op(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
return ret;
@@ -2925,15 +3089,15 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
/**
* nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @mtd: mtd info structure
+ * @chip: NAND chip object
* @oob: oob destination address
* @ops: oob ops structure
* @len: size of oob to transfer
*/
-static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
switch (ops->mode) {
@@ -2990,17 +3154,17 @@ static void nand_wait_readrdy(struct nand_chip *chip)
/**
* nand_do_read_ops - [INTERN] Read data with ECC
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @from: offset to read from
* @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
-static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
int chipnr, page, realpage, col, bytes, aligned, oob_required;
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -3013,7 +3177,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -3088,8 +3252,8 @@ read_retry:
int toread = min(oobreadlen, max_oobsize);
if (toread) {
- oob = nand_transfer_oob(mtd,
- oob, ops, toread);
+ oob = nand_transfer_oob(chip, oob, ops,
+ toread);
oobreadlen -= toread;
}
}
@@ -3144,11 +3308,11 @@ read_retry:
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -3319,18 +3483,18 @@ static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @from: offset to read from
* @ops: oob operations description structure
*
* NAND read out-of-band data from the spare area.
*/
-static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int max_bitflips = 0;
int page, realpage, chipnr;
- struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
int readlen = ops->ooblen;
int len;
@@ -3345,7 +3509,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Shift to get page */
realpage = (int)(from >> chip->page_shift);
@@ -3361,7 +3525,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
break;
len = min(len, readlen);
- buf = nand_transfer_oob(mtd, buf, ops, len);
+ buf = nand_transfer_oob(chip, buf, ops, len);
nand_wait_readrdy(chip);
@@ -3378,11 +3542,11 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
ops->oobretlen = ops->ooblen - readlen;
@@ -3406,6 +3570,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
ops->retlen = 0;
@@ -3415,14 +3580,14 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- nand_get_device(mtd, FL_READING);
+ nand_get_device(chip, FL_READING);
if (!ops->datbuf)
- ret = nand_do_read_oob(mtd, from, ops);
+ ret = nand_do_read_oob(chip, from, ops);
else
- ret = nand_do_read_ops(mtd, from, ops);
+ ret = nand_do_read_ops(chip, from, ops);
- nand_release_device(mtd);
+ nand_release_device(chip);
return ret;
}
@@ -3750,7 +3915,6 @@ static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
/**
* nand_write_page - write one page
- * @mtd: MTD device structure
* @chip: NAND chip descriptor
* @offset: address offset within the page
* @data_len: length of actual data to be written
@@ -3759,10 +3923,11 @@ static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
* @page: page number to write
* @raw: use _raw version of write_page
*/
-static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int raw)
+static int nand_write_page(struct nand_chip *chip, uint32_t offset,
+ int data_len, const uint8_t *buf, int oob_required,
+ int page, int raw)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int status, subpage;
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
@@ -3786,59 +3951,21 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-/**
- * nand_fill_oob - [INTERN] Transfer client buffer to oob
- * @mtd: MTD device structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
- */
-static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
- struct mtd_oob_ops *ops)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- /*
- * Initialise to all 0xFF, to avoid the possibility of left over OOB
- * data from a previous OOB read.
- */
- memset(chip->oob_poi, 0xff, mtd->oobsize);
-
- switch (ops->mode) {
-
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_RAW:
- memcpy(chip->oob_poi + ops->ooboffs, oob, len);
- return oob + len;
-
- case MTD_OPS_AUTO_OOB:
- ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
- ops->ooboffs, len);
- BUG_ON(ret);
- return oob + len;
-
- default:
- BUG();
- }
- return NULL;
-}
-
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
* nand_do_write_ops - [INTERN] NAND write with ECC
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @to: offset to write to
* @ops: oob operations description structure
*
* NAND write with ECC.
*/
-static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int chipnr, realpage, page, column;
- struct nand_chip *chip = mtd_to_nand(mtd);
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
@@ -3863,10 +3990,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
column = to & (mtd->writesize - 1);
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
+ if (nand_check_wp(chip)) {
ret = -EIO;
goto err_out;
}
@@ -3914,14 +4041,14 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
- oob = nand_fill_oob(mtd, oob, len, ops);
+ oob = nand_fill_oob(chip, oob, len, ops);
oobwritelen -= len;
} else {
/* We still need to erase leftover OOB data */
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
- ret = nand_write_page(mtd, chip, column, bytes, wbuf,
+ ret = nand_write_page(chip, column, bytes, wbuf,
oob_required, page,
(ops->mode == MTD_OPS_RAW));
if (ret)
@@ -3939,8 +4066,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
@@ -3949,7 +4076,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->oobretlen = ops->ooblen;
err_out:
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -3973,9 +4100,9 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
int ret;
/* Grab the device */
- panic_nand_get_device(chip, mtd, FL_WRITING);
+ panic_nand_get_device(chip, FL_WRITING);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(chip, 400);
@@ -3985,81 +4112,13 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ops.datbuf = (uint8_t *)buf;
ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
+ ret = nand_do_write_ops(chip, to, &ops);
*retlen = ops.retlen;
return ret;
}
/**
- * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- *
- * NAND write out-of-band.
- */
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- int chipnr, page, status, len;
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- pr_debug("%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int)to, (int)ops->ooblen);
-
- len = mtd_oobavail(mtd, ops);
-
- /* Do not allow write past end of page */
- if ((ops->ooboffs + ops->ooblen) > len) {
- pr_debug("%s: attempt to write past end of page\n",
- __func__);
- return -EINVAL;
- }
-
- chipnr = (int)(to >> chip->chip_shift);
-
- /*
- * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
- * of my DiskOnChip 2000 test units) will clear the whole data page too
- * if we don't do this. I have no clue why, but I seem to have 'fixed'
- * it in the doc2000 driver in August 1999. dwmw2.
- */
- nand_reset(chip, chipnr);
-
- chip->select_chip(chip, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
- chip->select_chip(chip, -1);
- return -EROFS;
- }
-
- /* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
-
- nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
-
- if (ops->mode == MTD_OPS_RAW)
- status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
- else
- status = chip->ecc.write_oob(chip, page & chip->pagemask);
-
- chip->select_chip(chip, -1);
-
- if (status)
- return status;
-
- ops->oobretlen = ops->ooblen;
-
- return 0;
-}
-
-/**
* nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -4068,11 +4127,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret = -ENOTSUPP;
ops->retlen = 0;
- nand_get_device(mtd, FL_WRITING);
+ nand_get_device(chip, FL_WRITING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4085,12 +4145,12 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
}
if (!ops->datbuf)
- ret = nand_do_write_oob(mtd, to, ops);
+ ret = nand_do_write_oob(chip, to, ops);
else
- ret = nand_do_write_ops(mtd, to, ops);
+ ret = nand_do_write_ops(chip, to, ops);
out:
- nand_release_device(mtd);
+ nand_release_device(chip);
return ret;
}
@@ -4134,7 +4194,6 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int page, status, pages_per_block, ret, chipnr;
loff_t len;
@@ -4142,11 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- if (check_offs_len(mtd, instr->addr, instr->len))
+ if (check_offs_len(chip, instr->addr, instr->len))
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(mtd, FL_ERASING);
+ nand_get_device(chip, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4156,10 +4215,10 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
/* Select the NAND device */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
+ if (nand_check_wp(chip)) {
pr_debug("%s: device is write protected!\n",
__func__);
ret = -EIO;
@@ -4171,7 +4230,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
while (len) {
/* Check if we have a bad block, we do not erase bad blocks! */
- if (nand_block_checkbad(mtd, ((loff_t) page) <<
+ if (nand_block_checkbad(chip, ((loff_t) page) <<
chip->page_shift, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
__func__, page);
@@ -4210,8 +4269,8 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
/* Check, if we cross a chip boundary */
if (len && !(page & chip->pagemask)) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
@@ -4219,8 +4278,8 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
erase_exit:
/* Deselect and wake up anyone waiting on the device */
- chip->select_chip(chip, -1);
- nand_release_device(mtd);
+ nand_deselect_target(chip);
+ nand_release_device(chip);
/* Return more or less happy */
return ret;
@@ -4234,12 +4293,14 @@ erase_exit:
*/
static void nand_sync(struct mtd_info *mtd)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(mtd, FL_SYNCING);
+ nand_get_device(chip, FL_SYNCING);
/* Release it and go back */
- nand_release_device(mtd);
+ nand_release_device(chip);
}
/**
@@ -4254,13 +4315,13 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
int ret;
/* Select the NAND device */
- nand_get_device(mtd, FL_READING);
- chip->select_chip(chip, chipnr);
+ nand_get_device(chip, FL_READING);
+ nand_select_target(chip, chipnr);
- ret = nand_block_checkbad(mtd, offs, 0);
+ ret = nand_block_checkbad(chip, offs, 0);
- chip->select_chip(chip, -1);
- nand_release_device(mtd);
+ nand_deselect_target(chip);
+ nand_release_device(chip);
return ret;
}
@@ -4282,7 +4343,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
- return nand_block_markbad_lowlevel(mtd, ofs);
+ return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}
/**
@@ -4327,7 +4388,7 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
*/
static int nand_suspend(struct mtd_info *mtd)
{
- return nand_get_device(mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
}
/**
@@ -4339,7 +4400,7 @@ static void nand_resume(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
if (chip->state == FL_PM_SUSPENDED)
- nand_release_device(mtd);
+ nand_release_device(chip);
else
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
@@ -4352,19 +4413,20 @@ static void nand_resume(struct mtd_info *mtd)
*/
static void nand_shutdown(struct mtd_info *mtd)
{
- nand_get_device(mtd, FL_PM_SUSPENDED);
+ nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
}
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
- nand_legacy_set_defaults(chip);
-
+ /* If no controller is provided, use the dummy, legacy one. */
if (!chip->controller) {
- chip->controller = &chip->dummy_controller;
+ chip->controller = &chip->legacy.dummy_controller;
nand_controller_init(chip->controller);
}
+ nand_legacy_set_defaults(chip);
+
if (!chip->buf_align)
chip->buf_align = 1;
}
@@ -4628,7 +4690,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
return ret;
/* Select the device */
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/* Send the command for reading device ID */
ret = nand_readid_op(chip, 0, id_data, 2);
@@ -4953,6 +5015,9 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
unsigned int i;
int ret;
+ /* Assume all dies are deselected when we enter nand_scan_ident(). */
+ chip->cur_cs = -1;
+
/* Enforce the right timings for reset/detection */
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
@@ -4963,31 +5028,32 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if (chip->exec_op && !chip->select_chip) {
- pr_err("->select_chip() is mandatory when implementing ->exec_op()\n");
- return -EINVAL;
- }
+ /*
+	 * Start with chip->numchips = maxchips to let nand_select_target() do
+ * its job. chip->numchips will be adjusted after.
+ */
+ chip->numchips = maxchips;
+
+ /* Set the default functions */
+ nand_set_defaults(chip);
ret = nand_legacy_check_hooks(chip);
if (ret)
return ret;
- /* Set the default functions */
- nand_set_defaults(chip);
-
/* Read the flash type */
ret = nand_detect(chip, table);
if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
nand_maf_id = chip->id.data[0];
nand_dev_id = chip->id.data[1];
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
@@ -4996,15 +5062,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
- chip->select_chip(chip, i);
+ nand_select_target(chip, i);
/* Send the command for reading device ID */
nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
break;
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
}
if (i > 1)
pr_info("%d chips detected\n", i);
@@ -5022,9 +5088,9 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip)
kfree(chip->parameters.onfi);
}
-static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
+static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
@@ -5380,9 +5446,9 @@ EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
* Requirement (2) ensures we can correct even when all bitflips are clumped
* in the same sector.
*/
-static bool nand_ecc_strength_good(struct mtd_info *mtd)
+static bool nand_ecc_strength_good(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int corr, ds_corr;
@@ -5430,9 +5496,9 @@ static int nand_scan_tail(struct nand_chip *chip)
* to explictly select the relevant die when interacting with the NAND
* chip.
*/
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
ret = nand_manufacturer_init(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
goto err_free_buf;
@@ -5547,7 +5613,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->algo = NAND_ECC_HAMMING;
case NAND_ECC_SOFT:
- ret = nand_set_ecc_soft_ops(mtd);
+ ret = nand_set_ecc_soft_ops(chip);
if (ret) {
ret = -EINVAL;
goto err_nand_manuf_cleanup;
@@ -5632,7 +5698,7 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->oobavail = ret;
/* ECC sanity check: warn if it's too weak */
- if (!nand_ecc_strength_good(mtd))
+ if (!nand_ecc_strength_good(chip))
pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
mtd->name);
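Before the nand_bbt.c part, one more pattern worth calling out from the nand_base.c hunks above: NAND_OPERATION() now takes the target CS as its first argument, and callers test nand_has_exec_op() rather than chip->exec_op. A hypothetical helper built on that pattern (mirroring nand_status_op() as converted above):

static int my_read_status(struct nand_chip *chip, u8 *status)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, status, 0),
	};
	/* The CS travels with the operation so ->exec_op() can select it. */
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	return nand_exec_op(chip, &op);
}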
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 98a826838b60..1b722fe9213c 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -77,8 +77,6 @@
#define BBT_ENTRY_MASK 0x03
#define BBT_ENTRY_SHIFT 2
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
-
static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
{
uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
@@ -160,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
@@ -169,11 +167,11 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
*
* Read the bad block table starting from page.
*/
-static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
- struct nand_bbt_descr *td, int offs)
+static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
int res, ret = 0, i, j, act = 0;
- struct nand_chip *this = mtd_to_nand(mtd);
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
@@ -253,7 +251,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @chip: read the table for a specific chip, -1 read all chips; applies only if
@@ -262,16 +260,17 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
* Read the bad block table for all chips starting at a given page. We assume
* that the bbt bits are in consecutive order.
*/
-static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
if (chip == -1 || chip == i)
- res = read_bbt(mtd, buf, td->pages[i],
+ res = read_bbt(this, buf, td->pages[i],
this->chipsize >> this->bbt_erase_shift,
td, offs);
if (res)
@@ -279,7 +278,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
offs += this->chipsize >> this->bbt_erase_shift;
}
} else {
- res = read_bbt(mtd, buf, td->pages[0],
+ res = read_bbt(this, buf, td->pages[0],
mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
@@ -288,9 +287,10 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/* BBT marker is in the first page, no OOB */
-static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
- struct nand_bbt_descr *td)
+static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
size_t retlen;
size_t len;
@@ -303,7 +303,7 @@ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
/**
* scan_read_oob - [GENERIC] Scan data+OOB region to buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @offs: offset at which to scan
* @len: length of data region to read
@@ -312,9 +312,10 @@ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
* page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
* ECC condition (error or bitflip). May quit on the first (non-ECC) error.
*/
-static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
int res, ret = 0;
@@ -342,19 +343,20 @@ static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return ret;
}
-static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
- size_t len, struct nand_bbt_descr *td)
+static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
- return scan_read_data(mtd, buf, offs, td);
+ return scan_read_data(this, buf, offs, td);
else
- return scan_read_oob(mtd, buf, offs, len);
+ return scan_read_oob(this, buf, offs, len);
}
/* Scan write data with oob to flash */
-static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -367,8 +369,9 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
return mtd_write_oob(mtd, offs, &ops);
}
-static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
u32 ver_offs = td->veroffs;
if (!(td->options & NAND_BBT_NO_OOB))
@@ -378,7 +381,7 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -386,34 +389,35 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
* Read the bad block table(s) for all chips starting at a given page. We
* assume that the bbt bits are in consecutive order.
*/
-static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
- mtd->writesize, td);
- td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(this, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, md);
- md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(this, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
}
/* Scan a given block partially */
-static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int numpages)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
int j, ret;
@@ -443,7 +447,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips; applies only
@@ -452,10 +456,10 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
* Create a bad block table by scanning the device for the given good/bad block
* identify pattern.
*/
-static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *bd, int chip)
+static int create_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, numblocks, numpages;
int startblock;
loff_t from;
@@ -491,7 +495,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- ret = scan_block_fast(mtd, bd, from, buf, numpages);
+ ret = scan_block_fast(this, bd, from, buf, numpages);
if (ret < 0)
return ret;
@@ -509,7 +513,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
*
@@ -522,9 +526,10 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
*
* The bbt ident pattern resides in the oob area of the first page in a block.
*/
-static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+static int search_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
@@ -561,11 +566,11 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read(mtd, buf, offs, mtd->writesize, td);
+ scan_read(this, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
- offs = bbt_get_ver_offs(mtd, td);
+ offs = bbt_get_ver_offs(this, td);
td->version[i] = buf[offs];
}
break;
@@ -586,23 +591,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Search and read the bad block table(s).
*/
-static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td,
struct nand_bbt_descr *md)
{
/* Search the primary table */
- search_bbt(mtd, buf, td);
+ search_bbt(this, buf, td);
/* Search the mirror table */
if (md)
- search_bbt(mtd, buf, md);
+ search_bbt(this, buf, md);
}
/**
@@ -700,7 +705,7 @@ static void mark_bbt_block_bad(struct nand_chip *this,
/**
* write_bbt - [GENERIC] (Re)write the bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -708,11 +713,11 @@ static void mark_bbt_block_bad(struct nand_chip *this,
*
* (Re)write the bad block table.
*/
-static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
int bits, page, offs, numblocks, sft, sftmsk;
@@ -862,9 +867,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
continue;
}
- res = scan_write_bbt(mtd, to, len, buf,
- td->options & NAND_BBT_NO_OOB ? NULL :
- &buf[len]);
+ res = scan_write_bbt(this, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ?
+ NULL : &buf[len]);
if (res < 0) {
pr_warn("nand_bbt: error while writing BBT block %d\n",
res);
@@ -887,22 +892,21 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device for
* manufacturer / software marked good / bad blocks.
*/
-static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static inline int nand_memory_bbt(struct nand_chip *this,
+ struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
- return create_bbt(mtd, this->data_buf, bd, -1);
+ return create_bbt(this, this->data_buf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
+ * @this: the NAND device
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
*
@@ -911,10 +915,10 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
* for the chip/device. Update is necessary if one of the tables is missing or
* the version nr. of one table is less than the other.
*/
-static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+static int check_create(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd)
{
int i, chips, writeops, create, chipsel, res, res2;
- struct nand_chip *this = mtd_to_nand(mtd);
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
@@ -971,7 +975,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Create the table in memory by scanning the chip(s) */
if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
- create_bbt(mtd, buf, bd, chipsel);
+ create_bbt(this, buf, bd, chipsel);
td->version[i] = 1;
if (md)
@@ -980,7 +984,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Read back first? */
if (rd) {
- res = read_abs_bbt(mtd, buf, rd, chipsel);
+ res = read_abs_bbt(this, buf, rd, chipsel);
if (mtd_is_eccerr(res)) {
/* Mark table as invalid */
rd->pages[i] = -1;
@@ -991,7 +995,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/* If they weren't versioned, read both */
if (rd2) {
- res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ res2 = read_abs_bbt(this, buf, rd2, chipsel);
if (mtd_is_eccerr(res2)) {
/* Mark table as invalid */
rd2->pages[i] = -1;
@@ -1013,14 +1017,14 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, td, md, chipsel);
+ res = write_bbt(this, buf, td, md, chipsel);
if (res < 0)
return res;
}
/* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
+ res = write_bbt(this, buf, md, td, chipsel);
if (res < 0)
return res;
}
@@ -1029,16 +1033,71 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/**
+ * nand_update_bbt - update bad block table(s)
+ * @this: the NAND device
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct nand_chip *this, loff_t offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
+ * @this: the NAND device
* @td: bad block table descriptor
*
* The bad block table regions are marked as "bad" to prevent accidental
* erasures / writes. The regions are identified by the mark 0x02.
*/
-static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
@@ -1061,7 +1120,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
if ((oldval != BBT_BLOCK_RESERVED) &&
td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)block <<
+ nand_update_bbt(this, (loff_t)block <<
this->bbt_erase_shift);
continue;
}
@@ -1083,22 +1142,22 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
* bbts. This should only happen once.
*/
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ nand_update_bbt(this, (loff_t)(block - 1) <<
this->bbt_erase_shift);
}
}
/**
* verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
*/
-static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
u32 table_size;
@@ -1138,7 +1197,7 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already available. If
@@ -1148,9 +1207,9 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
-static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int len, res;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
@@ -1170,14 +1229,14 @@ static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* memory based bad block table.
*/
if (!td) {
- if ((res = nand_memory_bbt(mtd, bd))) {
+ if ((res = nand_memory_bbt(this, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
goto err;
}
return 0;
}
- verify_bbt_descr(mtd, td);
- verify_bbt_descr(mtd, md);
+ verify_bbt_descr(this, td);
+ verify_bbt_descr(this, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
@@ -1190,20 +1249,20 @@ static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
- read_abs_bbts(mtd, buf, td, md);
+ read_abs_bbts(this, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
- search_read_bbts(mtd, buf, td, md);
+ search_read_bbts(this, buf, td, md);
}
- res = check_create(mtd, buf, bd);
+ res = check_create(this, buf, bd);
if (res)
goto err;
/* Prevent the bbt regions from erasing / writing */
- mark_bbt_region(mtd, td);
+ mark_bbt_region(this, td);
if (md)
- mark_bbt_region(mtd, md);
+ mark_bbt_region(this, md);
vfree(buf);
return 0;
@@ -1214,61 +1273,6 @@ err:
return res;
}
-/**
- * nand_update_bbt - update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
- *
- * The function updates the bad block table(s).
- */
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
-{
- struct nand_chip *this = mtd_to_nand(mtd);
- int len, res = 0;
- int chip, chipsel;
- uint8_t *buf;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
-
- if (!this->bbt || !td)
- return -EINVAL;
-
- /* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Do we have a bbt per chip? */
- if (td->options & NAND_BBT_PERCHIP) {
- chip = (int)(offs >> this->chip_shift);
- chipsel = chip;
- } else {
- chip = 0;
- chipsel = -1;
- }
-
- td->version[chip]++;
- if (md)
- md->version[chip]++;
-
- /* Write the bad block table to the device? */
- if (td->options & NAND_BBT_WRITE) {
- res = write_bbt(mtd, buf, td, md, chipsel);
- if (res < 0)
- goto out;
- }
- /* Write the mirror bad block table to the device? */
- if (md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
- }
-
- out:
- kfree(buf);
- return res;
-}
-
/*
* Define some generic bad / good block scan pattern which are used
* while scanning a device for factory marked good / bad blocks.
@@ -1382,7 +1386,7 @@ int nand_create_bbt(struct nand_chip *this)
return ret;
}
- return nand_scan_bbt(nand_to_mtd(this), this->badblock_pattern);
+ return nand_scan_bbt(this, this->badblock_pattern);
}
EXPORT_SYMBOL(nand_create_bbt);
@@ -1433,7 +1437,6 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
*/
int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
{
- struct mtd_info *mtd = nand_to_mtd(this);
int block, ret = 0;
block = (int)(offs >> this->bbt_erase_shift);
@@ -1443,7 +1446,7 @@ int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
/* Update flash-based bad block table */
if (this->bbt_options & NAND_BBT_USE_FLASH)
- ret = nand_update_bbt(mtd, offs);
+ ret = nand_update_bbt(this, offs);
return ret;
}
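
All of the nand_bbt.c helpers above follow the same chip-first conversion, and nand_update_bbt() moves above its first caller so the forward declaration at the top of the file can be dropped. One detail worth keeping in mind when reading the moved function is the temporary-buffer sizing, which covers a full eraseblock plus its out-of-band area; condensed here as a hypothetical helper:

/* Hypothetical helper; same arithmetic as nand_update_bbt()/nand_scan_bbt(). */
static size_t bbt_tmp_buf_len(struct nand_chip *this)
{
	struct mtd_info *mtd = nand_to_mtd(this);
	size_t len = 1 << this->bbt_erase_shift;		/* eraseblock data */

	return len + (len >> this->page_shift) * mtd->oobsize;	/* plus OOB */
}
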
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index ac1b5c103968..343f477362d1 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -80,11 +80,11 @@ static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(cmd, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -98,12 +98,12 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
u16 column = ((u16)addr << 8) | addr;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_ADDR(1, &addr, 0),
NAND_OP_8BIT_DATA_OUT(1, &val, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 5c26492c841d..38b5dc22cb30 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
pr_warn("Invalid codeword size\n");
}
+ ret = 1;
+
free_jedec_param_page:
kfree(p);
return ret;
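
The one-line nand_jedec.c change is a functional fix: the detection helpers are expected to return 1 when a parameter page was found and parsed, 0 when the chip is simply not JEDEC-compliant, and a negative errno on failure. Without the added "ret = 1;", a successful JEDEC probe looked like a miss. A hedged sketch of how a caller distinguishes the three cases (the core's exact code may differ; the fallback helper is hypothetical):

static int example_detect(struct nand_chip *chip)
{
	int ret = nand_jedec_detect(chip);

	if (ret < 0)
		return ret;		/* I/O error, -ENOMEM, ... */
	if (ret == 1)
		return 0;		/* geometry taken from the parameter page */

	return example_detect_by_id(chip);	/* hypothetical ID-based fallback */
}
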
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index c5ddc86cd98c..43575943f13b 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -165,15 +165,14 @@ static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
*/
-static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int i;
/* Wait for the device to get ready */
@@ -193,11 +192,10 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
*/
void nand_wait_ready(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
if (in_interrupt() || oops_in_progress)
- return panic_nand_wait_ready(mtd, timeo);
+ return panic_nand_wait_ready(chip, timeo);
/* Wait until command is processed or timeout occurs */
timeo = jiffies + msecs_to_jiffies(timeo);
@@ -214,14 +212,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @timeo: Timeout in ms
*
* Wait for status ready (i.e. command done) or timeout.
*/
-static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
{
- register struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
@@ -321,7 +318,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
+ nand_wait_status_ready(chip, 250);
return;
/* This applies to read commands */
@@ -367,7 +364,7 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->setup_data_interface)
+ if (nand_has_setup_data_iface(chip))
ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
@@ -458,7 +455,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
+ nand_wait_status_ready(chip, 250);
return;
case NAND_CMD_RNDOUT:
@@ -525,7 +522,6 @@ EXPORT_SYMBOL(nand_get_set_features_notsupp);
/**
* nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
*
* Wait for command done. This applies to erase and program only.
@@ -581,7 +577,7 @@ void nand_legacy_set_defaults(struct nand_chip *chip)
{
unsigned int busw = chip->options & NAND_BUSWIDTH_16;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return;
/* check for proper chip_delay setup, set 20us if not */
@@ -589,15 +585,15 @@ void nand_legacy_set_defaults(struct nand_chip *chip)
chip->legacy.chip_delay = 20;
/* check, if a user supplied command function given */
- if (!chip->legacy.cmdfunc && !chip->exec_op)
+ if (!chip->legacy.cmdfunc)
chip->legacy.cmdfunc = nand_command;
/* check, if a user supplied wait function given */
if (chip->legacy.waitfunc == NULL)
chip->legacy.waitfunc = nand_wait;
- if (!chip->select_chip)
- chip->select_chip = nand_select_chip;
+ if (!chip->legacy.select_chip)
+ chip->legacy.select_chip = nand_select_chip;
/* If called twice, pointers that depend on busw may need to be reset */
if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
@@ -625,14 +621,15 @@ int nand_legacy_check_hooks(struct nand_chip *chip)
* ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
* not populated.
*/
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return 0;
/*
* Default functions assigned for ->legacy.cmdfunc() and
- * ->select_chip() both expect ->legacy.cmd_ctrl() to be populated.
+ * ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
+ * populated.
*/
- if ((!chip->legacy.cmdfunc || !chip->select_chip) &&
+ if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
!chip->legacy.cmd_ctrl) {
pr_err("->legacy.cmd_ctrl() should be provided\n");
return -EINVAL;
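
nand_legacy.c follows the same ->legacy.select_chip() move, and the nand_has_exec_op()/nand_has_setup_data_iface() helpers replace direct tests of the old chip->exec_op and chip->setup_data_interface pointers. Since both methods now live in nand_controller_ops (see the s3c2410, sunxi, tango, tegra and vf610 hunks), the helpers presumably reduce to a check on the controller ops; a sketch under that assumption, with illustrative names:

/* Assumed shape of the helpers; the real ones live in the core internals. */
static inline bool example_has_exec_op(const struct nand_chip *chip)
{
	return chip->controller && chip->controller->ops &&
	       chip->controller->ops->exec_op;
}

static inline bool example_has_setup_data_iface(const struct nand_chip *chip)
{
	return chip->controller && chip->controller->ops &&
	       chip->controller->ops->setup_data_interface;
}
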
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 358dcc957bb2..47d8cda547cf 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -33,6 +33,13 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
"MX30LF4G18AC",
"MX30LF4G28AC",
"MX60LF8G18AC",
+ "MX30UF1G18AC",
+ "MX30UF1G16AC",
+ "MX30UF2G18AC",
+ "MX30UF2G16AC",
+ "MX30UF4G18AC",
+ "MX30UF4G16AC",
+ "MX30UF4G28AC",
};
if (!chip->parameters.supports_set_get_features)
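
The Macronix change only extends the deny-list with the 1.8V MX30UF parts whose GET_FEATURES(TIMINGS) is broken; the surrounding function matches the detected model against that list before touching the SET/GET_FEATURES support flags. Roughly, under the assumption that the match is a plain string compare (the exact flags cleared are in macronix_nand_fix_broken_get_timings()):

/* Hedged sketch of how such a deny-list is typically consumed. */
static bool example_model_is_broken(struct nand_chip *chip,
				    const char * const *list, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(list[i], chip->parameters.model))
			return true;	/* disable GET/SET_FEATURES(TIMINGS) */

	return false;
}
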
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index ef8721418f2d..933d1a629c51 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2293,7 +2293,7 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- chip->dummy_controller.ops = &ns_controller_ops;
+ chip->legacy.dummy_controller.ops = &ns_controller_ops;
retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d49a7a17146c..9857e0e5acd4 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -146,7 +146,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
chip->legacy.cmd_ctrl = ndfc_hwcontrol;
chip->legacy.dev_ready = ndfc_ready;
- chip->select_chip = ndfc_select_chip;
+ chip->legacy.select_chip = ndfc_select_chip;
chip->legacy.chip_delay = 50;
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 886d05c391ef..68e8b9f7f372 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- info->dma = dma_request_chan(dev, "rxtx");
+ info->dma = dma_request_chan(dev->parent, "rxtx");
if (IS_ERR(info->dma)) {
dev_err(dev, "DMA engine request failed\n");
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 86c536ddaf24..a994b76daa50 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -63,7 +63,7 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.legacy.IO_ADDR_W = data->io_base;
data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
- data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.legacy.select_chip = pdata->ctrl.select_chip;
data->chip.legacy.write_buf = pdata->ctrl.write_buf;
data->chip.legacy.read_buf = pdata->ctrl.read_buf;
data->chip.legacy.chip_delay = pdata->chip.chip_delay;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index ef75dfa62a4f..6b76fb5c0aed 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2804,7 +2804,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->dev.parent = dev;
chip->legacy.cmdfunc = qcom_nandc_command;
- chip->select_chip = qcom_nandc_select_chip;
+ chip->legacy.select_chip = qcom_nandc_select_chip;
chip->legacy.read_byte = qcom_nandc_read_byte;
chip->legacy.read_buf = qcom_nandc_read_buf;
chip->legacy.write_buf = qcom_nandc_write_buf;
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 39be65b35ac2..c01422d953dd 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -151,8 +151,9 @@ static void r852_dma_done(struct r852_device *dev, int error)
dev->dma_stage = 0;
if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
- pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
- dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
+ R852_DMA_LEN,
+ dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
/*
@@ -197,11 +198,10 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
bounce = 1;
if (!bounce) {
- dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
R852_DMA_LEN,
- (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
-
- if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
bounce = 1;
}
@@ -835,7 +835,7 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
pci_set_master(pci_dev);
- error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (error)
goto error2;
@@ -885,8 +885,8 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
- dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
- &dev->phys_bounce_buffer);
+ dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer, GFP_KERNEL);
if (!dev->bounce_buffer)
goto error6;
@@ -946,8 +946,8 @@ error9:
error8:
pci_iounmap(pci_dev, dev->mmio);
error7:
- pci_free_consistent(pci_dev, R852_DMA_LEN,
- dev->bounce_buffer, dev->phys_bounce_buffer);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
error6:
kfree(dev);
error5:
@@ -980,8 +980,8 @@ static void r852_remove(struct pci_dev *pci_dev)
/* Cleanup */
kfree(dev->tmp_buffer);
pci_iounmap(pci_dev, dev->mmio);
- pci_free_consistent(pci_dev, R852_DMA_LEN,
- dev->bounce_buffer, dev->phys_bounce_buffer);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
kfree(dev->chip);
kfree(dev);
@@ -1045,9 +1045,9 @@ static int r852_resume(struct device *device)
/* Otherwise, initialize the card */
if (dev->card_registered) {
r852_engine_enable(dev);
- dev->chip->select_chip(dev->chip, 0);
+ nand_select_target(dev->chip, 0);
nand_reset_op(dev->chip);
- dev->chip->select_chip(dev->chip, -1);
+ nand_deselect_target(dev->chip);
}
/* Program card detection IRQ */
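
The r852 conversion is a mechanical switch from the deprecated PCI DMA wrappers to the generic DMA API: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single() on &pci_dev->dev, pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() (which takes an explicit GFP_KERNEL), and PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE map to DMA_FROM_DEVICE/DMA_TO_DEVICE. The mapping path in isolation, as a sketch rather than the driver's exact code:

/* Sketch only; names are illustrative, not the driver's. */
static dma_addr_t example_map(struct pci_dev *pci_dev, void *buf, bool do_read)
{
	enum dma_data_direction dir = do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t addr = dma_map_single(&pci_dev->dev, buf, R852_DMA_LEN, dir);

	if (dma_mapping_error(&pci_dev->dev, addr))
		return 0;	/* caller falls back to the bounce buffer */

	return addr;
}
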
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index d2e42e9d0e8c..adc7a196e383 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -866,7 +866,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->legacy.write_buf = s3c2410_nand_write_buf;
chip->legacy.read_buf = s3c2410_nand_read_buf;
- chip->select_chip = s3c2410_nand_select_chip;
+ chip->legacy.select_chip = s3c2410_nand_select_chip;
chip->legacy.chip_delay = 50;
nand_set_controller_data(chip, nmtd);
chip->options = set->options;
@@ -876,8 +876,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
* let's keep behavior unchanged for legacy boards booting via pdata and
* auto-detect timings only when booting with a device tree.
*/
- if (np)
- chip->setup_data_interface = s3c2410_nand_setup_data_interface;
+ if (!np)
+ chip->options |= NAND_KEEP_TIMINGS;
switch (info->cpu_type) {
case TYPE_S3C2410:
@@ -1011,6 +1011,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
.attach_chip = s3c2410_nand_attach_chip,
+ .setup_data_interface = s3c2410_nand_setup_data_interface,
};
static const struct of_device_id s3c24xx_nand_dt_ids[] = {
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 4d20d033de7b..cf6b1be1cf9c 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH FLCTL nand controller
*
@@ -5,20 +6,6 @@
* Copyright (c) 2008 Atom Create Engineering Co., Ltd.
*
* Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -1183,7 +1170,7 @@ static int flctl_probe(struct platform_device *pdev)
nand->legacy.read_byte = flctl_read_byte;
nand->legacy.write_buf = flctl_write_buf;
nand->legacy.read_buf = flctl_read_buf;
- nand->select_chip = flctl_select_chip;
+ nand->legacy.select_chip = flctl_select_chip;
nand->legacy.cmdfunc = flctl_cmdfunc;
nand->legacy.set_features = nand_get_set_features_notsupp;
nand->legacy.get_features = nand_get_set_features_notsupp;
@@ -1196,7 +1183,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
- nand->dummy_controller.ops = &flctl_nand_controller_ops;
+ nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
ret = nand_scan(nand, 1);
if (ret)
goto err_chip;
@@ -1236,7 +1223,7 @@ static struct platform_driver flctl_driver = {
module_platform_driver_probe(flctl_driver, flctl_probe);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 6f063ef57640..409d036858dc 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -194,7 +194,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
- chip->dummy_controller.ops = &sm_controller_ops;
+ chip->legacy.dummy_controller.ops = &sm_controller_ops;
flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
ret = nand_scan_with_ids(chip, 1, flash_ids);
if (ret)
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 51b1a548064b..e828ee50a201 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1393,7 +1393,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
sunxi_nfc_randomizer_enable(mtd);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
- nfc->regs + NFC_REG_RCMD_SET);
+ nfc->regs + NFC_REG_WCMD_SET);
dma_async_issue_pending(nfc->dmac);
@@ -1847,6 +1847,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
static const struct nand_controller_ops sunxi_nand_controller_ops = {
.attach_chip = sunxi_nand_attach_chip,
+ .setup_data_interface = sunxi_nfc_setup_data_interface,
};
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
@@ -1922,12 +1923,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
*/
nand->ecc.mode = NAND_ECC_HW;
nand_set_flash_node(nand, np);
- nand->select_chip = sunxi_nfc_select_chip;
+ nand->legacy.select_chip = sunxi_nfc_select_chip;
nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl;
nand->legacy.read_buf = sunxi_nfc_read_buf;
nand->legacy.write_buf = sunxi_nfc_write_buf;
nand->legacy.read_byte = sunxi_nfc_read_byte;
- nand->setup_data_interface = sunxi_nfc_setup_data_interface;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 8818f893f300..cb3beda88789 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -530,6 +530,7 @@ static int tango_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops tango_controller_ops = {
.attach_chip = tango_attach_chip,
+ .setup_data_interface = tango_set_timings,
};
static int chip_init(struct device *dev, struct device_node *np)
@@ -567,10 +568,9 @@ static int chip_init(struct device *dev, struct device_node *np)
chip->legacy.read_byte = tango_read_byte;
chip->legacy.write_buf = tango_write_buf;
chip->legacy.read_buf = tango_read_buf;
- chip->select_chip = tango_select_chip;
+ chip->legacy.select_chip = tango_select_chip;
chip->legacy.cmd_ctrl = tango_cmd_ctrl;
chip->legacy.dev_ready = tango_dev_ready;
- chip->setup_data_interface = tango_set_timings;
chip->options = NAND_USE_BOUNCE_BUFFER |
NAND_NO_SUBPAGE_WRITE |
NAND_WAIT_TCCS;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 9767e29d74e2..13be32c38194 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -454,29 +454,24 @@ static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
);
+static void tegra_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
static int tegra_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
+ tegra_nand_select_target(chip, op->cs);
return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
check_only);
}
-static void tegra_nand_select_chip(struct nand_chip *chip, int die_nr)
-{
- struct tegra_nand_chip *nand = to_tegra_chip(chip);
- struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
-
- WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));
-
- if (die_nr < 0 || die_nr > 0) {
- ctrl->cur_cs = -1;
- return;
- }
-
- ctrl->cur_cs = nand->cs[die_nr];
-}
-
static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
struct nand_chip *chip, bool enable)
{
@@ -503,6 +498,8 @@ static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
u32 addr1, cmd, dma_ctrl;
int ret;
+ tegra_nand_select_target(chip, chip->cur_cs);
+
if (read) {
writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
@@ -1053,6 +1050,8 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops tegra_nand_controller_ops = {
.attach_chip = &tegra_nand_attach_chip,
+ .exec_op = tegra_nand_exec_op,
+ .setup_data_interface = tegra_nand_setup_data_interface,
};
static int tegra_nand_chips_init(struct device *dev,
@@ -1115,9 +1114,6 @@ static int tegra_nand_chips_init(struct device *dev,
mtd->name = "tegra_nand";
chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
- chip->exec_op = tegra_nand_exec_op;
- chip->select_chip = tegra_nand_select_chip;
- chip->setup_data_interface = tegra_nand_setup_data_interface;
ret = nand_scan(chip, 1);
if (ret)
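
Tegra is the template for the controller-ops conversion: ->exec_op() and ->setup_data_interface() register in tegra_nand_controller_ops, the old ->select_chip() hook disappears, and the chip select is resolved per operation from op->cs (or from chip->cur_cs in the raw page-transfer path). The generic form of the pattern, which vf610 adopts below as well (names are illustrative):

/* Generic sketch of a per-operation chip select. */
static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	example_select_target(chip, op->cs);	/* CS travels with the op */

	return nand_op_parser_exec_op(chip, &example_op_parser, op, check_only);
}
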
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 9814fd4a84cf..a662ca1970e5 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2009-2015 Freescale Semiconductor, Inc. and others
*
@@ -10,11 +11,6 @@
*
* Based on original driver mpc5121_nfc.c.
*
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Limitations:
* - Untested on MPC5125 and M54418.
* - DMA and pipelining not used.
@@ -152,6 +148,7 @@ enum vf610_nfc_variant {
};
struct vf610_nfc {
+ struct nand_controller base;
struct nand_chip chip;
struct device *dev;
void __iomem *regs;
@@ -168,11 +165,6 @@ struct vf610_nfc {
u32 ecc_mode;
};
-static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
-}
-
static inline struct vf610_nfc *chip_to_nfc(struct nand_chip *chip)
{
return container_of(chip, struct vf610_nfc, chip);
@@ -316,8 +308,7 @@ static void vf610_nfc_done(struct vf610_nfc *nfc)
static irqreturn_t vf610_nfc_irq(int irq, void *data)
{
- struct mtd_info *mtd = data;
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = data;
vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
complete(&nfc->cmd_done);
@@ -487,40 +478,40 @@ static const struct nand_op_parser vf610_nfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PAGE_2K + OOB_MAX)),
);
-static int vf610_nfc_exec_op(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only)
-{
- return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
- check_only);
-}
-
/*
* This function supports Vybrid only (MPC5125 would have full RB and four CS)
*/
-static void vf610_nfc_select_chip(struct nand_chip *chip, int cs)
+static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs)
{
- struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
- u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ u32 tmp;
/* Vybrid only (MPC5125 would have full RB and four CS) */
if (nfc->variant != NFC_VFC610)
return;
+ tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
-
- if (cs >= 0) {
- tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
- tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
- }
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
}
-static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+static int vf610_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ vf610_nfc_select_target(chip, op->cs);
+ return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
+ check_only);
+}
+
+static inline int vf610_nfc_correct_data(struct nand_chip *chip, uint8_t *dat,
uint8_t *oob, int page)
{
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
u8 ecc_status;
u8 ecc_count;
@@ -560,12 +551,14 @@ static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
int stat;
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
cmd2 |= NAND_CMD_READ0 << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
@@ -592,7 +585,7 @@ static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
mtd->writesize,
mtd->oobsize, false);
- stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
+ stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
if (stat < 0) {
mtd->ecc_stats.failed++;
@@ -606,13 +599,15 @@ static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
u8 status;
int ret;
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
cmd2 |= NAND_CMD_SEQIN << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
@@ -648,8 +643,7 @@ static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -662,8 +656,8 @@ static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int ret;
nfc->data_access = true;
@@ -681,7 +675,7 @@ static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
{
- struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -694,7 +688,7 @@ static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -751,7 +745,7 @@ static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
static int vf610_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
vf610_nfc_init_controller(nfc);
@@ -809,6 +803,8 @@ static int vf610_nfc_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops vf610_nfc_controller_ops = {
.attach_chip = vf610_nfc_attach_chip,
+ .exec_op = vf610_nfc_exec_op,
+
};
static int vf610_nfc_probe(struct platform_device *pdev)
@@ -876,14 +872,11 @@ static int vf610_nfc_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- chip->exec_op = vf610_nfc_exec_op;
- chip->select_chip = vf610_nfc_select_chip;
-
chip->options |= NAND_NO_SUBPAGE_WRITE;
init_completion(&nfc->cmd_done);
- err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, mtd);
+ err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
goto err_disable_clk;
@@ -891,13 +884,16 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &vf610_nfc_controller_ops;
+ chip->controller = &nfc->base;
+
/* Scan the NAND chip */
- chip->dummy_controller.ops = &vf610_nfc_controller_ops;
err = nand_scan(chip, 1);
if (err)
goto err_disable_clk;
- platform_set_drvdata(pdev, mtd);
+ platform_set_drvdata(pdev, nfc);
/* Register device in MTD */
err = mtd_device_register(mtd, NULL, 0);
@@ -914,10 +910,9 @@ err_disable_clk:
static int vf610_nfc_remove(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = platform_get_drvdata(pdev);
- nand_release(mtd_to_nand(mtd));
+ nand_release(&nfc->chip);
clk_disable_unprepare(nfc->clk);
return 0;
}
@@ -925,8 +920,7 @@ static int vf610_nfc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int vf610_nfc_suspend(struct device *dev)
{
- struct mtd_info *mtd = dev_get_drvdata(dev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
clk_disable_unprepare(nfc->clk);
return 0;
@@ -934,11 +928,9 @@ static int vf610_nfc_suspend(struct device *dev)
static int vf610_nfc_resume(struct device *dev)
{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
int err;
- struct mtd_info *mtd = dev_get_drvdata(dev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
err = clk_prepare_enable(nfc->clk);
if (err)
return err;
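
vf610 also stops using the deprecated dummy_controller: the driver now embeds its own struct nand_controller, initialises it with nand_controller_init(), points it at vf610_nfc_controller_ops and attaches the chip to it before nand_scan(). Condensed into a hypothetical probe fragment (error handling omitted):

/* Sketch of the dedicated-controller pattern. */
struct example_nfc {
	struct nand_controller base;
	struct nand_chip chip;
};

static int example_nfc_scan(struct example_nfc *nfc)
{
	struct nand_chip *chip = &nfc->chip;

	nand_controller_init(&nfc->base);
	nfc->base.ops = &example_nfc_controller_ops;	/* .attach_chip, .exec_op, ... */
	chip->controller = &nfc->base;

	return nand_scan(chip, 1);	/* a single chip select in this sketch */
}
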
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index a234a5cb4868..4cb78106af14 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -176,7 +176,7 @@ static int xway_nand_probe(struct platform_device *pdev)
data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
data->chip.legacy.dev_ready = xway_dev_ready;
- data->chip.select_chip = xway_select_chip;
+ data->chip.legacy.select_chip = xway_select_chip;
data->chip.legacy.write_buf = xway_write_buf;
data->chip.legacy.read_buf = xway_read_buf;
data->chip.legacy.read_byte = xway_read_byte;
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index b74e074b363a..753125082640 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o macronix.o micron.o winbond.o
+spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 30f83649c481..479c2f2cf17f 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -764,8 +764,10 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
+ &toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
new file mode 100644
index 000000000000..e4141c20947a
--- /dev/null
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Chuanhong Guo <gch981213@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_GIGADEVICE 0xC8
+#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
+#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = 16 * section;
+ region->length = 8;
+ } else {
+ /* section 0 has one byte reserved for bad block mark */
+ region->offset = 1;
+ region->length = 7;
+ }
+ return 0;
+}
+
+static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /* 1-7 bits are flipped. return the maximum. */
+ return 7;
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
+ .ecc = gd5fxgq4xa_ooblayout_ecc,
+ .free = gd5fxgq4xa_ooblayout_free,
+};
+
+static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+};
+
+static int gigadevice_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+	 * For GD NANDs, an address byte must be shifted in before the IDs are
+	 * read out, so the first byte in raw_id is dummy.
+ */
+ if (id[1] != SPINAND_MFR_GIGADEVICE)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
+ ARRAY_SIZE(gigadevice_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+ .detect = gigadevice_spinand_detect,
+};
+
+const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
+ .id = SPINAND_MFR_GIGADEVICE,
+ .name = "GigaDevice",
+ .ops = &gigadevice_spinand_manuf_ops,
+};
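
The GigaDevice driver reports a pessimistic 7 for the device's "1 to 7 bitflips" status code because the chip cannot say exactly how many bits were corrected; the SPI NAND core only needs an upper bound to decide whether a block is approaching the ECC strength. A hypothetical sketch of how that return value is consumed (the real logic lives in drivers/mtd/nand/spi/core.c; field names follow SPINAND_ECCINFO() above):

/* Hypothetical caller, for illustration only. */
static int example_finish_read(struct spinand_device *spinand, u8 status)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	int ret = spinand->eccinfo.get_status(spinand, status);

	if (ret == -EBADMSG)
		mtd->ecc_stats.failed++;		/* uncorrectable page */
	else if (ret > 0)
		mtd->ecc_stats.corrected += ret;	/* max corrected bitflips */

	return ret;
}
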
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
new file mode 100644
index 000000000000..081265557e70
--- /dev/null
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 exceet electronics GmbH
+ * Copyright (c) 2018 Kontron Electronics GmbH
+ *
+ * Author: Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_TOSHIBA 0x98
+#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 128 + 16 * section;
+ region->length = 16;
+
+ return 0;
+}
+
+static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ /* 2 bytes reserved for BBM */
+ region->offset = 2;
+ region->length = 126;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = {
+ .ecc = tc58cvg2s0h_ooblayout_ecc,
+ .free = tc58cvg2s0h_ooblayout_free,
+};
+
+static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ case TOSH_STATUS_ECC_HAS_BITFLIPS_T:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (spi_mem_exec_op(spinand->spimem, &op))
+ return nand->eccreq.strength;
+
+ mbf >>= 4;
+
+ if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
+ return nand->eccreq.strength;
+
+ return mbf;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info toshiba_spinand_table[] = {
+ SPINAND_INFO("TC58CVG2S0H", 0xCD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout,
+ tc58cvg2s0h_ecc_get_status)),
+};
+
+static int toshiba_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Toshiba SPI NAND read ID needs a dummy byte,
+ * so the first byte in id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_TOSHIBA)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, toshiba_spinand_table,
+ ARRAY_SIZE(toshiba_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
+ .detect = toshiba_spinand_detect,
+};
+
+const struct spinand_manufacturer toshiba_spinand_manufacturer = {
+ .id = SPINAND_MFR_TOSHIBA,
+ .name = "Toshiba",
+ .ops = &toshiba_spinand_manuf_ops,
+};
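
Both of the new SPI-NAND ECC status callbacks above follow the same pattern: map the 2-bit ECC field of the status register to a number of corrected bitflips (or -EBADMSG), and, where the chip exposes it, read an extra feature register holding the real maximum so the wear-leveling layer is not forced to move data around unnecessarily. A minimal userspace sketch of that decode logic follows; the register values are illustrative stand-ins, not the driver's real STATUS_ECC_* / SPINAND_GET_FEATURE_OP definitions.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the 2-bit ECC field in the status register. */
#define ECC_MASK		(3 << 4)
#define ECC_NO_BITFLIPS		(0 << 4)
#define ECC_HAS_BITFLIPS	(1 << 4)
#define ECC_UNCOR_ERROR		(2 << 4)

#define ECC_STRENGTH		8	/* assumed on-die ECC strength (bits/512B) */

/*
 * Decode the status register; "mbf" models the extra feature register the
 * Toshiba part exposes, whose upper nibble holds the maximum number of
 * bitflips seen in the page (pass a negative value if unavailable).
 */
static int ecc_status_to_bitflips(unsigned char status, int mbf)
{
	switch (status & ECC_MASK) {
	case ECC_NO_BITFLIPS:
		return 0;
	case ECC_UNCOR_ERROR:
		return -EBADMSG;
	case ECC_HAS_BITFLIPS:
		if (mbf < 0)
			return ECC_STRENGTH;	/* no detail, be conservative */
		mbf >>= 4;
		if (!mbf || mbf > ECC_STRENGTH)
			return ECC_STRENGTH;	/* implausible value, assume worst */
		return mbf;
	}
	return -EINVAL;
}

int main(void)
{
	printf("%d\n", ecc_status_to_bitflips(ECC_HAS_BITFLIPS, 0x30)); /* prints 3 */
	printf("%d\n", ecc_status_to_bitflips(ECC_HAS_BITFLIPS, -1));   /* prints 8 */
	return 0;
}
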
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 67baa1b32c00..5d944580b898 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -84,6 +84,14 @@ static const struct spinand_info winbond_spinand_table[] = {
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N01GV", 0xAA,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
};
/**
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index e24db817154e..d846428ef038 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -996,7 +996,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
err_unmap:
dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
- return 0;
+ return ret;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 9407ca5f9443..3e54e31889c7 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3250,12 +3250,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
memcpy(&sfdp_params, params, sizeof(sfdp_params));
memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
- if (spi_nor_parse_sfdp(nor, &sfdp_params))
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
/* restore previous erase map */
memcpy(&nor->erase_map, &prev_map,
sizeof(nor->erase_map));
- else
+ } else {
memcpy(params, &sfdp_params, sizeof(*params));
+ }
}
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ffa37adb7681..333387f1f1fe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_CHANGE:
/* For 802.3ad mode only:
* Getting invalid Speed/Duplex values here will put slave
- * in weird state. So mark it as link-down for the time
+ * in weird state. So mark it as link-fail for the time
* being and let link-monitoring (miimon) set it right when
* correct speeds/duplex are available.
*/
if (bond_update_speed_duplex(slave) &&
BOND_MODE(bond) == BOND_MODE_8023AD)
- slave->link = BOND_LINK_DOWN;
+ slave->link = BOND_LINK_FAIL;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 54e0ca6ed730..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
{
int i;
- mutex_init(&dev->reg_mutex);
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
if (ksz_switch_detect(dev))
return -EINVAL;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
if (err)
return err;
+ /* Keep the histogram mode bits */
+ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 6a633c70f603..99ef1daaa4d8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 fc = aq_nic->aq_nic_cfg.flow_control;
pause->autoneg = 0;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
- pause->rx_pause = 1;
- if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
- pause->tx_pause = 1;
+ pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+ pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
}
static int aq_ethtool_set_pauseparam(struct net_device *ndev,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index e8689241204e..a1e70da358ca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -204,6 +204,10 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+ int (*hw_set_offload)(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
+ int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
+ u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
int (*set_flow_control)(struct aq_hw_s *self);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e3ae29e523f0..7c07eef275eb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
bool is_lro = false;
+ int err = 0;
+
+ aq_cfg->features = features;
- if (aq_cfg->hw_features & NETIF_F_LRO) {
+ if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
is_lro = features & NETIF_F_LRO;
if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
}
}
}
+ if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+ err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+ aq_cfg);
- return 0;
+ return err;
}
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 5fed24446687..7abdc0952425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
}
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
- cfg->hw_features = cfg->aq_hw_caps->hw_features;
+ cfg->features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+ u32 fc = 0;
if (err)
return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+
+ /* Driver has to update flow control settings on RX block
+ * on any link event.
+ * We should query FW whether it negotiated FC.
+ */
+ if (self->aq_fw_ops->get_flow_control)
+ self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+ if (self->aq_hw_ops->hw_set_fc)
+ self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
}
self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
}
}
- if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+ /* Asym is when either RX or TX, but not both */
+ if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+ !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index c1582f4e8e1b..44ec47a3d60a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -23,7 +23,7 @@ struct aq_vec_s;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
- u64 hw_features;
+ u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
u32 vecs; /* vecs==allocated irqs */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3db91446cc67..74550ccc7a20 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
return !!budget;
}
+static void aq_rx_checksum(struct aq_ring_s *self,
+ struct aq_ring_buff_s *buff,
+ struct sk_buff *skb)
+{
+ if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(buff->is_cso_err)) {
+ ++self->stats.rx.errors;
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
skb->protocol = eth_type_trans(skb, ndev);
- if (unlikely(buff->is_cso_err)) {
- ++self->stats.rx.errors;
- skb->ip_summed = CHECKSUM_NONE;
- } else {
- if (buff->is_ip_cso) {
- __skb_incr_checksum_unnecessary(skb);
- if (buff->is_udp_cso || buff->is_tcp_cso)
- __skb_incr_checksum_unnecessary(skb);
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+
+ aq_rx_checksum(self, buff, skb);
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 76d25d594a0f..f02592f43fe3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
return err;
}
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+ return 0;
+}
+
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
- bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* QoS Rx buf size per TC */
tc = 0;
- is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
- hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
- hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
- hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+ NETIF_F_RXCSUM));
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
- unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
+ u8 rx_stat = 0U;
if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff = &ring->buff_ring[ring->hw_head];
- is_err = (0x0000003CU & rxd_wb->status);
+ rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
- is_err &= ~0x20U; /* exclude validity bit */
pkt_type = 0xFFU & (rxd_wb->type >> 4);
- if (is_rx_check_sum_enabled) {
- if (0x0U == (pkt_type & 0x3U))
- buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(0) &&
+ (0x0U == (pkt_type & 0x3U)))
+ buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
+ if (is_rx_check_sum_enabled & BIT(1)) {
if (0x4U == (pkt_type & 0x1CU))
- buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+ buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
else if (0x0U == (pkt_type & 0x1CU))
- buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
- /* Checksum offload workaround for small packets */
- if (rxd_wb->pkt_len <= 60) {
- buff->is_ip_cso = 0U;
- buff->is_cso_err = 0U;
- }
+ buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+ !!(rx_stat & BIT(3));
+ }
+ buff->is_cso_err = !!(rx_stat & 0x6);
+ /* Checksum offload workaround for small packets */
+ if (unlikely(rxd_wb->pkt_len <= 60)) {
+ buff->is_ip_cso = 0U;
+ buff->is_cso_err = 0U;
}
-
- is_err &= ~0x18U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
- if (is_err || rxd_wb->type & 0x1000U) {
- /* status error or DMA error */
+ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+ /* MAC error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
return aq_hw_err_from_flags(self);
}
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
+ .hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index be0a3a90dfad..5502ec5f0f69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+ init);
+}
+
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 7056c7342afc..41f239928c15 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
/* set rx xoff enable (per tc) */
void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 716674a9b729..a715fa317b1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -293,6 +293,24 @@
/* default value of bitfield desc{d}_reset */
#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 096ca5730887..7de3220d9cab 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,8 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
return 0;
}
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+ if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_RX;
+ else
+ *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+ else
+ if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+ *fcmode = AQ_NIC_FC_TX;
+ else
+ *fcmode = 0;
+
+ return 0;
+}
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
.set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control
};
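
The new aq_fw2x_get_flow_control() above derives the negotiated flow-control mode from two firmware capability bits, PAUSE and ASYMMETRIC PAUSE: both set means RX-only pause, PAUSE alone means symmetric RX+TX, ASYM alone means TX-only, and neither means flow control is off. A standalone sketch of that mapping follows; the bit positions and the FC_RX/FC_TX values are placeholders, not the driver's CAPS_HI_* / AQ_NIC_FC_* constants.

#include <stdio.h>

#define CAP_PAUSE	(1u << 0)	/* placeholder capability bits */
#define CAP_ASYM_PAUSE	(1u << 1)

#define FC_RX		0x1u		/* placeholder flow-control flags */
#define FC_TX		0x2u

/* Map the two negotiated pause capability bits to an RX/TX pause mode. */
static unsigned int caps_to_fc_mode(unsigned int caps)
{
	int pause = !!(caps & CAP_PAUSE);
	int asym = !!(caps & CAP_ASYM_PAUSE);

	if (pause && asym)
		return FC_RX;		/* receive pause frames only */
	if (pause)
		return FC_RX | FC_TX;	/* symmetric flow control */
	if (asym)
		return FC_TX;		/* send pause frames only */
	return 0;			/* flow control disabled */
}

int main(void)
{
	unsigned int caps;

	for (caps = 0; caps < 4; caps++)
		printf("caps=%u -> fc=0x%x\n", caps, caps_to_fc_mode(caps));
	return 0;
}
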
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 78c5de467426..9d0e74f6b089 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -140,6 +140,5 @@ struct alx_priv {
};
extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 7968c644ad86..c131cfc1b79d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -49,7 +49,7 @@
#include "hw.h"
#include "reg.h"
-const char alx_drv_name[] = "alx";
+static const char alx_drv_name[] = "alx";
static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4122553e224b..0e2d99c737e3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
intrl2_1_mask_clear(priv, 0xffffffff);
else
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
- /* Last call before we start the real business */
- netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
bcm_sysport_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
out_clear_rx_int:
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
/* stop all software from updating hardware */
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
napi_disable(&priv->napi);
cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcm_sysport_netif_stop(dev);
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Disable UniMAC RX */
umac_enable_set(priv, CMD_RX_EN, 0);
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- netif_device_attach(dev);
-
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
bcm_sysport_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_free_rx_ring:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20c1681bb1af..2d6f090bf644 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
- netif_tx_start_all_queues(dev);
bcmgenet_enable_tx_napi(priv);
/* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
bcmgenet_disable_tx_napi(priv);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcmgenet_netif_stop(dev);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);
- netif_device_attach(dev);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_clk_disable:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3f96aa30068e..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3760,7 +3760,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF)) {
ret = hns3_restore_vlan(netdev);
- return ret;
+ if (ret)
+ return ret;
}
ret = hns3_restore_fd_rules(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aa5cb9834d73..494e562fe8c7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
- struct hclge_vport *vport = hdev->vport;
- u32 i, k, qs_bitmap;
- int ret;
+ int i;
for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
- qs_bitmap = 0;
+ u32 qs_bitmap = 0;
+ int k, ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hclge_vport *vport = &hdev->vport[k];
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
-
- vport++;
}
ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7893beffcc71..c9d5d0a7fbf1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1545,7 +1545,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
- if (adapter->vlan_header_insertion) {
+ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index bc71a21c1dc2..21c2688d6308 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_SCTP_CRC |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
netdev->hw_features |= hw_features;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4c4b5717a627..b8548370f1c7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+#define ICE_MAX_RESET_WAIT 20
+
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
u64 tx_linearize;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
unsigned int current_netdev_flags;
u32 tx_restart;
u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8cd6a2401fd9..554fd707a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_shutdown_all_ctrlq(hw);
+
+ /* Clear VSI contexts if not already cleared */
+ ice_clear_all_vsi_ctx(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 96923580f2a6..648acdb4c644 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
if (!test_bit(__ICE_DOWN, pf->state)) {
- /* Give it a little more time to try to come back */
+ /* Give it a little more time to try to come back. If still
+ * down, restart autoneg link or reinitialize the interface.
+ */
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
+
+ ice_down(vsi);
+ ice_up(vsi);
}
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 5fdea6ec7675..596b9fb1c510 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -242,6 +242,8 @@
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLPCI_CNF2 0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 5bacad01f0c9..1041fa2a7767 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
- ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
vsi->back->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
* on this wq
*/
if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ ice_napi_del(vsi);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 05993451147a..333312a1d595 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1465,7 +1465,7 @@ skip_req_irq:
* ice_napi_del - Remove NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be removed
*/
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
{
int v_idx;
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
+ int ret = ice_cfg_vlan_pruning(vsi, true);
+
if (ret)
return ret;
}
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- ret = ice_vsi_add_vlan(vsi, vid);
-
- if (!ret)
- set_bit(vid, vsi->active_vlans);
-
- return ret;
+ return ice_vsi_add_vlan(vsi, vid);
}
/**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
if (status)
return status;
- clear_bit(vid, vsi->active_vlans);
-
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
status = ice_cfg_vlan_pruning(vsi, false);
@@ -2002,6 +1995,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * There is no error returned here because the driver should be able to handle
+ * 128 Byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+ if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+ dev_warn(&pf->pdev->dev,
+ "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ ICE_CACHE_LINE_BYTES);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ ice_verify_cacheline_size(pf);
+
return 0;
err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
if (!pf)
return;
+ for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+ if (!ice_is_reset_in_progress(pf->state))
+ break;
+ msleep(100);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
@@ -2510,31 +2527,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
- int err;
- u16 vid;
-
- if (!vsi->netdev)
- return -EINVAL;
-
- err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
-
- for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
- err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_restore_vlan(vsi);
+
+ err = ice_vsi_vlan_setup(vsi);
+
if (err)
return err;
}
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
- int err;
+ int err, i;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
}
ice_reset_all_vfs(pf, true);
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ bool link_up;
+
+ if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+ continue;
+ ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+ if (link_up) {
+ netif_carrier_on(pf->vsi[i]->netdev);
+ netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+ } else {
+ netif_carrier_off(pf->vsi[i]->netdev);
+ netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+ }
+ }
+
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 33403f39f1b3..40c9c6558956 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -348,6 +348,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_VSI; i++)
+ ice_clear_vsi_ctx(hw, i);
+}
+
+/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
* @vsi_handle: unique VSI handle provided by drivers
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index b88d96a1ef69..d5ef0bd58bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5dae968d853e..fe5bbabbb41e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
- first->bytecount = (first->gso_segs - 1) * off->header_len;
+ first->bytecount += (first->gso_segs - 1) * off->header_len;
cd_tso_len = skb->len - off->header_len;
cd_mss = skb_shinfo(skb)->gso_size;
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
- * return (((size >> 12) * 85) >> 8) + 1;
+ * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
*
* Since multiplication and division are commutative, we can reorder
* operations into:
- * return ((size * 85) >> 20) + 1;
+ * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
*/
static unsigned int ice_txd_use_count(unsigned int size)
{
- return ((size * 85) >> 20) + 1;
+ return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
/**
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+ ICE_DESCS_FOR_CTX_DESC)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
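
The descriptor-count change above keeps the existing approximation of "descriptors needed for a fragment": multiplying by 85 and shifting right by 20 is a cheap stand-in for dividing by roughly 12K (2^20 / 85 is about 12336), and the constant added afterwards is now the named ICE_DESCS_FOR_SKB_DATA_PTR instead of a bare 1. A quick standalone check of the arithmetic for a few sizes, under that assumed ~12K-per-descriptor budget:

#include <stdio.h>

#define DESCS_FOR_SKB_DATA_PTR	1	/* placeholder for the driver constant */

/* Approximate division by ~12K with a multiply and shift, as the driver does. */
static unsigned int txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + DESCS_FOR_SKB_DATA_PTR;
}

int main(void)
{
	unsigned int sizes[] = { 1500, 4096, 12288, 16384, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size=%6u -> %u descriptors (size/12336 + 1 = %u)\n",
		       sizes[i], txd_use_count(sizes[i]),
		       sizes[i] / 12336 + 1);
	return 0;
}
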
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1d0f58bd389b..75d0eaf6c9dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -22,8 +22,21 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We are assuming that the cache line is always 64 Bytes here for ice.
+ * In order to make sure that is a correct assumption there is a check in probe
+ * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
+ * size is 128 bytes. We do it this way because we do not want to read the
+ * GLPCI_CNF2 register or a variable containing the value on every pass through
+ * the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES 64
+#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
+ sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC 1
+#define ICE_DESCS_FOR_SKB_DATA_PTR 1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+ ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
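
With the new constants, the worst-case Tx descriptor budget becomes explicit: one descriptor per possible skb fragment, one for the context descriptor, one for the skb's linear data pointer, plus a full cache line's worth of descriptors. A back-of-the-envelope evaluation follows, assuming the usual 16-byte hardware Tx descriptor and the common MAX_SKB_FRAGS value of 17; neither value appears in this hunk, so both are stated assumptions.

#include <stdio.h>

/* Assumed values, not taken from this patch. */
#define MAX_SKB_FRAGS		17
#define TX_DESC_SIZE		16	/* bytes per hardware Tx descriptor */

#define CACHE_LINE_BYTES	64
#define DESCS_PER_CACHE_LINE	(CACHE_LINE_BYTES / TX_DESC_SIZE)
#define DESCS_FOR_CTX_DESC	1
#define DESCS_FOR_SKB_DATA_PTR	1

#define DESC_NEEDED	(MAX_SKB_FRAGS + DESCS_FOR_CTX_DESC + \
			 DESCS_PER_CACHE_LINE + DESCS_FOR_SKB_DATA_PTR)

int main(void)
{
	/* 17 + 1 + 4 + 1 = 23 descriptors in the worst case. */
	printf("DESC_NEEDED = %d\n", DESC_NEEDED);
	return 0;
}
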
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 12f9432abf11..f4dbc81c1988 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -92,12 +92,12 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
+ u16 req_speeds;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
u8 pacing;
- u8 req_speeds;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 45f10f8f01dc..e71065f9d391 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
struct ice_vsi_ctx ctxt = { 0 };
enum ice_status status;
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ICE_AQ_VSI_PVLAN_INSERT_PVID |
ICE_AQ_VSI_VLAN_EMOD_STR;
ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_vsi_add_vlan(vsi, vid)) {
vf->num_vlan++;
- set_bit(vid, vsi->active_vlans);
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
if (!ice_vsi_kill_vlan(vsi, vid)) {
vf->num_vlan--;
- clear_bit(vid, vsi->active_vlans);
/* Disable VLAN pruning when removing VLAN 0 */
if (unlikely(!vid))
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 29ced6b74d36..2b95dc9c7a6a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -53,13 +53,15 @@
* 2^40 * 10^-9 / 60 = 18.3 minutes.
*
* SYSTIM is converted to real time using a timecounter. As
- * timecounter_cyc2time() allows old timestamps, the timecounter
- * needs to be updated at least once per half of the SYSTIM interval.
- * Scheduling of delayed work is not very accurate, so we aim for 8
- * minutes to be sure the actual interval is shorter than 9.16 minutes.
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
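
The shorter 6-minute overflow period above follows directly from the numbers in the comment: the timecounter must be refreshed within half of the 18.3-minute SYSTIM wrap (about 9.16 minutes of NIC time), the delayed work can fire late when the system clock runs up to 10% slow, and the NIC clock itself can be adjusted up to 6% fast. One way to model that worst case, as a small arithmetic check with no driver structures involved:

#include <stdio.h>

int main(void)
{
	const double half_wrap_min = 18.3 / 2.0;	/* ~9.16 min of NIC time */
	const double sys_slow = 0.10;			/* system clock up to 10% slow */
	const double nic_fast = 0.06;			/* NIC clock up to 6% fast */
	double scheduled;

	for (scheduled = 6.0; scheduled <= 8.0; scheduled += 2.0) {
		/* Real time elapsed if the scheduling clock runs slow... */
		double real = scheduled / (1.0 - sys_slow);
		/* ...seen through a NIC clock that runs fast. */
		double nic = real * (1.0 + nic_fast);

		printf("scheduled %.0f min -> up to %.2f min of NIC time (%s)\n",
		       scheduled, nic,
		       nic < half_wrap_min ? "safe" : "may overflow");
	}
	return 0;
}
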
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bfd349bf41a..3ba672e9e353 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -494,7 +494,7 @@ struct mvneta_port {
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
u32 command; /* Options used by HW for packet transmitting.*/
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u16 data_size; /* Data size of transmitted packet in bytes */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
u32 reserved2; /* hw_cmd - (for future use, PMT) */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
#else
struct mvneta_tx_desc {
u16 data_size; /* Data size of transmitted packet in bytes */
- u16 reserverd1; /* csum_l4 (for future use) */
+ u16 reserved1; /* csum_l4 (for future use) */
u32 command; /* Options used by HW for packet transmitting.*/
u32 reserved2; /* hw_cmd - (for future use, PMT) */
u32 buf_phys_addr; /* Physical addr of transmitted buffer */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1857ee0f0871..6f5153afcab4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
- netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
- send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+ send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+ tx_info->nr_bytes,
+ skb->xmit_more);
real_size = (real_size / 16) & 0x3f;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a2df12b79f8e..9bec940330a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- is_bytes = true;
rate = 4 * 1024;
burst_size = 4;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cc1b373c0ace..46dc93d3b9b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
fcoe_pf_params->num_cqs,
p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err;
}
p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
if (rc)
- return rc;
+ goto err;
cxt_info.iid = dummy_cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
dummy_cid);
- return rc;
+ goto err;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
return rc;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 1135387bd99d..4f8a685d1a55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 82a1bd1f8a8c..67c02ea93906 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
- /* Return spq entry which is taken in qed_sp_init_request()*/
- qed_spq_return_entry(p_hwfn, p_ent);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"%d is not supported yet\n",
p_filter_cmd->opcode);
+ qed_sp_destroy_request(p_hwfn, *pp_ent);
return -EINVAL;
}
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
} else {
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
- return rc;
+ goto err;
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
&abs_rx_q_id);
if (rc)
- return rc;
+ goto err;
p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
(u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
}
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f40f654398a0..a96364df4320 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
u32 transceiver_type, transceiver_state;
+ int ret;
- qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
- &transceiver_type);
+ ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+ if (ret)
+ return ret;
if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
false)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c71391b9c757..62113438c880 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
default:
rc = -EINVAL;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
SET_FIELD(p_ramrod->flags1,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f9167d1354bb..e49fada85410 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index e95431f6acd4..3157c0d99441 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -167,6 +167,9 @@ struct qed_spq_entry {
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+
+ /* Posted entry for unlimited list entry in EBLOCK mode */
+ struct qed_spq_entry *post_ent;
};
struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
struct qed_spq_comp_cb *p_comp_data;
};
+/**
+ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
+ * Should be called in error flows after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 77b6248ad3b9..888274fa208b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -47,6 +47,19 @@
#include "qed_sp.h"
#include "qed_sriov.h"
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ /* qed_spq_get_entry() can either get an entry from the free_pool,
+ * or, if no entries are left, allocate a new entry and add it to
+ * the unlimited_pending list.
+ */
+ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+ kfree(p_ent);
+ else
+ qed_spq_return_entry(p_hwfn, p_ent);
+}
+
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
case QED_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
- return -EINVAL;
+ goto err;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
- return -EINVAL;
+ goto err;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+
+ return -EINVAL;
}
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index c4a6274dd625..0a9c5bb0fa48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- goto out;
+ return 0;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1)
+ if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
-out:
- qed_ptt_release(p_hwfn, p_ptt);
- return 0;
-
+ return 0;
+ }
err:
- qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
/* EBLOCK responsible to free the allocated p_ent */
if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
kfree(p_ent);
+ else
+ p_ent->post_ent = p_en2;
p_ent = p_en2;
}
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
+/* Avoid overwriting of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ __set_bit(pos, p_spq->p_comp_bitmap);
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+}
+
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
p_ent->queue == &p_spq->unlimited_pending);
if (p_ent->queue == &p_spq->unlimited_pending) {
- /* This is an allocated p_ent which does not need to
- * return to pool.
- */
+ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
kfree(p_ent);
- return rc;
+
+ /* Return the entry which was actually posted */
+ p_ent = p_post_ent;
}
if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
spq_post_fail:
/* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
if (p_ent->elem.hdr.echo == echo) {
- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
list_del(&p_ent->list);
-
- /* Avoid overriding of SPQ entries when getting
- * out-of-order completions, by marking the completions
- * in a bitmap and increasing the chain consumer only
- * for the first successive completed entries.
- */
- __set_bit(pos, p_spq->p_comp_bitmap);
-
- while (test_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap)) {
- __clear_bit(p_spq->comp_bitmap_idx,
- p_spq->p_comp_bitmap);
- p_spq->comp_bitmap_idx++;
- qed_chain_return_produced(&p_spq->chain);
- }
-
+ qed_spq_comp_bmap_update(p_hwfn, echo);
p_spq->comp_count++;
found = p_ent;
break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
QED_MSG_SPQ,
"Got a completion without a callback function\n");
- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
- (found->queue == &p_spq->unlimited_pending))
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for returning its own entry into the
- * free list, unless it originally added the entry into the
- * unlimited pending list.
+ * free list.
*/
qed_spq_return_entry(p_hwfn, found);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9b08a9d9e151..ca6290fa0f30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
default:
DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
p_hwfn->hw_info.personality);
+ qed_sp_destroy_request(p_hwfn, p_ent);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9647578cbe6a..14f26bf3b388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
{
- u8 l4proto, opcode = 0, hdr_len = 0;
+ u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ tag_vlan = 1;
} else if (skb_vlan_tag_present(skb)) {
flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = skb_vlan_tag_get(skb);
+ tag_vlan = 1;
}
if (unlikely(adapter->tx_pvid)) {
- if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
- if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
flags = QLCNIC_FLAGS_VLAN_OOB;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d335d56..d11c16aeb19a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
- struct rmnet_priv *priv;
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
+ priv->real_dev = real_dev;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
- priv = netdev_priv(rmnet_dev);
priv->mux_id = id;
- priv->real_dev = real_dev;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b1b305f8f414..272b9ca66314 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -365,7 +365,8 @@ struct dma_features {
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index ca9d7e48034c..40d6356a7e73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -31,7 +31,7 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
<< ERDES1_BUFFER2_SIZE_SHIFT)
& ERDES1_BUFFER2_SIZE_MASK);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 77914c89d749..5ef91a790f9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index abc3f85270cd..d8c5bc412219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
static int set_16kib_bfsize(int mtu)
{
int ret = 0;
- if (unlikely(mtu >= BUF_SIZE_8KiB))
+ if (unlikely(mtu > BUF_SIZE_8KiB))
ret = BUF_SIZE_16KiB;
return ret;
}
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 3b7f10a5f06a..c5cae8e74dc4 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -56,7 +56,7 @@
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct 6 2018"
-static char version[] =
+static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
static void fza_tx_smt(struct net_device *dev)
{
struct fza_private *fp = netdev_priv(dev);
- struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ struct fza_buffer_tx __iomem *smt_tx_ptr;
int i, len;
u32 own;
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
if (!netif_queue_stopped(dev)) {
if (dev_nit_active(dev)) {
+ struct fza_buffer_tx *skb_data_ptr;
struct sk_buff *skb;
/* Length must be a multiple of 4 as only word
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
index b06acf32738e..93bda61be8e3 100644
--- a/drivers/net/fddi/defza.h
+++ b/drivers/net/fddi/defza.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
*
* Copyright (c) 2018 Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
#define FZA_RING_CMD 0x200400 /* command ring address */
#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
* size
+ */
/* Command constants. */
#define FZA_RING_CMD_MASK 0x7fffffff
#define FZA_RING_CMD_NOP 0x00000000 /* nop */
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index e86ea105c802..704537010453 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
return 0;
}
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
{
int rc, val;
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
- bcm5481x_config(phydev);
+ bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Aneg firstly. */
+ ret = genphy_config_aneg(phydev);
+
+ /* Then we can set up the delay. */
+ bcm54xx_config_clock_delay(phydev);
+
+ return ret;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
+ .config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7fc8508b5231..271e8adc39f1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
.flags = PHY_HAS_INTERRUPT,
}, {
.phy_id = 0x001cc816,
- .name = "RTL8201F 10/100Mbps Ethernet",
+ .name = "RTL8201F Fast Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 262e7a3c23cb..f2d01cb6f958 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+ dev->net->min_mtu = ETH_MIN_MTU;
+ dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
+ cancel_delayed_work_sync(&pdata->carrier_check);
+
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
*/
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+
+ if (ret)
+ schedule_delayed_work(&pdata->carrier_check,
+ CARRIER_CHECK_DELAY);
+
return ret;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2e65be8b1387..559d567693b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ndev)
nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk)
+ if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ }
#endif
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..9901afd804ce 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
+ blk_set_stacking_limits(&q->limits);
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
- if (!ctrl->p2p_client)
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
if (ns->p2p_dev) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
int inline_page_count;
};
-static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_workqueue(nvmet_rdma_delete_wq);
+ flush_scheduled_work();
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
}
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
/**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
- nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
- if (!nvmet_rdma_delete_wq) {
- ret = -ENOMEM;
- goto err_unreg_transport;
- }
-
return 0;
-err_unreg_transport:
- nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 0f27fad9fe94..5592437bb3d1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
- dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
+ /* ...but only set bus mask if we found valid dma-ranges earlier */
+ if (!ret)
+ dev->bus_dma_mask = mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 35c64a4295e0..fe6b13608e51 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
distance = of_read_number(matrix, 1);
matrix++;
+ if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+ (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+ pr_err("Invalid distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+ return -EINVAL;
+ }
+
numa_set_distance(nodea, nodeb, distance);
- pr_debug("distance[node%d -> node%d] = %d\n",
- nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
if (nodeb > nodea)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6843bc7ee9f2..04e294d1d16d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -87,6 +87,18 @@ struct qeth_dbf_info {
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
+static inline u32 qeth_get_device_id(struct ccw_device *cdev)
+{
+ struct ccw_dev_id dev_id;
+ u32 id;
+
+ ccw_device_get_id(cdev, &dev_id);
+ id = dev_id.devno;
+ id |= (u32) (dev_id.ssid << 16);
+
+ return id;
+}
+
/*
* Common IO related definitions
*/
@@ -97,7 +109,8 @@ struct qeth_dbf_info {
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
-#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
+#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
/**
* card stuff
@@ -830,6 +843,11 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+ return dev->netdev_ops != NULL;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
@@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *);
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
-int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
- long,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3274f13aad57..4bce5ae65a55 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "OSD_1000";
case QETH_LINK_TYPE_10GBIT_ETH:
return "OSD_10GIG";
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ return "OSD_25GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
@@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
- "available\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
+ CARD_DEVID(card));
return -ENOMEM;
}
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
- "rc=%i\n", dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+ rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
@@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
+
if (rc)
- QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
- "x%X \"%s\"\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card), rc,
- qeth_get_ipa_msg(rc));
+ QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+ ipa_name, com, CARD_DEVID(card), rc,
+ qeth_get_ipa_msg(rc));
else
- QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+ ipa_name, com, CARD_DEVID(card));
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
- QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
@@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
- dev_name(&cdev->dev), dstat, cstat);
+ QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+ CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
@@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
switch (PTR_ERR(irb)) {
case -EIO:
- QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
- dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+ CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
@@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
}
break;
default:
- QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
- dev_name(&cdev->dev), PTR_ERR(irb));
+ QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+ PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
}
@@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
- "0x%X dstat 0x%X\n",
- dev_name(&channel->ccwdev->dev), cstat, dstat);
+ QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+ CCW_DEVID(channel->ccwdev), cstat,
+ dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
if (channel->state != CH_STATE_ACTIVATING) {
dev_warn(&channel->ccwdev->dev, "The qeth device driver"
" failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
+ CCW_DEVID(channel->ccwdev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
return -ETIME;
}
@@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
"The adapter is used exclusively by another "
"host\n");
else
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
- "function level mismatch (sent: 0x%x, received: "
- "0x%x)\n", dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
"insufficient authorization\n");
break;
default:
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
(addr_t) iob, 0, 0, event_timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
- "ccw_device_start rc = %i\n",
- dev_name(&channel->ccwdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
+ CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irq(&card->lock);
list_del_init(&reply->list);
@@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
} else {
dev_warn(&card->gdev->dev,
"The qeth driver ran out of channel command buffers\n");
- QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
+ CARD_DEVID(card));
}
return iob;
@@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return 0;
default:
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
- "rc=%d\n",
- dev_name(&card->gdev->dev),
- cmd->hdr.return_code);
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+ CARD_DEVID(card),
+ cmd->hdr.return_code);
return 0;
}
}
@@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
} else
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
- "\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
+ CARD_DEVID(card));
return 0;
}
@@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+ access_ctrl_req->subcmd_code, CARD_DEVID(card),
+ cmd->data.setadapterparms.hdr.return_code);
switch (cmd->data.setadapterparms.hdr.return_code) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
@@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
}
break;
case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
- "deactivated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
- " activated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
@@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
- QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
- card->gdev->dev.kobj.name,
- rc);
+ QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
+ rc, CARD_DEVID(card));
rc = -EOPNOTSUPP;
}
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
@@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_OSN) &&
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
+ (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
@@ -4634,8 +4626,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
- QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
- QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
+ CARD_DEVID(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
@@ -4869,8 +4861,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
@@ -5086,7 +5078,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card)
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@@ -5096,8 +5088,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
qeth_update_from_chp_desc(card);
retry:
if (retries < 3)
- QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+ CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
@@ -5161,13 +5153,20 @@ retriable:
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
- netif_carrier_off(card->dev);
+ *carrier_ok = false;
} else {
rc = -ENODEV;
goto out;
}
} else {
- netif_carrier_on(card->dev);
+ *carrier_ok = true;
+ }
+
+ if (qeth_netdev_is_registered(card->dev)) {
+ if (*carrier_ok)
+ netif_carrier_on(card->dev);
+ else
+ netif_carrier_off(card->dev);
}
card->options.ipa4.supported_funcs = 0;
@@ -5201,8 +5200,8 @@ retriable:
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
+ CARD_DEVID(card), rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@@ -5481,11 +5480,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
-int qeth_send_setassparms(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, __u16 len, long data,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *reply_param)
+static int qeth_send_setassparms(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 len,
+ long data, int (*reply_cb)(struct qeth_card *,
+ struct qeth_reply *,
+ unsigned long),
+ void *reply_param)
{
int rc;
struct qeth_ipa_cmd *cmd;
@@ -5501,7 +5501,6 @@ int qeth_send_setassparms(struct qeth_card *card,
rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
@@ -6170,8 +6169,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
WARN_ON_ONCE(1);
}
- /* fallthrough from high to low, to select all legal speeds: */
+ /* partially does fall through, to also select lower speeds */
switch (maxspeed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
case SPEED_10000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
@@ -6254,6 +6259,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.speed = SPEED_10000;
cmd->base.port = PORT_FIBRE;
break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ cmd->base.speed = SPEED_25000;
+ cmd->base.port = PORT_FIBRE;
+ break;
default:
cmd->base.speed = SPEED_10;
cmd->base.port = PORT_TP;
@@ -6320,6 +6329,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
case CARD_INFO_PORTS_10G:
cmd->base.speed = SPEED_10000;
break;
+ case CARD_INFO_PORTS_25G:
+ cmd->base.speed = SPEED_25000;
+ break;
}
return 0;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e85090467afe..3e54be201b27 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -90,6 +90,7 @@ enum qeth_link_types {
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
+ QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
CARD_INFO_PORTS_100M = 0x00000006,
CARD_INFO_PORTS_1G = 0x00000007,
CARD_INFO_PORTS_10G = 0x00000008,
+ CARD_INFO_PORTS_25G = 0x0000000A,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
@@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
- struct qeth_arp_cache_entry add_arp_entry;
+ struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
__u8 ip[16];
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 23aaf373f631..2914a1a69f83 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
- QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
- mac, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
+ CARD_DEVID(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
@@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
@@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "L2sdvcb");
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+ QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
cmd->data.setdelvlan.vlan_id,
- QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+ CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
@@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
@@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
/* fall back once more: */
}
@@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_features = qeth_set_features
};
-static int qeth_l2_setup_netdev(struct qeth_card *card)
+static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
if (rc)
card->dev->netdev_ops = NULL;
return rc;
@@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
@@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- rc = qeth_l2_setup_netdev(card);
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0b161cc1fd2e..f08b745c2007 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
- if (recover && card->options.sniffer)
- return;
-
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
@@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
@@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int rc = 0;
int cnt = 3;
+ if (card->options.sniffer)
+ return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
@@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
{
int rc = 0;
+ if (card->options.sniffer)
+ return 0;
+
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "deladdr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
}
break;
default:
- QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
- cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
+ cmd->data.diagass.action, CARD_DEVID(card));
}
return 0;
@@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
qeth_l3_handle_promisc_mode(card);
}
-static const char *qeth_l3_arp_get_error_cause(int *rc)
+static int qeth_l3_arp_makerc(int rc)
{
- switch (*rc) {
- case QETH_IPA_ARP_RC_FAILED:
- *rc = -EIO;
- return "operation failed";
+ switch (rc) {
+ case IPA_RC_SUCCESS:
+ return 0;
case QETH_IPA_ARP_RC_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "operation not supported";
- case QETH_IPA_ARP_RC_OUT_OF_RANGE:
- *rc = -EINVAL;
- return "argument out of range";
case QETH_IPA_ARP_RC_Q_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "query operation not supported";
+ return -EOPNOTSUPP;
+ case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+ return -EINVAL;
case QETH_IPA_ARP_RC_Q_NO_DATA:
- *rc = -ENOENT;
- return "no query data available";
+ return -ENOENT;
default:
- return "unknown error";
+ return -EIO;
}
}
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
- int tmp;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
no_entries);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
- "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- int tmp;
int rc;
QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
rc = qeth_l3_send_ipa_arp_cmd(card, iob,
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
qeth_l3_arp_query_cb, (void *)qinfo);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2,
- "Error while querying ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
-
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1793,15 +1777,18 @@ out:
return rc;
}
-static int qeth_l3_arp_add_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
+static int qeth_l3_arp_modify_entry(struct qeth_card *card,
+ struct qeth_arp_cache_entry *entry,
+ enum qeth_arp_process_subcmds arp_cmd)
{
+ struct qeth_arp_cache_entry *cmd_entry;
struct qeth_cmd_buffer *iob;
- char buf[16];
- int tmp;
int rc;
- QETH_CARD_TEXT(card, 3, "arpadent");
+ if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
+ QETH_CARD_TEXT(card, 3, "arpadd");
+ else
+ QETH_CARD_TEXT(card, 3, "arpdel");
/*
* currently GuestLAN only supports the ARP assist function
@@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
return -EOPNOTSUPP;
}
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_ADD_ENTRY,
- sizeof(struct qeth_arp_cache_entry),
- QETH_PROT_IPV4);
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
+ sizeof(*cmd_entry), QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- sizeof(struct qeth_arp_cache_entry),
- (unsigned long) entry,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
- "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
-}
-
-static int qeth_l3_arp_remove_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
-{
- struct qeth_cmd_buffer *iob;
- char buf[16] = {0, };
- int tmp;
- int rc;
- QETH_CARD_TEXT(card, 3, "arprment");
+ cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
+ ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
+ memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
+ arp_cmd, CARD_DEVID(card), rc);
- /*
- * currently GuestLAN only supports the ARP assist function
- * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
- * thus we say EOPNOTSUPP for this ARP function
- */
- if (card->info.guestlan)
- return -EOPNOTSUPP;
- if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
- return -EOPNOTSUPP;
- }
- memcpy(buf, entry, 12);
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_REMOVE_ENTRY,
- 12,
- QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- 12, (unsigned long)buf,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- memset(buf, 0, 16);
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
- " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
int rc;
- int tmp;
QETH_CARD_TEXT(card, 3, "arpflush");
@@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
+ enum qeth_arp_process_subcmds arp_cmd;
int rc = 0;
switch (cmd) {
@@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_add_entry(card, &arp_entry);
- break;
case SIOC_QETH_ARP_REMOVE_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_remove_entry(card, &arp_entry);
- break;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ return -EFAULT;
+
+ arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
+ IPA_CMD_ASS_ARP_ADD_ENTRY :
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+ return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
case SIOC_QETH_ARP_FLUSH_CACHE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
@@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
-static int qeth_l3_setup_netdev(struct qeth_card *card)
+static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
unsigned int headroom;
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
if (card->info.type == QETH_CARD_TYPE_OSD ||
@@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
out:
if (rc)
card->dev->netdev_ops = NULL;
@@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card);
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c015536360d..e4f608815c05 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,8 +78,6 @@ source "drivers/staging/goldfish/Kconfig"
source "drivers/staging/netlogic/Kconfig"
-source "drivers/staging/mt29f_spinand/Kconfig"
-
source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a79b3fe20cf0..5868631e8f1b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
diff --git a/drivers/staging/mt29f_spinand/Kconfig b/drivers/staging/mt29f_spinand/Kconfig
deleted file mode 100644
index f3f9cb3b5c35..000000000000
--- a/drivers/staging/mt29f_spinand/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config MTD_SPINAND_MT29F
- tristate "SPINAND Device Support for Micron"
- depends on MTD_NAND && SPI
- help
- This enables support for accessing Micron SPI NAND flash
- devices.
- If you have Micron SPI NAND chip say yes.
-
- If unsure, say no here.
-
-config MTD_SPINAND_ONDIEECC
- bool "Use SPINAND internal ECC"
- depends on MTD_SPINAND_MT29F
- help
- Internal ECC.
- Enables Hardware ECC support for Micron SPI NAND.
diff --git a/drivers/staging/mt29f_spinand/Makefile b/drivers/staging/mt29f_spinand/Makefile
deleted file mode 100644
index e47af0f7fda9..000000000000
--- a/drivers/staging/mt29f_spinand/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand.o
diff --git a/drivers/staging/mt29f_spinand/TODO b/drivers/staging/mt29f_spinand/TODO
deleted file mode 100644
index a2209b72d371..000000000000
--- a/drivers/staging/mt29f_spinand/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO:
- - Tested on XLP platform, needs to be tested on other platforms.
- - Checkpatch.pl cleanups
- - Sparce fixes.
- - Clean up coding style to meet kernel standard.
-
-Please send patches
-To:
-Kamlakant Patel <kamlakant.patel@broadcom.com>
-Cc:
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Mona Anonuevo <manonuevo@micron.com>
-linux-mtd@lists.infradead.org
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
deleted file mode 100644
index def8a1f57d1c..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- * Copyright (c) 2003-2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/spi/spi.h>
-
-#include "mt29f_spinand.h"
-
-#define BUFSIZE (10 * 64 * 2048)
-#define CACHE_BUF 2112
-/*
- * OOB area specification layout: Total 32 available free bytes.
- */
-
-static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = info->priv;
-
- return state;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int enable_hw_ecc;
-static int enable_read_hw_ecc;
-
-static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section > 3)
- return -ERANGE;
-
- oobregion->offset = (section * 16) + 1;
- oobregion->length = 6;
-
- return 0;
-}
-
-static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section > 3)
- return -ERANGE;
-
- oobregion->offset = (section * 16) + 8;
- oobregion->length = 8;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
- .ecc = spinand_ooblayout_64_ecc,
- .free = spinand_ooblayout_64_free,
-};
-#endif
-
-/**
- * spinand_cmd - process a command to send to the SPI Nand
- * Description:
- * Set up the command buffer to send to the SPI controller.
- * The command buffer has to initialized to 0.
- */
-
-static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
-{
- struct spi_message message;
- struct spi_transfer x[4];
- u8 dummy = 0xff;
-
- spi_message_init(&message);
- memset(x, 0, sizeof(x));
-
- x[0].len = 1;
- x[0].tx_buf = &cmd->cmd;
- spi_message_add_tail(&x[0], &message);
-
- if (cmd->n_addr) {
- x[1].len = cmd->n_addr;
- x[1].tx_buf = cmd->addr;
- spi_message_add_tail(&x[1], &message);
- }
-
- if (cmd->n_dummy) {
- x[2].len = cmd->n_dummy;
- x[2].tx_buf = &dummy;
- spi_message_add_tail(&x[2], &message);
- }
-
- if (cmd->n_tx) {
- x[3].len = cmd->n_tx;
- x[3].tx_buf = cmd->tx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- if (cmd->n_rx) {
- x[3].len = cmd->n_rx;
- x[3].rx_buf = cmd->rx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- return spi_sync(spi, &message);
-}
-
-/**
- * spinand_read_id - Read SPI Nand ID
- * Description:
- * read two ID bytes from the SPI Nand device
- */
-static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
-{
- int retval;
- u8 nand_id[3];
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_READ_ID;
- cmd.n_rx = 3;
- cmd.rx_buf = &nand_id[0];
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "error %d reading id\n", retval);
- return retval;
- }
- id[0] = nand_id[1];
- id[1] = nand_id[2];
- return retval;
-}
-
-/**
- * spinand_read_status - send command 0xf to the SPI Nand status register
- * Description:
- * After a read, write, or erase, the Nand device is expected to set the
- * busy status.
- * This function allows reading the status of a read, write, or erase
- * command.
- * Once the status becomes ready, the other status bits are also valid.
- */
-static int spinand_read_status(struct spi_device *spi_nand, u8 *status)
-{
- struct spinand_cmd cmd = {0};
- int ret;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_STATUS;
- cmd.n_rx = 1;
- cmd.rx_buf = status;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
-
- return ret;
-}
-
-#define MAX_WAIT_JIFFIES (40 * HZ)
-static int wait_till_ready(struct spi_device *spi_nand)
-{
- unsigned long deadline;
- int retval;
- u8 stat = 0;
-
- deadline = jiffies + MAX_WAIT_JIFFIES;
- do {
- retval = spinand_read_status(spi_nand, &stat);
- if (retval < 0)
- return -1;
- if (!(stat & 0x1))
- break;
-
- cond_resched();
- } while (!time_after_eq(jiffies, deadline));
-
- if ((stat & 0x1) == 0)
- return 0;
-
- return -1;
-}
-
-/**
- * spinand_get_otp - send command 0xf to read the SPI Nand OTP register
- * Description:
- * There is one bit (bit 0x10) to set or clear the internal ECC:
- * set the bit to 1 to enable the chip's internal ECC,
- * clear the bit to 0 to disable it.
- */
-static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
-{
- struct spinand_cmd cmd = {0};
- int retval;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_OTP;
- cmd.n_rx = 1;
- cmd.rx_buf = otp;
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d get otp\n", retval);
- return retval;
-}
-
-/**
- * spinand_set_otp - send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit (bit 0x10) to set or clear the internal ECC:
- * set the bit to 1 to enable the chip's internal ECC,
- * clear the bit to 0 to disable it.
- */
-static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
-{
- int retval;
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WRITE_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_OTP;
- cmd.n_tx = 1;
- cmd.tx_buf = otp;
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d set otp\n", retval);
-
- return retval;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-/**
- * spinand_enable_ecc - send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit (bit 0x10) to set or clear the internal ECC:
- * set the bit to 1 to enable the chip's internal ECC,
- * clear the bit to 0 to disable it.
- */
-static int spinand_enable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK)
- return 0;
- otp |= OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
-}
-#endif
-
-static int spinand_disable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
- otp &= ~OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
- }
- return 0;
-}
-
-/**
- * spinand_write_enable - send command 0x06 to enable write or erase the
- * Nand cells
- * Description:
- * Before writing to or erasing the Nand cells, the write enable bit has
- * to be set.
- * After the write or erase, the write enable bit is automatically
- * cleared (status register bit 2)
- * Setting bit 2 of the status register has the same effect
- */
-static int spinand_write_enable(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WR_ENABLE;
- return spinand_cmd(spi_nand, &cmd);
-}
-
-static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_READ;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_read_from_cache - send command 0x03 to read out the data from the
- * cache register (2112 bytes max)
- * Description:
- * The read can specify 1 to 2112 bytes of data read at the corresponding
- * locations.
- * No tRd delay.
- */
-static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
- u16 byte_id, u16 len, u8 *rbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_READ_RDM;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.addr[2] = (u8)(0xff);
- cmd.n_dummy = 0;
- cmd.n_rx = len;
- cmd.rx_buf = rbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_read_page - read a page
- * @page_id: the physical page number
- * @offset: the location from 0 to 2111
- * @len: number of bytes to read
- * @rbuf: read buffer to hold @len bytes
- *
- * Description:
- * The read issues two commands to the Nand: 0x13 and 0x03.
- * Poll the status register to wait out the tRD time.
- */
-static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
- u16 offset, u16 len, u8 *rbuf)
-{
- int ret;
- u8 status = 0;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- if (spinand_enable_ecc(spi_nand) < 0)
- dev_err(&spi_nand->dev, "enable HW ECC failed!");
- }
-#endif
- ret = spinand_read_page_to_cache(spi_nand, page_id);
- if (ret < 0)
- return ret;
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "WAIT timedout!!!\n");
-
- while (1) {
- ret = spinand_read_status(spi_nand, &status);
- if (ret < 0) {
- dev_err(&spi_nand->dev,
- "err %d read status register\n", ret);
- return ret;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- dev_err(&spi_nand->dev, "ecc error, page=%d\n",
- page_id);
- return 0;
- }
- break;
- }
- }
-
- ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "read from cache failed!!\n");
- return ret;
- }
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- ret = spinand_disable_ecc(spi_nand);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return ret;
- }
- enable_read_hw_ecc = 0;
- }
-#endif
- return ret;
-}
-
-/**
- * spinand_program_data_to_cache - write a page to cache
- * @byte_id: the location to write to the cache
- * @len: number of bytes to write
- * @wbuf: write buffer holding @len bytes
- *
- * Description:
- * The write command used here is 0x84--indicating that the cache is
- * not cleared first.
- * Since it is writing the data to cache, there is no tPROG time.
- */
-static int spinand_program_data_to_cache(struct spi_device *spi_nand,
- u16 page_id, u16 byte_id,
- u16 len, u8 *wbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
- cmd.n_addr = 2;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.n_tx = len;
- cmd.tx_buf = wbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_execute - write a page from cache to the Nand array
- * @page_id: the physical page location to write the page.
- *
- * Description:
- * The write command used here is 0x10--indicating the cache is writing to
- * the Nand array.
- * Need to wait for tPROG time to finish the transaction.
- */
-static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_PROG_PAGE_EXC;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_page - write a page
- * @page_id: the physical page location to write the page.
- * @offset: the location from the cache starting from 0 to 2111
- * @len: the number of bytes to write
- * @buf: the buffer holding @len bytes
- *
- * Description:
- * The commands used here are 0x06, 0x84, and 0x10: write enable is
- * sent first, followed by the program-to-cache command and the
- * program-execute command.
- * Poll to wait for the tPROG time to finish the transaction.
- */
-static int spinand_program_page(struct spi_device *spi_nand,
- u16 page_id, u16 offset, u16 len, u8 *buf)
-{
- int retval;
- u8 status = 0;
- u8 *wbuf;
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- unsigned int i, j;
-
- wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
- if (!wbuf)
- return -ENOMEM;
-
- enable_read_hw_ecc = 1;
- retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
- return retval;
- }
-
- for (i = offset, j = 0; i < len; i++, j++)
- wbuf[i] &= buf[j];
-
- if (enable_hw_ecc) {
- retval = spinand_enable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "enable ecc failed!!\n");
- return retval;
- }
- }
-#else
- wbuf = buf;
-#endif
- retval = spinand_write_enable(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "write enable failed!!\n");
- return retval;
- }
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_program_data_to_cache(spi_nand, page_id,
- offset, len, wbuf);
- if (retval < 0)
- return retval;
- retval = spinand_program_execute(spi_nand, page_id);
- if (retval < 0)
- return retval;
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
- dev_err(&spi_nand->dev,
- "program error, page %d\n", page_id);
- return -1;
- }
- break;
- }
- }
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_hw_ecc) {
- retval = spinand_disable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return retval;
- }
- enable_hw_ecc = 0;
- }
-#endif
-
- return 0;
-}
-
-/**
- * spinand_erase_block_erase - erase a block
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The command used here is 0xd8--indicating an erase command to erase
- * one block--64 pages
- * Need to wait for tERS.
- */
-static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = block_id;
- cmd.cmd = CMD_ERASE_BLK;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_erase_block - erase a block
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The commands used here are 0x06 and 0xd8--indicating an erase
- * command to erase one block--64 pages
- * It first sets the write enable bit (0x06 command),
- * and then sends the 0xd8 erase command.
- * Poll to wait for the tERS time to complete the transaction.
- */
-static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
-{
- int retval;
- u8 status = 0;
-
- retval = spinand_write_enable(spi_nand);
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_erase_block_erase(spi_nand, block_id);
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
- dev_err(&spi_nand->dev,
- "erase error, block %d\n", block_id);
- return -1;
- }
- break;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int spinand_write_page_hwecc(struct nand_chip *chip,
- const u8 *buf, int oob_required,
- int page)
-{
- const u8 *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
-
- enable_hw_ecc = 1;
- return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
-}
-
-static int spinand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
-{
- int retval;
- u8 status;
- u8 *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
-
- enable_read_hw_ecc = 1;
-
- nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
- if (oob_required)
- chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
-
- while (1) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- pr_info("spinand: ECC error\n");
- mtd->ecc_stats.failed++;
- } else if ((status & STATUS_ECC_MASK) ==
- STATUS_ECC_1BIT_CORRECTED)
- mtd->ecc_stats.corrected++;
- break;
- }
- }
- return 0;
-}
-#endif
-
-static void spinand_select_chip(struct nand_chip *chip, int dev)
-{
-}
-
-static u8 spinand_read_byte(struct nand_chip *chip)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
- u8 data;
-
- data = state->buf[state->buf_ptr];
- state->buf_ptr++;
- return data;
-}
-
-static int spinand_wait(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
-
- unsigned long timeo = jiffies;
- int retval, state = chip->state;
- u8 status;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
-
- while (time_before(jiffies, timeo)) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY)
- return 0;
-
- cond_resched();
- }
- return 0;
-}
-
-static void spinand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
-
- memcpy(state->buf + state->buf_ptr, buf, len);
- state->buf_ptr += len;
-}
-
-static void spinand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
-
- memcpy(buf, state->buf + state->buf_ptr, len);
- state->buf_ptr += len;
-}
-
-/*
- * spinand_reset - send the RESET command (0xff) to the Nand device.
- */
-static void spinand_reset(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_RESET;
-
- if (spinand_cmd(spi_nand, &cmd) < 0)
- pr_info("spinand reset failed!\n");
-
- /* elapse 1ms before issuing any other command */
- usleep_range(1000, 2000);
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!\n");
-}
-
-static void spinand_cmdfunc(struct nand_chip *chip, unsigned int command,
- int column, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = info->priv;
-
- switch (command) {
- /*
- * READ0 - read in first 0x800 bytes
- */
- case NAND_CMD_READ1:
- case NAND_CMD_READ0:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
- break;
- /* READOOB reads only the OOB because no ECC is performed. */
- case NAND_CMD_READOOB:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
- break;
- case NAND_CMD_RNDOUT:
- state->buf_ptr = column;
- break;
- case NAND_CMD_READID:
- state->buf_ptr = 0;
- spinand_read_id(info->spi, state->buf);
- break;
- case NAND_CMD_PARAM:
- state->buf_ptr = 0;
- break;
- /* ERASE1 stores the block and page address */
- case NAND_CMD_ERASE1:
- spinand_erase_block(info->spi, page);
- break;
- /* ERASE2 uses the block and page address from ERASE1 */
- case NAND_CMD_ERASE2:
- break;
- /* SEQIN sets up the addr buffer and all registers except the length */
- case NAND_CMD_SEQIN:
- state->col = column;
- state->row = page;
- state->buf_ptr = 0;
- break;
- /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
- case NAND_CMD_PAGEPROG:
- spinand_program_page(info->spi, state->row, state->col,
- state->buf_ptr, state->buf);
- break;
- case NAND_CMD_STATUS:
- spinand_get_otp(info->spi, state->buf);
- if (!(state->buf[0] & 0x80))
- state->buf[0] = 0x80;
- state->buf_ptr = 0;
- break;
- /* RESET command */
- case NAND_CMD_RESET:
- if (wait_till_ready(info->spi))
- dev_err(&info->spi->dev, "WAIT timedout!!!\n");
- /* a minimum of 250us must elapse before issuing RESET cmd*/
- usleep_range(250, 1000);
- spinand_reset(info->spi);
- break;
- default:
- dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
- }
-}
-
-/**
- * spinand_lock_block - send write register 0x1f command to the Nand device
- *
- * Description:
- * After power up, all the Nand blocks are locked. This function allows
- * one to unlock the blocks so that they can be written or erased.
- */
-static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
-{
- struct spinand_cmd cmd = {0};
- int ret;
- u8 otp = 0;
-
- ret = spinand_get_otp(spi_nand, &otp);
-
- cmd.cmd = CMD_WRITE_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_BLOCK_LOCK;
- cmd.n_tx = 1;
- cmd.tx_buf = &lock;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "error %d lock block\n", ret);
-
- return ret;
-}
-
-/**
- * spinand_probe - [spinand Interface]
- * @spi_nand: the registered spi device.
- *
- * Description:
- * Set up the device driver parameters to make the device available.
- */
-static int spinand_probe(struct spi_device *spi_nand)
-{
- struct mtd_info *mtd;
- struct nand_chip *chip;
- struct spinand_info *info;
- struct spinand_state *state;
-
- info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->spi = spi_nand;
-
- spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);
-
- state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
- GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- info->priv = state;
- state->buf_ptr = 0;
- state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
- if (!state->buf)
- return -ENOMEM;
-
- chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = 0x200;
- chip->ecc.bytes = 0x6;
- chip->ecc.steps = 0x4;
-
- chip->ecc.strength = 1;
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- chip->ecc.read_page = spinand_read_page_hwecc;
- chip->ecc.write_page = spinand_write_page_hwecc;
-#else
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
- if (spinand_disable_ecc(spi_nand) < 0)
- dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
- __func__);
-#endif
-
- nand_set_flash_node(chip, spi_nand->dev.of_node);
- nand_set_controller_data(chip, info);
- chip->legacy.read_buf = spinand_read_buf;
- chip->legacy.write_buf = spinand_write_buf;
- chip->legacy.read_byte = spinand_read_byte;
- chip->legacy.cmdfunc = spinand_cmdfunc;
- chip->legacy.waitfunc = spinand_wait;
- chip->options |= NAND_CACHEPRG;
- chip->select_chip = spinand_select_chip;
- chip->legacy.set_features = nand_get_set_features_notsupp;
- chip->legacy.get_features = nand_get_set_features_notsupp;
-
- mtd = nand_to_mtd(chip);
-
- dev_set_drvdata(&spi_nand->dev, mtd);
-
- mtd->dev.parent = &spi_nand->dev;
- mtd->oobsize = 64;
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
-#endif
-
- if (nand_scan(chip, 1))
- return -ENXIO;
-
- return mtd_device_register(mtd, NULL, 0);
-}
-
-/**
- * spinand_remove - remove the device driver
- * @spi: the spi device.
- *
- * Description:
- * Remove the device driver parameters and free up allocated memory.
- */
-static int spinand_remove(struct spi_device *spi)
-{
- mtd_device_unregister(dev_get_drvdata(&spi->dev));
-
- return 0;
-}
-
-static const struct of_device_id spinand_dt[] = {
- { .compatible = "spinand,mt29f", },
- {}
-};
-MODULE_DEVICE_TABLE(of, spinand_dt);
-
-/*
- * Device name structure description
- */
-static struct spi_driver spinand_driver = {
- .driver = {
- .name = "mt29f",
- .of_match_table = spinand_dt,
- },
- .probe = spinand_probe,
- .remove = spinand_remove,
-};
-
-module_spi_driver(spinand_driver);
-
-MODULE_DESCRIPTION("SPI NAND driver for Micron");
-MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
deleted file mode 100644
index 457dc7ffdaf1..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*-
- * Copyright 2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Henry Pan <hspan@micron.com>
- *
- * based on nand.h
- */
-#ifndef __LINUX_MTD_SPI_NAND_H
-#define __LINUX_MTD_SPI_NAND_H
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtd/mtd.h>
-
-/* cmd */
-#define CMD_READ 0x13
-#define CMD_READ_RDM 0x03
-#define CMD_PROG_PAGE_CLRCACHE 0x02
-#define CMD_PROG_PAGE 0x84
-#define CMD_PROG_PAGE_EXC 0x10
-#define CMD_ERASE_BLK 0xd8
-#define CMD_WR_ENABLE 0x06
-#define CMD_WR_DISABLE 0x04
-#define CMD_READ_ID 0x9f
-#define CMD_RESET 0xff
-#define CMD_READ_REG 0x0f
-#define CMD_WRITE_REG 0x1f
-
-/* feature/ status reg */
-#define REG_BLOCK_LOCK 0xa0
-#define REG_OTP 0xb0
-#define REG_STATUS 0xc0 /* timing */
-
-/* status */
-#define STATUS_OIP_MASK 0x01
-#define STATUS_READY 0
-#define STATUS_BUSY BIT(0)
-
-#define STATUS_E_FAIL_MASK 0x04
-#define STATUS_E_FAIL BIT(2)
-
-#define STATUS_P_FAIL_MASK 0x08
-#define STATUS_P_FAIL BIT(3)
-
-#define STATUS_ECC_MASK 0x30
-#define STATUS_ECC_1BIT_CORRECTED BIT(4)
-#define STATUS_ECC_ERROR BIT(5)
-#define STATUS_ECC_RESERVED (BIT(5) | BIT(4))
-
-/*ECC enable defines*/
-#define OTP_ECC_MASK 0x10
-#define OTP_ECC_OFF 0
-#define OTP_ECC_ON 1
-
-#define ECC_DISABLED
-#define ECC_IN_NAND
-#define ECC_SOFT
-
-/* block lock */
-#define BL_ALL_LOCKED 0x38
-#define BL_1_2_LOCKED 0x30
-#define BL_1_4_LOCKED 0x28
-#define BL_1_8_LOCKED 0x20
-#define BL_1_16_LOCKED 0x18
-#define BL_1_32_LOCKED 0x10
-#define BL_1_64_LOCKED 0x08
-#define BL_ALL_UNLOCKED 0
-
-struct spinand_info {
- struct spi_device *spi;
- void *priv;
-};
-
-struct spinand_state {
- u32 col;
- u32 row;
- int buf_ptr;
- u8 *buf;
-};
-
-struct spinand_cmd {
- u8 cmd;
- u32 n_addr; /* Number of address bytes */
- u8 addr[3]; /* Reg Offset */
- u32 n_dummy; /* Number of dummy bytes */
- u32 n_tx; /* Number of tx bytes */
- u8 *tx_buf; /* Tx buf */
- u32 n_rx; /* Number of rx bytes */
- u8 *rx_buf; /* Rx buf */
-};
-
-int spinand_mtd(struct mtd_info *mtd);
-void spinand_mtd_release(struct mtd_info *mtd);
-
-#endif /* __LINUX_MTD_SPI_NAND_H */
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ff6ba6d86cd8..cc56cb3b3eca 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->rx_timer.function = rx_timer_fn;
+ s->chan_rx_saved = s->chan_rx = chan;
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
-
- s->chan_rx_saved = s->chan_rx = chan;
}
}
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
+ unsigned int type = port->port.type; /* uart_remove_... clears it */
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
- port->port.type == PORT_HSCIF) {
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 7576ceace571..f438eaa68246 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
#else /* IBSHIFT */
return tty_termios_baud_rate(termios);
#endif /* IBSHIFT */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 55370e651db3..41ec8e5010f3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
- do_update_region(vc, (unsigned long) start, count);
+ do_update_region(vc, (unsigned long)(start + offset), count);
}
static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index e36d6c73c4a4..78118883f96c 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -23,6 +23,16 @@ config TYPEC_UCSI
if TYPEC_UCSI
+config UCSI_CCG
+ tristate "UCSI Interface Driver for Cypress CCGx"
+ depends on I2C
+ help
+ This driver enables UCSI support on platforms that expose a
+ Cypress CCGx Type-C controller over an I2C interface.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_ccg.
+
config UCSI_ACPI
tristate "UCSI ACPI Interface Driver"
depends on ACPI
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 7afbea512207..2f4900b26210 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -8,3 +8,5 @@ typec_ucsi-y := ucsi.o
typec_ucsi-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644
index 000000000000..de8a43bdff68
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+ struct device *dev;
+ struct ucsi *ucsi;
+ struct ucsi_ppm ppm;
+ struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG 0x06
+#define CCGX_RAB_UCSI_CONTROL 0x39
+#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+ unsigned char buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = data,
+ },
+ };
+ u32 rlen, rem_len = len, max_read_len = len;
+ int status;
+
+ /* check any max_read_len limitation on i2c adapter */
+ if (quirks && quirks->max_read_len)
+ max_read_len = quirks->max_read_len;
+
+ while (rem_len > 0) {
+ msgs[1].buf = &data[len - rem_len];
+ rlen = min_t(u16, rem_len, max_read_len);
+ msgs[1].len = rlen;
+ put_unaligned_le16(rab, buf);
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ return status;
+ }
+ rab += rlen;
+ rem_len -= rlen;
+ }
+
+ return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ unsigned char *buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ }
+ };
+ int status;
+
+ buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le16(rab, buf);
+ memcpy(buf + sizeof(rab), data, len);
+
+ msgs[0].len = len + sizeof(rab);
+ msgs[0].buf = buf;
+
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ kfree(buf);
+ return status;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+ unsigned int count = 10;
+ u8 data;
+ int status;
+
+ data = CCGX_RAB_UCSI_CONTROL_STOP;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ data = CCGX_RAB_UCSI_CONTROL_START;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ /*
+ * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI control
+ * register write above will push a response which must be cleared.
+ */
+ do {
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ if (!data)
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ usleep_range(10000, 11000);
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+ status = ccg_write(uc, rab, ppm +
+ offsetof(struct ucsi_data, message_out),
+ sizeof(uc->ppm.data->message_out));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+ return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+ sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+ status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+ sizeof(uc->ppm.data->cci));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+ return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+ sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+ int status;
+ unsigned char data;
+
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+ int status;
+
+ status = ucsi_ccg_recv_data(uc);
+ if (status < 0)
+ return status;
+
+ /* ack interrupt to allow next command to run */
+ return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+ ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+ struct ucsi_ccg *uc = data;
+
+ ucsi_notify(uc->ucsi);
+
+ return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ucsi_ccg *uc;
+ int status;
+ u16 rab;
+
+ uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+ if (!uc)
+ return -ENOMEM;
+
+ uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+ if (!uc->ppm.data)
+ return -ENOMEM;
+
+ uc->ppm.cmd = ucsi_ccg_cmd;
+ uc->ppm.sync = ucsi_ccg_sync;
+ uc->dev = dev;
+ uc->client = client;
+
+ /* reset ccg device and initialize ucsi */
+ status = ucsi_ccg_init(uc);
+ if (status < 0) {
+ dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+ return status;
+ }
+
+ status = devm_request_threaded_irq(dev, client->irq, NULL,
+ ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
+ if (status < 0) {
+ dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+ return status;
+ }
+
+ uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+ if (IS_ERR(uc->ucsi)) {
+ dev_err(uc->dev, "ucsi_register_ppm failed\n");
+ return PTR_ERR(uc->ucsi);
+ }
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+ status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+ offsetof(struct ucsi_data, version),
+ sizeof(uc->ppm.data->version));
+ if (status < 0) {
+ ucsi_unregister_ppm(uc->ucsi);
+ return status;
+ }
+
+ i2c_set_clientdata(client, uc);
+ return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+ struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+ ucsi_unregister_ppm(uc->ucsi);
+
+ return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+ {"ccgx-ucsi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+ .driver = {
+ .name = "ucsi_ccg",
+ },
+ .probe = ucsi_ccg_probe,
+ .remove = ucsi_ccg_remove,
+ .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f15f89df1f36..7ea6fb6a2e5d 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
- pr_debug("Failed to decrease reservation for DMA buffer\n");
+ pr_debug("Failed to increase reservation for DMA buffer\n");
ret = -EFAULT;
} else {
ret = 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index df1ed37c3269..de01a6d0059d 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -21,15 +21,9 @@
MODULE_LICENSE("GPL");
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
- "the privcmd-buf device per open file");
-
struct privcmd_buf_private {
struct mutex lock;
struct list_head list;
- unsigned int allocated;
};
struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
unsigned int i;
- vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
list_del(&vma_priv->list);
for (i = 0; i < vma_priv->n_pages; i++)
- if (vma_priv->pages[i])
- __free_page(vma_priv->pages[i]);
+ __free_page(vma_priv->pages[i]);
kfree(vma_priv);
}
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned int i;
int ret = 0;
- if (!(vma->vm_flags & VM_SHARED) || count > limit ||
- file_priv->allocated + count > limit)
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (!vma_priv)
return -ENOMEM;
- vma_priv->n_pages = count;
- count = 0;
- for (i = 0; i < vma_priv->n_pages; i++) {
+ for (i = 0; i < count; i++) {
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vma_priv->pages[i])
break;
- count++;
+ vma_priv->n_pages++;
}
mutex_lock(&file_priv->lock);
- file_priv->allocated += count;
-
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80953528572d..68f322f600a0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3163,6 +3163,9 @@ void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b0ab41da91d1..3f0b6d1936e8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1664,9 +1664,8 @@ static int cleaner_kthread(void *arg)
struct btrfs_root *root = arg;
struct btrfs_fs_info *fs_info = root->fs_info;
int again;
- struct btrfs_trans_handle *trans;
- do {
+ while (1) {
again = 0;
/* Make the cleaner go to sleep early. */
@@ -1715,42 +1714,16 @@ static int cleaner_kthread(void *arg)
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ if (kthread_should_park())
+ kthread_parkme();
+ if (kthread_should_stop())
+ return 0;
if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!kthread_should_stop())
- schedule();
+ schedule();
__set_current_state(TASK_RUNNING);
}
- } while (!kthread_should_stop());
-
- /*
- * Transaction kthread is stopped before us and wakes us up.
- * However we might have started a new transaction and COWed some
- * tree blocks when deleting unused block groups for example. So
- * make sure we commit the transaction we started to have a clean
- * shutdown when evicting the btree inode - if it has dirty pages
- * when we do the final iput() on it, eviction will trigger a
- * writeback for it which will fail with null pointer dereferences
- * since work queues and other resources were already released and
- * destroyed by the time the iput/eviction/writeback is made.
- */
- trans = btrfs_attach_transaction(root);
- if (IS_ERR(trans)) {
- if (PTR_ERR(trans) != -ENOENT)
- btrfs_err(fs_info,
- "cleaner transaction attach returned %ld",
- PTR_ERR(trans));
- } else {
- int ret;
-
- ret = btrfs_commit_transaction(trans);
- if (ret)
- btrfs_err(fs_info,
- "cleaner open transaction commit returned %d",
- ret);
}
-
- return 0;
}
static int transaction_kthread(void *arg)
@@ -3931,6 +3904,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+ /*
+ * We don't want the cleaner to start new transactions, add more delayed
+ * iputs, etc. while we're closing. We can't use kthread_stop() yet
+ * because that frees the task_struct, and the transaction kthread might
+ * still try to wake up the cleaner.
+ */
+ kthread_park(fs_info->cleaner_kthread);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3938,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
if (!sb_rdonly(fs_info->sb)) {
/*
- * If the cleaner thread is stopped and there are
- * block groups queued for removal, the deletion will be
- * skipped when we quit the cleaner thread.
+ * The cleaner kthread is stopped, so do one final pass over
+ * unused block groups.
*/
btrfs_delete_unused_bgs(fs_info);
@@ -4359,13 +4338,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
unpin = pinned_extents;
again:
while (1) {
+ /*
+ * The btrfs_finish_extent_commit() may get the same range as
+ * ours between find_first_extent_bit and clear_extent_dirty.
+ * Hence, hold the unused_bg_unpin_mutex to avoid double unpinning
+ * the same extent range.
+ */
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY, NULL);
- if (ret)
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
+ }
clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(fs_info, start, end);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4ba0aedc878b..74aa552f4793 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -75,7 +75,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+ inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+ btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
return inode;
@@ -838,6 +839,25 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
path->search_commit_root = 1;
path->skip_locking = 1;
+ /*
+ * We must pass a path with search_commit_root set to btrfs_iget in
+ * order to avoid a deadlock when allocating extents for the tree root.
+ *
+ * When we are COWing an extent buffer from the tree root, when looking
+ * for a free extent, at extent-tree.c:find_free_extent(), we can find
+ * a block group without its free space cache loaded. When we find one
+ * we must load its space cache which requires reading its free space
+ * cache's inode item from the root tree. If this inode item is located
+ * in the same leaf that we started COWing before, then we end up in
+ * a deadlock on the extent buffer (trying to read lock it when we
+ * previously write locked it).
+ *
+ * It's safe to read the inode item using the commit root because
+ * block groups, once loaded, stay in memory forever (until they are
+ * removed) as well as their space caches once loaded. New block groups
+ * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
+ * we will never try to read their inode item while the fs is mounted.
+ */
inode = lookup_free_space_inode(fs_info, block_group, path);
if (IS_ERR(inode)) {
btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d3df5b52278c..9ea4c6f0352f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1531,12 +1531,11 @@ out_check:
}
btrfs_release_path(path);
- if (cur_offset <= end && cow_start == (u64)-1) {
+ if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
- cur_offset = end;
- }
if (cow_start != (u64)-1) {
+ cur_offset = end;
ret = cow_file_range(inode, locked_page, cow_start, end, end,
page_started, nr_written, 1, NULL);
if (ret)
@@ -3570,10 +3569,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
/*
* read an inode from the btree into the in-memory inode
*/
-static int btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode,
+ struct btrfs_path *in_path)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path;
+ struct btrfs_path *path = in_path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3589,15 +3589,18 @@ static int btrfs_read_locked_inode(struct inode *inode)
if (!ret)
filled = true;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
return ret;
}
@@ -3722,7 +3725,8 @@ cache_acl:
btrfs_ino(BTRFS_I(inode)),
root->root_key.objectid, ret);
}
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
if (!maybe_acls)
cache_no_acl(inode);
@@ -5644,8 +5648,9 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
/* Get an inode object given its location and corresponding root.
* Returns in *is_new if the inode was read from disk
*/
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new)
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path)
{
struct inode *inode;
@@ -5656,7 +5661,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
if (inode->i_state & I_NEW) {
int ret;
- ret = btrfs_read_locked_inode(inode);
+ ret = btrfs_read_locked_inode(inode, path);
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
@@ -5678,6 +5683,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
return inode;
}
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new)
+{
+ return btrfs_iget_path(s, location, root, new, NULL);
+}
+
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3ca6943827ef..802a628e9f7d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3488,6 +3488,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
len = round_down(i_size_read(src), sz) - loff;
+ if (len == 0)
+ return 0;
olen = len;
}
}
@@ -4257,9 +4259,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
- /* if we extend to eof, continue to block boundary */
- if (off + len == src->i_size)
+ /*
+ * If we extend to eof, continue to block boundary if and only if the
+ * destination end offset matches the destination file's size, otherwise
+ * we would be corrupting data by placing the eof block into the middle
+ * of a file.
+ */
+ if (off + len == src->i_size) {
+ if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+ goto out_unlock;
len = ALIGN(src->i_size, bs) - off;
+ }
if (len == 0) {
ret = 0;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b362b45dd757..cbc9d0d2c12d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1916,7 +1916,7 @@ restore:
}
/* Used to sort the devices by max_avail(descending sort) */
-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
const void *dev_info2)
{
if (((struct btrfs_device_info *)dev_info1)->max_avail >
@@ -1945,8 +1945,8 @@ static inline void btrfs_descending_sort_devices(
* The helper to calc the free space on the devices that can be used to store
* file data.
*/
-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
- u64 *free_bytes)
+static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+ u64 *free_bytes)
{
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index cab0b1f1f741..efcf89a8ba44 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -440,7 +440,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
type != (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)) {
block_group_err(fs_info, leaf, slot,
-"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
type, hweight64(type),
BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
BTRFS_BLOCK_GROUP_SYSTEM,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e07f3376b7df..a5ce99a6c936 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4396,6 +4396,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
logged_end = end;
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+ /*
+ * Skip extents outside our logging range. It's important to do
+ * it for correctness because if we don't ignore them, we may
+ * log them before their ordered extent completes, and therefore
+ * we could log them without logging their respective checksums
+ * (the checksum items are added to the csum tree at the very
+ * end of btrfs_finish_ordered_io()). Also leave such extents
+ * outside of our range in the list, since we may have another
+ * ranged fsync in the near future that needs them. If an extent
+ * outside our range corresponds to a hole, log it to avoid
+ * leaving gaps between extents (fsck will complain when we are
+ * not using the NO_HOLES feature).
+ */
+ if ((em->start > end || em->start + em->len <= start) &&
+ em->block_start != EXTENT_MAP_HOLE)
+ continue;
+
list_del_init(&em->list);
/*
* Just an arbitrary number, this can be really CPU intensive
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 27cad84dab23..189df668b6a0 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1931,10 +1931,17 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
if (!prealloc_cf)
return -ENOMEM;
- /* Start by sync'ing the source file */
+ /* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
- if (ret < 0)
+ if (ret < 0) {
+ dout("failed to write src file (%zd)\n", ret);
+ goto out;
+ }
+ ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
+ if (ret < 0) {
+ dout("failed to write dst file (%zd)\n", ret);
goto out;
+ }
/*
* We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 67a9aeb2f4ec..bd13a3267ae0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -80,12 +80,8 @@ static int parse_reply_info_in(void **p, void *end,
info->symlink = *p;
*p += info->symlink_len;
- if (features & CEPH_FEATURE_DIRLAYOUTHASH)
- ceph_decode_copy_safe(p, end, &info->dir_layout,
- sizeof(info->dir_layout), bad);
- else
- memset(&info->dir_layout, 0, sizeof(info->dir_layout));
-
+ ceph_decode_copy_safe(p, end, &info->dir_layout,
+ sizeof(info->dir_layout), bad);
ceph_decode_32_safe(p, end, info->xattr_len, bad);
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
@@ -3182,10 +3178,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
recon_state.pagelist = pagelist;
if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
recon_state.msg_version = 3;
- else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
- recon_state.msg_version = 2;
else
- recon_state.msg_version = 1;
+ recon_state.msg_version = 2;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 32d4f13784ba..03f4d24db8fe 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
ceph_put_snap_realm(mdsc, realm);
realm = next;
}
- ceph_put_snap_realm(mdsc, realm);
+ if (realm)
+ ceph_put_snap_realm(mdsc, realm);
up_read(&mdsc->snap_rwsem);
return exceeded;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 05f01fbd9c7f..22a9d8159720 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5835,9 +5835,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
{
int err = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+ put_bh(iloc->bh);
return -EIO;
-
+ }
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 17adcb16a9c8..437f71fe83ae 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
if (!is_dx_block && type == INDEX) {
ext4_error_inode(inode, func, line, block,
"directory leaf block found instead of index block");
+ brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
list_del_init(&EXT4_I(inode)->i_orphan);
mutex_unlock(&sbi->s_orphan_lock);
}
- }
+ } else
+ brelse(iloc.bh);
+
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ebbc663d0798..a5efee34415f 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
- if (err)
+ if (err) {
+ brelse(bh);
return err;
+ }
ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
first_cluster, first_cluster - start, count2);
ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (unlikely(err))
return err;
- brelse(bh);
}
return 0;
@@ -605,7 +607,6 @@ handle_bb:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
@@ -618,9 +619,9 @@ handle_bb:
ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
handle_ib:
if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -635,18 +636,16 @@ handle_ib:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
}
- bh = NULL;
/* Mark group tables in block bitmap */
for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -685,7 +684,6 @@ handle_ib:
}
out:
- brelse(bh);
err2 = ext4_journal_stop(handle);
if (err2 && !err)
err = err2;
@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
+ iloc.bh = NULL;
goto exit_inode;
}
brelse(dind);
@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
sizeof(struct buffer_head *),
GFP_NOFS);
if (!n_group_desc) {
+ brelse(gdb_bh);
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
- if (unlikely(err))
- brelse(gdb_bh);
return err;
}
@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
backup_block, backup_block -
ext4_group_first_block_no(sb, group));
BUFFER_TRACE(bh, "get_write_access");
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
@@ -2023,7 +2023,7 @@ retry:
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
if (err)
- return err;
+ goto out;
err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
if (err)
@@ -2059,6 +2059,10 @@ retry:
n_blocks_count_retry = 0;
free_flex_gd(flex_gd);
flex_gd = NULL;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
goto retry;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a221f1cdf704..53ff6c2a26ed 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4075,6 +4075,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+ le32_to_cpu(es->s_inodes_count),
+ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+ ret = -EINVAL;
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
@@ -4094,14 +4102,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ret = -ENOMEM;
goto failed_mount;
}
- if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
- le32_to_cpu(es->s_inodes_count)) {
- ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
- le32_to_cpu(es->s_inodes_count),
- ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
- ret = -EINVAL;
- goto failed_mount;
- }
bgl_lock_init(sbi->s_blockgroup_lock);
@@ -4510,6 +4510,7 @@ failed_mount6:
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
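
Both super.c hunks keep the failed_mount* unwind ladder honest: the inodes-count check moves before the group-descriptor allocation so that failing it needs no cleanup, and failed_mount6 now frees the s_journal_flag_rwsem it is responsible for. The underlying pattern, sketched as a self-contained example with hypothetical foo_* helpers standing in for the real allocations:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct foo { bool valid; void *a, *b, *c; };

    /* trivial stand-ins so the sketch compiles; real code allocates resources */
    static bool foo_params_valid(struct foo *f) { return f->valid; }
    static int  foo_alloc_a(struct foo *f) { f->a = f; return 0; }
    static int  foo_alloc_b(struct foo *f) { f->b = f; return 0; }
    static int  foo_alloc_c(struct foo *f) { f->c = f; return 0; }
    static void foo_free_b(struct foo *f)  { f->b = NULL; }
    static void foo_free_a(struct foo *f)  { f->a = NULL; }

    static int foo_fill(struct foo *f)
    {
        int err;

        if (!foo_params_valid(f))      /* validate before allocating anything */
            return -EINVAL;

        err = foo_alloc_a(f);
        if (err)
            return err;
        err = foo_alloc_b(f);
        if (err)
            goto free_a;
        err = foo_alloc_c(f);
        if (err)
            goto free_b;
        return 0;

    free_b:
        foo_free_b(f);                 /* unwind in reverse order of setup */
    free_a:
        foo_free_a(f);
        return err;
    }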
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index f36fc5d5b257..7643d52c776c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1031,10 +1031,8 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
inode_lock(ea_inode);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
- if (ret) {
- iloc.bh = NULL;
+ if (ret)
goto out;
- }
ref_count = ext4_xattr_inode_get_ref(ea_inode);
ref_count += ref_change;
@@ -1080,12 +1078,10 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
}
ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
- iloc.bh = NULL;
if (ret)
ext4_warning_inode(ea_inode,
"ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
- brelse(iloc.bh);
inode_unlock(ea_inode);
return ret;
}
@@ -1388,6 +1384,12 @@ retry:
bh = ext4_getblk(handle, ea_inode, block, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
+ if (!bh) {
+ WARN_ON_ONCE(1);
+ EXT4_ERROR_INODE(ea_inode,
+				 "ext4_getblk() returned bh = NULL");
+ return -EFSCORRUPTED;
+ }
ret = ext4_journal_get_write_access(handle, bh);
if (ret)
goto out;
@@ -2276,8 +2278,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
if (!bh)
return ERR_PTR(-EIO);
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
return ERR_PTR(error);
+ }
return bh;
}
@@ -2397,6 +2401,8 @@ retry_inode:
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else if (error == -ENOSPC) {
if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+ brelse(bs.bh);
+ bs.bh = NULL;
error = ext4_xattr_block_find(inode, &i, &bs);
if (error)
goto cleanup;
@@ -2617,6 +2623,8 @@ out:
kfree(buffer);
if (is)
brelse(is->iloc.bh);
+ if (bs)
+ brelse(bs->bh);
kfree(is);
kfree(bs);
@@ -2696,7 +2704,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle)
{
struct ext4_xattr_ibody_header *header;
- struct buffer_head *bh;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
size_t min_offs;
@@ -2737,13 +2744,17 @@ retry:
* EA block can hold new_extra_isize bytes.
*/
if (EXT4_I(inode)->i_file_acl) {
+ struct buffer_head *bh;
+
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
error = -EIO;
if (!bh)
goto cleanup;
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
goto cleanup;
+ }
base = BHDR(bh);
end = bh->b_data + bh->b_size;
min_offs = end - base;
diff --git a/fs/namespace.c b/fs/namespace.c
index 98d27da43304..74f64294a410 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags)
namespace_lock();
lock_mount_hash();
- event++;
+ /* Recheck MNT_LOCKED with the locks held */
+ retval = -EINVAL;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto out;
+
+ event++;
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags)
retval = 0;
}
}
+out:
unlock_mount_hash();
namespace_unlock();
return retval;
@@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags)
goto dput_and_out;
if (!check_mnt(mnt))
goto dput_and_out;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
goto dput_and_out;
retval = -EPERM;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
for (s = r; s; s = next_mnt(s, r)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
- s = skip_mnt_tree(s);
- continue;
+ if (s->mnt.mnt_flags & MNT_LOCKED) {
+ /* Both unbindable and locked. */
+ q = ERR_PTR(-EPERM);
+ goto out;
+ } else {
+ s = skip_mnt_tree(s);
+ continue;
+ }
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ umount_tree(real_mount(mnt), 0);
unlock_mount_hash();
namespace_unlock();
}
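
The do_umount() change is the classic optimistic-check/recheck pattern: MNT_LOCKED is tested early without the namespace locks for the cheap failure case, then tested again once lock_mount_hash() is held, because the flag can change in between. The same idea in a self-contained user-space sketch (pthreads and the foo names are illustration only):

    #include <pthread.h>
    #include <errno.h>
    #include <stdbool.h>

    struct object {
        pthread_mutex_t lock;
        bool locked_down;          /* may be set concurrently by other threads */
    };

    static int do_teardown(struct object *obj)
    {
        if (obj->locked_down)          /* check optimistically, no lock taken */
            return -EINVAL;

        pthread_mutex_lock(&obj->lock);
        if (obj->locked_down) {        /* recheck with the lock held */
            pthread_mutex_unlock(&obj->lock);
            return -EINVAL;
        }
        /* ... actual teardown, protected by obj->lock ... */
        pthread_mutex_unlock(&obj->lock);
        return 0;
    }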
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6fc5425b1474..2652d00842d6 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
- uint16_t end;
+ uint32_t end; /* must be 32bit - see below */
int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
/*
* Quickly check the freemap information. Attribute data has to be
* aligned to 4-byte boundaries, and likewise for the free space.
+ *
+ * Note that for 64k block size filesystems, the freemap entries cannot
+ * overflow as they are only be16 fields. However, when checking the end
+ * pointer of the freemap, we have to be careful to detect overflows and
+ * so use uint32_t for those checks.
*/
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
return __this_address;
if (ichdr.freemap[i].size & 0x3)
return __this_address;
- end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+ /* beware of 16-bit overflows here */
+ end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
if (end < ichdr.freemap[i].base)
return __this_address;
if (end > mp->m_attr_geo->blksize)
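
Widening end to uint32_t matters for 64k-block filesystems: a valid freemap entry may run to the very end of the block, so base + size can equal 65536, which wraps to 0 in a 16-bit variable and makes the end < base check fire spuriously. A standalone illustration of the wrap-around, with assumed values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* assumed values: a freemap entry running to the end of a 64k block */
        uint16_t base = 61440, size = 4096;

        uint16_t end16 = base + size;            /* wraps to 0: "end < base" fires */
        uint32_t end32 = (uint32_t)base + size;  /* 65536: compares correctly */

        printf("16-bit end: %u, 32-bit end: %u\n", end16, end32);
        return 0;
    }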
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6e2c08f30f60..6ecdbb3af7de 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1608,7 +1608,7 @@ xfs_ioc_getbmap(
error = 0;
out_free_buf:
kmem_free(buf);
- return 0;
+ return error;
}
struct getfsmap_info {
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 576c375ce12a..6b736ea58d35 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -107,5 +107,5 @@ assfail(char *expr, char *file, int line)
void
xfs_hex_dump(void *p, int length)
{
- print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..73474bb52344 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..1d6dd38c0e5e 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..04cb913797bc 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..9bef475db6fe 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5657a20e0c59..359fb935ded6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
#endif
#endif
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
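
These defaults are also why the earlier hunks turn the bare #define __PAGETABLE_*_FOLDED into #define __PAGETABLE_*_FOLDED 1: __is_defined() relies on the kconfig-style "expands to 1" trick, so a macro defined to nothing evaluates to 0 just like an undefined one. A user-space reconstruction of that trick, simplified from the helpers in include/linux/kconfig.h:

    #include <stdio.h>

    /* simplified reconstruction, for illustration only */
    #define __ARG_PLACEHOLDER_1                     0,
    #define __take_second_arg(__ignored, val, ...)  val
    #define ____is_defined(arg1_or_junk)  __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val)            ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x)               ___is_defined(x)

    #define FOLDED_OLD            /* bare define, as before this series */
    #define FOLDED_NEW 1          /* defined to 1, as after this series */

    int main(void)
    {
        printf("bare define  -> %d\n", __is_defined(FOLDED_OLD));  /* prints 0 */
        printf("defined to 1 -> %d\n", __is_defined(FOLDED_NEW));  /* prints 1 */
        return 0;
    }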
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 6b92b3395fa9..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c0f5db3a9621..2010493e1040 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -143,18 +143,6 @@
#define KASAN_ABI_VERSION 3
#endif
-/*
- * Because __no_sanitize_address conflicts with inlining:
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other.
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline \
- __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 18c80cfa4fc4..06396c1cf127 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b28c1b7310c..f8c400ba1929 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -4,22 +4,26 @@
/*
* The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
*/
/*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3439d7d0249a..4a3f9c09c92d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -130,6 +130,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2827b87590d8..387c70df6f29 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return negative on error; any other value will
+ * pass the event on to .event(). Typically return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9d535f..5411de93a363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index e10b126e148f..33e240acdc6d 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -203,9 +203,12 @@ enum nand_ecc_algo {
*/
#define NAND_IS_BOOT_MEDIUM 0x00400000
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings by itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS 0x00800000
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
@@ -245,49 +248,6 @@ struct nand_id {
};
/**
- * struct nand_controller_ops - Controller operations
- *
- * @attach_chip: this method is called after the NAND detection phase after
- * flash ID and MTD fields such as erase size, page size and OOB
- * size have been set up. ECC requirements are available if
- * provided by the NAND chip or device tree. Typically used to
- * choose the appropriate ECC configuration and allocate
- * associated resources.
- * This hook is optional.
- * @detach_chip: free all resources allocated/claimed in
- * nand_controller_ops->attach_chip().
- * This hook is optional.
- */
-struct nand_controller_ops {
- int (*attach_chip)(struct nand_chip *chip);
- void (*detach_chip)(struct nand_chip *chip);
-};
-
-/**
- * struct nand_controller - Structure used to describe a NAND controller
- *
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- * @ops: NAND controller operations.
- */
-struct nand_controller {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
- const struct nand_controller_ops *ops;
-};
-
-static inline void nand_controller_init(struct nand_controller *nfc)
-{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
-}
-
-/**
* struct nand_ecc_step_info - ECC step information of ECC engine
* @stepsize: data bytes per ECC step
* @strengths: array of supported strengths
@@ -879,18 +839,21 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
-#define NAND_OPERATION(_instrs) \
+#define NAND_OPERATION(_cs, _instrs) \
{ \
+ .cs = _cs, \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
@@ -898,11 +861,68 @@ struct nand_operation {
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_data_interface: setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in
+ * progress, used instead of the per chip wait queue
+ * when a hw controller is available.
+ * @ops: NAND controller operations.
+ */
+struct nand_controller {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ nfc->active = NULL;
+ spin_lock_init(&nfc->lock);
+ init_waitqueue_head(&nfc->wq);
+}
/**
* struct nand_legacy - NAND chip legacy fields/hooks
* @IO_ADDR_R: address to read the 8 I/O lines of the flash device
* @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
* @read_byte: read one byte from the chip
* @write_byte: write a single byte to the chip on the low 8 I/O lines
* @write_buf: write data from the buffer to the chip
@@ -921,6 +941,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* @get_features: get the NAND chip features
* @chip_delay: chip dependent delay for transferring data from array to read
* regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
*
* If you look at this structure you're already wrong. These fields/hooks are
* all deprecated.
@@ -928,6 +950,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
struct nand_legacy {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
u8 (*read_byte)(struct nand_chip *chip);
void (*write_byte)(struct nand_chip *chip, u8 byte);
void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
@@ -945,6 +968,7 @@ struct nand_legacy {
int (*get_features)(struct nand_chip *chip, int feature_addr,
u8 *subfeature_para);
int chip_delay;
+ struct nand_controller dummy_controller;
};
/**
@@ -955,17 +979,10 @@ struct nand_legacy {
* you're modifying an existing driver that is using those
* fields/hooks, you should consider reworking the driver
* avoid using them.
- * @select_chip: [REPLACEABLE] select chip nr
- * @exec_op: controller specific method to execute NAND operations.
- * This method replaces ->cmdfunc(),
- * ->legacy.{read,write}_{buf,byte,word}(),
- * ->legacy.dev_ready() and ->waifunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @dummy_controller: dummy controller implementation for drivers that can
- * only control a single chip
* @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
@@ -1012,11 +1029,11 @@ struct nand_legacy {
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
+ * @cur_cs: currently selected target. -1 means no target selected,
+ * otherwise we should always have cur_cs >= 0 &&
+ * cur_cs < numchips. NAND Controller drivers should not
+ * modify this value, but they're allowed to read it.
* @read_retries: [INTERN] the number of read retry modes supported
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1037,13 +1054,7 @@ struct nand_chip {
struct nand_legacy legacy;
- void (*select_chip)(struct nand_chip *chip, int cs);
- int (*exec_op)(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only);
int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
- int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf);
unsigned int options;
unsigned int bbt_options;
@@ -1073,6 +1084,8 @@ struct nand_chip {
struct nand_data_interface data_interface;
+ int cur_cs;
+
int read_retries;
flstate_t state;
@@ -1082,7 +1095,6 @@ struct nand_chip {
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1098,15 +1110,6 @@ struct nand_chip {
} manufacturer;
};
-static inline int nand_exec_op(struct nand_chip *chip,
- const struct nand_operation *op)
-{
- if (!chip->exec_op)
- return -ENOTSUPP;
-
- return chip->exec_op(chip, op, false);
-}
-
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1345,5 +1348,12 @@ void nand_release(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+struct gpio_desc;
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
#endif /* __LINUX_MTD_RAWNAND_H */
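
The net effect of the rawnand.h changes: ->exec_op() and ->setup_data_interface() now live in struct nand_controller_ops rather than struct nand_chip, and the target CS travels inside struct nand_operation. A rough sketch of what a controller driver ends up providing under the new layout (the foo_nfc names are invented and the bodies are elided):

	static int foo_nfc_exec_op(struct nand_chip *chip,
				   const struct nand_operation *op, bool check_only)
	{
		/* op->cs tells the driver which CS line to drive for this operation */
		if (check_only)
			return 0;	/* only validate that the op is supported */

		/* ... program the controller and run op->instrs[0..op->ninstrs - 1] ... */
		return 0;
	}

	static int foo_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
						const struct nand_data_interface *conf)
	{
		if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
			return 0;	/* configuration is only being checked */

		/* ... apply the requested timings to the controller ... */
		return 0;
	}

	static const struct nand_controller_ops foo_nfc_ops = {
		.exec_op = foo_nfc_exec_op,
		.setup_data_interface = foo_nfc_setup_data_interface,
	};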
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index c759d403cbc0..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 088ff96c3eb6..b92e2aa955b6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -194,8 +194,10 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
/**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc1d9ed33b31..857f8abf7b91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
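
__netdev_tx_sent_queue() lets a driver account the bytes of every skb in a batch while touching __QUEUE_STATE_STACK_XOFF, and deciding on the doorbell, only once per batch. A hedged sketch of the intended call site in a driver xmit handler (queue selection and the ring programming are invented placeholders):

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq;
		bool kick;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* ... place skb on the hardware TX ring ... */

		/* account the bytes; XOFF is only re-evaluated for the last skb of a batch */
		kick = __netdev_tx_sent_queue(txq, skb->len, skb->xmit_more);
		if (kick) {
			/* ... write the NIC doorbell register ... */
		}

		return NETDEV_TX_OK;
	}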
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..1d100efe74ec 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -314,7 +314,7 @@ enum {
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 14b789a123e7..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index eed04af9b75e..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
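
The new nf_*_pernet() accessors centralise the pointer chasing into net->ct.nf_ct_proto so that protocol trackers no longer open-code it. A one-line usage sketch, assuming ct is a struct nf_conn * already available in the surrounding tracker code:

	struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));

	/* tn now points at this netns' TCP conntrack state/tuning */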
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f5ff8a76e208..b01eb502d49c 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
};
struct kfd_ioctl_get_queue_wave_state_args {
- uint64_t ctl_stack_address; /* to KFD */
- uint32_t ctl_stack_used_size; /* from KFD */
- uint32_t save_area_used_size; /* from KFD */
- uint32_t queue_id; /* to KFD */
- uint32_t pad;
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 579974b0bf0d..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..c81feb373d3e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
- xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6377225b2082..1a796e0799ec 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
- unsigned long symbol_start, symbol_end;
struct bpf_prog_aux *aux;
unsigned int it = 0;
int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
if (it++ != symnum)
continue;
- bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
bpf_get_prog_name(aux->prog, sym);
- *value = symbol_start;
+ *value = (unsigned long)aux->prog->bpf_func;
*type = BPF_SYM_ELF_TYPE;
ret = 0;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ccb93277aae2..cf5040fd5434 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
+ info.nr_jited_func_lens = 0;
goto done;
}
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_ksyms;
- info.nr_jited_ksyms = prog->aux->func_cnt;
+ info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (info.nr_jited_ksyms && ulen) {
if (bpf_dump_raw_ok()) {
+ unsigned long ksym_addr;
u64 __user *user_ksyms;
- ulong ksym_addr;
u32 i;
/* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
*/
ulen = min_t(u32, info.nr_jited_ksyms, ulen);
user_ksyms = u64_to_user_ptr(info.jited_ksyms);
- for (i = 0; i < ulen; i++) {
- ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
- ksym_addr &= PAGE_MASK;
- if (put_user((u64) ksym_addr, &user_ksyms[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ ksym_addr = (unsigned long)
+ prog->aux->func[i]->bpf_func;
+ if (put_user((u64) ksym_addr,
+ &user_ksyms[i]))
+ return -EFAULT;
+ }
+ } else {
+ ksym_addr = (unsigned long) prog->bpf_func;
+ if (put_user((u64) ksym_addr, &user_ksyms[0]))
return -EFAULT;
}
} else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_func_lens;
- info.nr_jited_func_lens = prog->aux->func_cnt;
+ info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (info.nr_jited_func_lens && ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
/* copy the JITed image lengths for each function */
ulen = min_t(u32, info.nr_jited_func_lens, ulen);
user_lens = u64_to_user_ptr(info.jited_func_lens);
- for (i = 0; i < ulen; i++) {
- func_len = prog->aux->func[i]->jited_len;
- if (put_user(func_len, &user_lens[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ func_len =
+ prog->aux->func[i]->jited_len;
+ if (put_user(func_len, &user_lens[i]))
+ return -EFAULT;
+ }
+ } else {
+ func_len = prog->jited_len;
+ if (put_user(func_len, &user_lens[0]))
return -EFAULT;
}
} else {
diff --git a/kernel/resource.c b/kernel/resource.c
index b3a3a1fc499e..b0fbf685c77a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
/**
- * Finds the lowest iomem resource that covers part of [start..end]. The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end]. The
+ * caller must specify @start, @end, @flags, and @desc (which may be
* IORES_DESC_NONE).
*
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+ * -1. For invalid parameters, -EINVAL is returned.
*
* This function walks the whole tree and not just first level children
* unless @first_lvl is true.
+ *
+ * @start: start address of the resource searched for
+ * @end: end address of same resource
+ * @flags: flags which the resource must have
+ * @desc: descriptor the resource must have
+ * @first_lvl: walk only the first level children, if set
+ * @res: return ptr, if resource found
*/
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
* @flags: I/O resource flags
* @start: start addr
* @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
*
* NOTE: For a new descriptor search, define a new IORES_DESC in
* <linux/ioport.h> and set it in 'desc' of a target resource entry.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..091e089063be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
- * happen.
+ * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+ * but there won't be any contention on it.
*/
+ cpus_read_lock();
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
mutex_unlock(&sched_domains_mutex);
+ cpus_read_unlock();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc..3648d0300fdf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
local = 1;
/*
- * Retry task to preferred node migration periodically, in case it
- * case it previously failed, or the scheduler moved us.
+ * Retry to migrate task to preferred node periodically, in case it
+ * previously failed, or the scheduler moved us.
*/
if (time_after(jiffies, p->numa_migrate_retry)) {
task_numa_placement(p);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf741b25..8f0644af40be 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
struct task_cputime cputime;
unsigned long soft;
- if (dl_task(tsk))
- check_dl_overrun(tsk);
-
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 3ef15a6683c0..bd30e9398d2a 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
if (code[1].op != FETCH_OP_IMM)
return -EINVAL;
- tmp = strpbrk("+-", code->data);
+ tmp = strpbrk(code->data, "+-");
if (tmp)
c = *tmp;
ret = traceprobe_split_symbol_offset(code->data,
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index e5222b5fb4fe..923414a246e9 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
goto out;
- ret = sort_idmaps(&new_map);
- if (ret < 0)
- goto out;
-
ret = -EPERM;
/* Map the lower ids from the parent user namespace to the
* kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
e->lower_first = lower_first;
}
+ /*
+ * If we want to use binary search for lookup, this clones the extent
+ * array and sorts both copies.
+ */
+ ret = sort_idmaps(&new_map);
+ if (ret < 0)
+ goto out;
+
/* Install the map */
if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
memcpy(map->extent, new_map.extent,
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 5d73f5cb4d8a..79777645cac9 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
CFLAGS += -I../../../arch/arm/include -mfpu=neon
HAS_NEON = yes
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
CFLAGS += -I../../../arch/arm64/include
HAS_NEON = yes
endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
diff --git a/net/core/dev.c b/net/core/dev.c
index 77d43ae2a7bb..0ffcbdd55fa9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
}
skb = next;
- if (netif_xmit_stopped(txq) && skb) {
+ if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 676f3ad629f9..588f475019d4 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1166,8 +1166,8 @@ ip_proto_again:
break;
}
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5da9552b186b..2b9fdbc43205 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e01274bd5e3e..33d9227a8b80 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = 0;
}
ret = dumpit(skb, cb);
- if (ret < 0)
+ if (ret)
break;
}
cb->family = idx;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 946de0e24c87..b4ee5c8b928f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4944,6 +4944,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
*
* This is a helper to do that correctly considering GSO_BY_FRAGS.
*
+ * @skb: GSO skb
+ *
* @seg_len: The segmented length (from skb_gso_*_seglen). In the
* GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
*
diff --git a/net/core/sock.c b/net/core/sock.c
index 6fcc4bc07d19..080a880a1761 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
#ifdef CONFIG_INET
if (family == AF_INET &&
+ protocol != IPPROTO_RAW &&
!rcu_access_pointer(inet_protos[protocol]))
return -ENOENT;
#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bcb11f3a27c0..760a9e52e02b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- void *arg)
+ void *arg,
+ struct inet_frag_queue **prev)
{
struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
- int err;
q = inet_frag_alloc(nf, f, arg);
- if (!q)
+ if (!q) {
+ *prev = ERR_PTR(-ENOMEM);
return NULL;
-
+ }
mod_timer(&q->timer, jiffies + nf->timeout);
- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
- f->rhash_params);
- if (err < 0) {
+ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+ &q->node, f->rhash_params);
+ if (*prev) {
q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q);
inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
- struct inet_frag_queue *fq;
+ struct inet_frag_queue *fq = NULL, *prev;
if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL;
rcu_read_lock();
- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
- if (fq) {
+ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (!prev)
+ fq = inet_frag_create(nf, key, &prev);
+ if (prev && !IS_ERR(prev)) {
+ fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
- rcu_read_unlock();
- return fq;
}
rcu_read_unlock();
-
- return inet_frag_create(nf, key);
+ return fq;
}
EXPORT_SYMBOL(inet_frag_find);
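
inet_frag_find() now leans on the three-way return of rhashtable_lookup_get_insert_key(): NULL means the freshly allocated queue was inserted, an ERR_PTR() means the insert failed, and any other pointer is an existing queue that won the race. Condensed, the caller-side interpretation looks roughly like this (refcount and cleanup handling trimmed):

	prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
						&q->node, f->rhash_params);
	if (!prev)
		return q;		/* our queue is now in the table */
	if (IS_ERR(prev))
		return NULL;		/* insert failed; q must be destroyed */
	return prev;			/* lost the race; use the existing queue */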
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9b0158fa431f..d6ee343fdb86 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -722,10 +722,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
- return skb;
- if (pskb_trim_rcsum(skb, netoff + len))
- return skb;
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ if (pskb_trim_rcsum(skb, netoff + len)) {
+ kfree_skb(skb);
+ return NULL;
+ }
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 26c36cccabdc..fffcc130900e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
optname < BPFILTER_IPT_SET_MAX)
err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
err = do_ip_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 3f4d61017a69..f0cd291034f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
err = ip6_flowlabel_init();
if (err)
goto ip6_flowlabel_fail;
+ err = ipv6_anycast_init();
+ if (err)
+ goto ipv6_anycast_fail;
err = addrconf_init();
if (err)
goto addrconf_fail;
@@ -1091,6 +1094,8 @@ ipv6_frag_fail:
ipv6_exthdrs_fail:
addrconf_cleanup();
addrconf_fail:
+ ipv6_anycast_cleanup();
+ipv6_anycast_fail:
ip6_flowlabel_cleanup();
ip6_flowlabel_fail:
ndisc_late_cleanup();
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 4e0ff7031edd..94999058e110 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,8 +44,22 @@
#include <net/checksum.h>
+#define IN6_ADDR_HSIZE_SHIFT 8
+#define IN6_ADDR_HSIZE BIT(IN6_ADDR_HSIZE_SHIFT)
+/* anycast address hash table
+ */
+static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(acaddr_hash_lock);
+
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
+static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+{
+ u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+
+ return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
+}
+
/*
* socket join an anycast group
*/
@@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
rtnl_unlock();
}
+static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
+{
+ unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
+
+ spin_lock(&acaddr_hash_lock);
+ hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
+ spin_unlock(&acaddr_hash_lock);
+}
+
+static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
+{
+ spin_lock(&acaddr_hash_lock);
+ hlist_del_init_rcu(&aca->aca_addr_lst);
+ spin_unlock(&acaddr_hash_lock);
+}
+
static void aca_get(struct ifacaddr6 *aca)
{
refcount_inc(&aca->aca_refcnt);
}
+static void aca_free_rcu(struct rcu_head *h)
+{
+ struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
+
+ fib6_info_release(aca->aca_rt);
+ kfree(aca);
+}
+
static void aca_put(struct ifacaddr6 *ac)
{
if (refcount_dec_and_test(&ac->aca_refcnt)) {
- fib6_info_release(ac->aca_rt);
- kfree(ac);
+ call_rcu(&ac->rcu, aca_free_rcu);
}
}
@@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
aca->aca_addr = *addr;
fib6_info_hold(f6i);
aca->aca_rt = f6i;
+ INIT_HLIST_NODE(&aca->aca_addr_lst);
aca->aca_users = 1;
/* aca_tstamp should be updated upon changes */
aca->aca_cstamp = aca->aca_tstamp = jiffies;
@@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
aca_get(aca);
write_unlock_bh(&idev->lock);
+ ipv6_add_acaddr_hash(net, aca);
+
ip6_ins_rt(net, f6i);
addrconf_join_solict(idev->dev, &aca->aca_addr);
@@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
else
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
+
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr)
{
+ unsigned int hash = inet6_acaddr_hash(net, addr);
+ struct net_device *nh_dev;
+ struct ifacaddr6 *aca;
bool found = false;
rcu_read_lock();
if (dev)
found = ipv6_chk_acast_dev(dev, addr);
else
- for_each_netdev_rcu(net, dev)
- if (ipv6_chk_acast_dev(dev, addr)) {
+ hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
+ aca_addr_lst) {
+ nh_dev = fib6_info_nh_dev(aca->aca_rt);
+ if (!nh_dev || !net_eq(dev_net(nh_dev), net))
+ continue;
+ if (ipv6_addr_equal(&aca->aca_addr, addr)) {
found = true;
break;
}
+ }
rcu_read_unlock();
return found;
}
@@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
remove_proc_entry("anycast6", net->proc_net);
}
#endif
+
+/* Init / cleanup code
+ */
+int __init ipv6_anycast_init(void)
+{
+ int i;
+
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
+ return 0;
+}
+
+void ipv6_anycast_cleanup(void)
+{
+ int i;
+
+ spin_lock(&acaddr_hash_lock);
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
+ spin_unlock(&acaddr_hash_lock);
+}
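The hash table added above turns ipv6_chk_acast_addr() without a device argument from a walk over every net device into a lookup keyed on the anycast address itself. The following is a minimal userspace sketch of that lookup shape only; the names, the toy hash and the lack of locking are simplifications, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HSIZE_SHIFT	8
#define HSIZE		(1u << HSIZE_SHIFT)

struct acaddr {
	uint32_t addr[4];		/* stand-in for struct in6_addr */
	struct acaddr *next;		/* hash chain */
};

static struct acaddr *acaddr_lst[HSIZE];

static unsigned int acaddr_hash(const uint32_t a[4])
{
	uint32_t v = a[0] ^ a[1] ^ a[2] ^ a[3];

	return (v * 2654435761u) >> (32 - HSIZE_SHIFT);
}

static void acaddr_add(struct acaddr *e)
{
	unsigned int h = acaddr_hash(e->addr);

	e->next = acaddr_lst[h];
	acaddr_lst[h] = e;
}

static int acaddr_present(const uint32_t a[4])
{
	const struct acaddr *e;

	for (e = acaddr_lst[acaddr_hash(a)]; e; e = e->next)
		if (!memcmp(e->addr, a, sizeof(e->addr)))
			return 1;
	return 0;
}

int main(void)
{
	static struct acaddr e = { .addr = { 0xff0e0000u, 0, 0, 1 } };

	acaddr_add(&e);
	printf("%d %d\n", acaddr_present(e.addr),
	       acaddr_present((const uint32_t[4]){ 0 }));
	return 0;
}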
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1b8bc008b53b..ae3786132c23 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* fib entries are never clones */
if (arg.filter.flags & RTM_F_CLONED)
- return skb->len;
+ goto out;
w = (void *)cb->args[2];
if (!w) {
@@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
tb = fib6_get_table(net, arg.filter.table_id);
if (!tb) {
if (arg.filter.dump_all_families)
- return skb->len;
+ goto out;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
return -ENOENT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b8ac369f98ad..d219979c3e52 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len &&
- nf_ct_frag6_reasm(fq, skb, dev))
- ret = 0;
- else
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ if (nf_ct_frag6_reasm(fq, skb, dev))
+ ret = 0;
+ skb->_skb_refdst = orefdst;
+ } else {
skb_dst_drop(skb);
+ }
out_unlock:
spin_unlock_bh(&fq->q.lock);
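The reassembly fix parks skb->_skb_refdst in a local variable, clears it so nf_ct_frag6_reasm() cannot consume the caller's dst reference, and restores it on the way out. Below is a generic save/clear/restore sketch of that idea, with made-up types and no relation to the real skb API.

#include <stdio.h>

struct pkt {
	unsigned long refdst;		/* stand-in for skb->_skb_refdst */
};

static int reassemble(struct pkt *p)
{
	(void)p;			/* pretend the fragment completed a datagram */
	return 1;
}

static int reassemble_guarded(struct pkt *p)
{
	unsigned long orefdst = p->refdst;
	int ok;

	p->refdst = 0UL;		/* callee must not consume the caller's dst */
	ok = reassemble(p);
	p->refdst = orefdst;		/* restore whatever the caller had */
	return ok;
}

int main(void)
{
	struct pkt p = { .refdst = 0xabcdUL };

	printf("%d %#lx\n", reassemble_guarded(&p), p.refdst);
	return 0;
}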
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index bc4bd247bb7d..1577f2f76060 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
-/* When the nfnl mutex is held: */
+/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+ lockdep_is_held(&ip_set_ref_lock))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
+#define ip_set_ref_netlink(inst,id) \
+ rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index.
- * We assume the set is referenced, so it does exist and
- * can't be destroyed. The set cannot be renamed due to
- * the referencing either.
- *
+ * Set itself is protected by RCU, but its name isn't: to protect against
+ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
+ * name.
*/
-const char *
-ip_set_name_byindex(struct net *net, ip_set_id_t index)
+void
+ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
- const struct ip_set *set = ip_set_rcu_get(net, index);
+ struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(!set);
- BUG_ON(set->ref == 0);
- /* Referenced, so it's safe */
- return set->name;
+ read_lock_bh(&ip_set_ref_lock);
+ strncpy(name, set->name, IPSET_MAXNAMELEN);
+ read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Wraparound */
goto cleanup;
- list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Use new list */
index = inst->ip_set_max;
inst->ip_set_max = i;
- kfree(tmp);
+ kvfree(tmp);
ret = 0;
} else if (ret) {
goto cleanup;
@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
if (!set)
return -ENOENT;
- read_lock_bh(&ip_set_ref_lock);
+ write_lock_bh(&ip_set_ref_lock);
if (set->ref != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
strncpy(set->name, name2, IPSET_MAXNAMELEN);
out:
- read_unlock_bh(&ip_set_ref_lock);
+ write_unlock_bh(&ip_set_ref_lock);
return ret;
}
@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
struct ip_set_net *inst =
(struct ip_set_net *)cb->args[IPSET_CB_NET];
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
- struct ip_set *set = ip_set(inst, index);
+ struct ip_set *set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
@@ -1441,7 +1444,7 @@ next_set:
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[IPSET_CB_ARG0]) {
- set = ip_set(inst, index);
+ set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
if (inst->ip_set_max >= IPSET_INVALID_ID)
inst->ip_set_max = IPSET_INVALID_ID - 1;
- list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
return -ENOMEM;
inst->is_deleted = false;
@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
}
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
- kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
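ip_set_name_byindex() now copies the name into a caller-supplied buffer while holding ip_set_ref_lock as a reader, and ip_set_rename() takes the same lock as a writer, so a dump can no longer race with a rename and use a stale pointer. Here is a userspace sketch of that reader/writer copy pattern using pthreads; the names and sizes are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAXNAMELEN	32

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static char set_name[MAXNAMELEN] = "myset";

static void name_byindex(char *out)
{
	pthread_rwlock_rdlock(&ref_lock);	/* readers copy under the lock */
	strncpy(out, set_name, MAXNAMELEN - 1);
	out[MAXNAMELEN - 1] = '\0';
	pthread_rwlock_unlock(&ref_lock);
}

static void rename_set(const char *new_name)
{
	pthread_rwlock_wrlock(&ref_lock);	/* rename excludes all readers */
	strncpy(set_name, new_name, MAXNAMELEN - 1);
	set_name[MAXNAMELEN - 1] = '\0';
	pthread_rwlock_unlock(&ref_lock);
}

int main(void)
{
	char name[MAXNAMELEN];

	rename_set("renamed");
	name_byindex(name);
	printf("%s\n", name);
	return 0;
}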
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index d391485a6acd..613e18e720a4 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 072a658fde04..4eef55da0878 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
{
struct set_elem *e = container_of(rcu, struct set_elem, rcu);
struct ip_set *set = e->set;
- struct list_set *map = set->data;
- ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
+ struct list_set *map = set->data;
+
set->elements--;
list_del_rcu(&e->list);
+ ip_set_put_byindex(map->net, e->id);
call_rcu(&e->rcu, __list_set_del_rcu);
}
static inline void
-list_set_replace(struct set_elem *e, struct set_elem *old)
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
+ struct list_set *map = set->data;
+
list_replace_rcu(&old->list, &e->list);
+ ip_set_put_byindex(map->net, old->id);
call_rcu(&old->rcu, __list_set_del_rcu);
}
@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
- list_set_replace(e, n);
+ list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+ char name[IPSET_MAXNAMELEN];
struct set_elem *e;
int ret = 0;
@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- if (nla_put_string(skb, IPSET_ATTR_NAME,
- ip_set_name_byindex(map->net, e->id)))
+ ip_set_name_byindex(map->net, e->id, name);
+ if (nla_put_string(skb, IPSET_ATTR_NAME, name))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ca1168d67fac..e92e749aff53 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
return drops;
}
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
{
- unsigned int i;
+ unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
- unsigned int hash, hsize, drops;
+ unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = reciprocal_scale(_hash++, hsize);
+ if (!i)
+ bucket = reciprocal_scale(hash, hsize);
+ else
+ bucket = (bucket + 1) % hsize;
- drops = early_drop_list(net, &ct_hash[hash]);
+ drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
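With this change early_drop() hashes once and then steps linearly through the table, so each of the NF_CT_EVICTION_RANGE iterations is guaranteed to inspect a distinct bucket instead of possibly rehashing onto the same one. A small standalone illustration of the walk, assuming a reciprocal_scale()-style mapping from hash to bucket:

#include <stdio.h>

#define EVICTION_RANGE	8

static unsigned int scale(unsigned int hash, unsigned int hsize)
{
	/* same idea as reciprocal_scale(): map hash into [0, hsize) */
	return (unsigned int)(((unsigned long long)hash * hsize) >> 32);
}

static void walk_buckets(unsigned int hash, unsigned int hsize)
{
	unsigned int i, bucket = 0;

	for (i = 0; i < EVICTION_RANGE; i++) {
		if (!i)
			bucket = scale(hash, hsize);	/* first pick from the hash */
		else
			bucket = (bucket + 1) % hsize;	/* then walk neighbours */
		printf("scan bucket %u\n", bucket);
	}
}

int main(void)
{
	walk_buckets(0xdeadbeefu, 16384);
	return 0;
}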
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 171e9e122e5f..023c1445bc39 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
},
};
-static inline struct nf_dccp_net *dccp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp;
-}
-
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct dccp_hdr *dh)
@@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
- dn = dccp_pernet(net);
+ dn = nf_dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "not picking up existing connection ";
goto out_invalid;
@@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
+ timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
@@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
static int dccp_init_net(struct net *net)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
if (!pn->users) {
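The same rename repeats in the generic, ICMP, ICMPv6, SCTP, TCP and UDP files below: the file-local *_pernet() helpers are dropped in favour of shared nf_<proto>_pernet() accessors. A userspace analogue of that accessor pattern with invented demo types follows; the real helpers live in a conntrack header that is not part of this diff.

#include <stdio.h>

struct proto_state {
	unsigned int timeout;
};

struct netns_ct {
	struct proto_state dccp, tcp, udp;
};

struct net {
	struct netns_ct ct;
};

/* one accessor per protocol, shared by every user instead of per-file copies */
static inline struct proto_state *nf_dccp_pernet_demo(struct net *net)
{
	return &net->ct.dccp;
}

int main(void)
{
	struct net init_net = { .ct = { .dccp = { .timeout = 300 } } };

	printf("%u\n", nf_dccp_pernet_demo(&init_net)->timeout);
	return 0;
}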
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e10e867e0b55..5da19d5fbc76 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
}
}
-static inline struct nf_generic_net *generic_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.generic;
-}
-
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
@@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
}
if (!timeout)
- timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
unsigned int *timeout = data;
if (!timeout)
@@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int generic_init_net(struct net *net)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 3598520bd19b..de64d8a5fdfd 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -25,11 +25,6 @@
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmp;
-}
-
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
@@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
}
if (!timeout)
- timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
if (!timeout)
@@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmp_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 378618feed5d..a15eefb8e317 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -30,11 +30,6 @@
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmpv6;
-}
-
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net,
@@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
- return &icmpv6_pernet(net)->timeout;
+ return &nf_icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
if (!timeout)
timeout = icmpv6_get_timeouts(net);
@@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmpv6_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 3d719d3eb9a3..d53e3e78f605 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
}
};
-static inline struct nf_sctp_net *sctp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.sctp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+ timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
@@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
@@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int sctp_init_net(struct net *net)
{
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 1bcf9984d45e..4dcbd51a8e97 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}
};
-static inline struct nf_tcp_net *tcp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.tcp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
const struct tcphdr *tcph)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
- const struct nf_tcp_net *tn = tcp_pernet(net);
+ const struct nf_tcp_net *tn = nf_tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
@@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
@@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int tcp_init_net(struct net *net)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
if (!pn->users) {
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a7aa70370913..c879d8d78cfd 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_REPLIED] = 180*HZ,
};
-static inline struct nf_udp_net *udp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.udp;
-}
-
static unsigned int *udp_get_timeouts(struct net *net)
{
- return udp_pernet(net)->timeouts;
+ return nf_udp_pernet(net)->timeouts;
}
static void udp_error_log(const struct sk_buff *skb,
@@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
if (!timeouts)
timeouts = un->timeouts;
@@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int udp_init_net(struct net *net)
{
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
if (!pn->users) {
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index e7a50af1b3d6..a518eb162344 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -382,7 +382,8 @@ err:
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event, u16 l3num,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_l4proto *l4proto,
+ const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (!nest_parms)
goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
@@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
+ unsigned int *timeouts = NULL;
struct sk_buff *skb2;
int ret, err;
__u16 l3num;
@@ -442,12 +444,44 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find_get(l4num);
- /* This protocol is not supported, skip. */
- if (l4proto->l4proto != l4num) {
- err = -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ if (l4proto->l4proto != l4num)
goto err;
+
+ switch (l4proto->l4proto) {
+ case IPPROTO_ICMP:
+ timeouts = &nf_icmp_pernet(net)->timeout;
+ break;
+ case IPPROTO_TCP:
+ timeouts = nf_tcp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_UDP:
+ timeouts = nf_udp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_DCCP:
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ timeouts = nf_dccp_pernet(net)->dccp_timeout;
+#endif
+ break;
+ case IPPROTO_ICMPV6:
+ timeouts = &nf_icmpv6_pernet(net)->timeout;
+ break;
+ case IPPROTO_SCTP:
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ timeouts = nf_sctp_pernet(net)->timeouts;
+#endif
+ break;
+ case 255:
+ timeouts = &nf_generic_pernet(net)->timeout;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
+ if (!timeouts)
+ goto err;
+
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
err = -ENOMEM;
@@ -458,8 +492,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
- l3num,
- l4proto);
+ l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
err = -ENOMEM;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 768292eac2a4..9d0ede474224 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
return false;
}
-static int nft_compat_chain_validate_dependency(const char *tablename,
- const struct nft_chain *chain)
+static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
+ const char *tablename)
{
+ enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
+ const struct nft_chain *chain = ctx->chain;
const struct nft_base_chain *basechain;
if (!tablename ||
@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
return 0;
basechain = nft_base_chain(chain);
- if (strcmp(tablename, "nat") == 0 &&
- basechain->type->type != NFT_CHAIN_T_NAT)
- return -EINVAL;
+ if (strcmp(tablename, "nat") == 0) {
+ if (ctx->family != NFPROTO_BRIDGE)
+ type = NFT_CHAIN_T_NAT;
+ if (basechain->type->type != type)
+ return -EINVAL;
+ }
return 0;
}
@@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(target->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
}
@@ -590,8 +594,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(match->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
}
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 649d1700ec5b..3cc1b3dc3c3c 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -24,7 +24,6 @@ struct nft_ng_inc {
u32 modulus;
atomic_t counter;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_inc_gen(priv);
}
-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_inc_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
[NFTA_NG_DREG] = { .type = NLA_U32 },
[NFTA_NG_MODULUS] = { .type = NLA_U32 },
[NFTA_NG_TYPE] = { .type = NLA_U32 },
[NFTA_NG_OFFSET] = { .type = NLA_U32 },
- [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
- .len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_NG_SET_ID] = { .type = NLA_U32 },
};
static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_inc_init(ctx, expr, tb);
-
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
u32 modulus, enum nft_ng_types type, u32 offset)
{
@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
-static int nft_ng_inc_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_inc *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_INCREMENTAL, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
struct nft_ng_random {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_random_gen(priv);
}
-static void nft_ng_random_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_random_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(&regs->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static int nft_ng_random_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_random_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
-static int nft_ng_random_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_random *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_RANDOM, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
static struct nft_expr_type nft_ng_type;
static const struct nft_expr_ops nft_ng_inc_ops = {
.type = &nft_ng_type,
@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
.dump = nft_ng_inc_dump,
};
-static const struct nft_expr_ops nft_ng_inc_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
- .eval = nft_ng_inc_map_eval,
- .init = nft_ng_inc_map_init,
- .dump = nft_ng_inc_map_dump,
-};
-
static const struct nft_expr_ops nft_ng_random_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
.dump = nft_ng_random_dump,
};
-static const struct nft_expr_ops nft_ng_random_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
- .eval = nft_ng_random_map_eval,
- .init = nft_ng_random_map_init,
- .dump = nft_ng_random_map_dump,
-};
-
static const struct nft_expr_ops *
nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
switch (type) {
case NFT_NG_INCREMENTAL:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_inc_map_ops;
return &nft_ng_inc_ops;
case NFT_NG_RANDOM:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_random_map_ops;
return &nft_ng_random_ops;
}
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index ca5e5d8c5ef8..b13618c764ec 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
int err;
u8 ttl;
- if (nla_get_u8(tb[NFTA_OSF_TTL])) {
+ if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
return -EINVAL;
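The nft_osf fix is a classic optional-attribute bug: the old code read the TTL byte before checking that the attribute was present at all, dereferencing a NULL pointer when NFTA_OSF_TTL was omitted. A self-contained illustration of the presence-before-payload ordering, using a stand-in type rather than the netlink API:

#include <stdio.h>
#include <stdint.h>

struct attr {
	uint8_t value;
};

static int parse_ttl(const struct attr *ttl_attr, uint8_t *ttl)
{
	if (ttl_attr) {			/* presence check comes first */
		*ttl = ttl_attr->value;	/* only then read the payload */
		if (*ttl > 2)
			return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t ttl = 0;
	struct attr a = { .value = 1 };

	printf("%d %d\n", parse_ttl(&a, &ttl), parse_ttl(NULL, &ttl));
	return 0;
}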
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index c6acfc2d9c84..eb4cbd244c3d 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
schedule_work(&timer->work);
}
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+ int ret;
+
+ ret = xt_check_proc_name(name, size);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(name, "power") ||
+ !strcmp(name, "subsystem") ||
+ !strcmp(name, "uevent"))
+ return -EINVAL;
+
+ return 0;
+}
+
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
@@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
goto out;
}
+ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+ if (ret < 0)
+ goto out_free_timer;
+
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6bec37ab4472..a4660c48ff01 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
&info->labels.mask);
if (err)
return err;
- } else if (labels_nonzero(&info->labels.mask)) {
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 382196e57a26..bc628acf4f4f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -611,6 +611,7 @@ struct rxrpc_call {
* not hard-ACK'd packet follows this.
*/
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+ u16 tx_backoff; /* Delay to insert due to Tx failure */
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8e7434e92097..468efc3660c0 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
else
ack_at = expiry;
+ ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
container_of(work, struct rxrpc_call, processor);
rxrpc_serial_t *send_ack;
unsigned long now, next, t;
+ unsigned int iterations = 0;
rxrpc_see_call(call);
@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events);
recheck_state:
+ /* Limit the number of times we do this before returning to the manager */
+ iterations++;
+ if (iterations > 5)
+ goto requeue;
+
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_abort_packet(call);
goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
- __rxrpc_queue_call(call);
- goto out;
- }
+ if (call->events && call->state < RXRPC_CALL_COMPLETE)
+ goto requeue;
out_put:
rxrpc_put_call(call, rxrpc_call_put);
out:
_leave("");
+ return;
+
+requeue:
+ __rxrpc_queue_call(call);
+ goto out;
}
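rxrpc_process_call() now caps the number of recheck passes and requeues itself instead of spinning in the work handler while events keep arriving. A toy illustration of that bounded-loop-then-requeue pattern; the numbers and helpers are invented.

#include <stdbool.h>
#include <stdio.h>

static bool more_events(void)
{
	static int pending = 12;

	return pending-- > 0;
}

static void requeue_work(void)
{
	puts("requeued for a later pass");
}

static void process_call(void)
{
	unsigned int iterations = 0;

	while (more_events()) {
		if (++iterations > 5) {
			requeue_work();	/* give other work items a turn */
			return;
		}
		puts("handled one event");
	}
}

int main(void)
{
	process_call();
	return 0;
}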
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 189418888839..736aa9281100 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -35,6 +35,21 @@ struct rxrpc_abort_buffer {
static const char rxrpc_keepalive_string[] = "";
/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+ if (ret < 0) {
+ u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+ if (tx_backoff < HZ)
+ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ } else {
+ WRITE_ONCE(call->tx_backoff, 0);
+ }
+}
+
+/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
* the route through any intervening firewall open.
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
else
trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
rxrpc_tx_point_call_ack);
+ rxrpc_tx_backoff(call, ret);
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
- true, true,
+ false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
else
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
-
+ rxrpc_tx_backoff(call, ret);
rxrpc_put_connection(conn);
return ret;
@@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -445,9 +462,18 @@ done:
rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
rxrpc_timer_set_for_normal);
}
- }
- rxrpc_set_keepalive(call);
+ rxrpc_set_keepalive(call);
+ } else {
+ /* Cancel the call if the initial transmission fails,
+ * particularly if that's due to network routing issues that
+ * aren't going away anytime soon. The layer above can arrange
+ * the retransmission.
+ */
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_USER_ABORT, ret);
+ }
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
goto done;
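The new tx_backoff field grows by one tick on every failed transmission, is capped at HZ, is cleared on success, and is folded into the ACK timer in call_event.c above. A plain C sketch of that bookkeeping follows; the kernel uses READ_ONCE()/WRITE_ONCE() because the field is read from other contexts.

#include <stdio.h>

#define BACKOFF_CAP	100		/* stands in for HZ */

static unsigned short tx_backoff;

static void note_tx_result(int ret)
{
	if (ret < 0) {
		if (tx_backoff < BACKOFF_CAP)
			tx_backoff++;	/* additive backoff, bounded */
	} else {
		tx_backoff = 0;		/* success clears it */
	}
}

int main(void)
{
	note_tx_result(-1);
	note_tx_result(-1);
	printf("delay after failures: %u\n", tx_backoff);
	note_tx_result(0);
	printf("delay after success:  %u\n", tx_backoff);
	return 0;
}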
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1dae5f2b358f..c8cf4d10c435 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (is_redirect) {
skb2->tc_redirected = 1;
skb2->tc_from_ingress = skb2->tc_at_ingress;
-
+ if (skb2->tc_from_ingress)
+ skb2->tstamp = 0;
/* let's the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9aada2d0ef06..c6c327874abc 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct netlink_ext_ack *extack)
{
const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
- int option_len, key_depth, msk_depth = 0;
+ int err, option_len, key_depth, msk_depth = 0;
+
+ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
+ enc_opts_policy, extack);
+ if (err)
+ return err;
nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+ err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+ TCA_FLOWER_KEY_ENC_OPTS_MAX,
+ enc_opts_policy, extack);
+ if (err)
+ return err;
+
nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 57b3ad9394ad..2c38e3d07924 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -648,15 +648,6 @@ deliver:
*/
skb->dev = qdisc_dev(sch);
-#ifdef CONFIG_NET_CLS_ACT
- /*
- * If it's at ingress let's pretend the delay is
- * from the network (tstamp will be updated).
- */
- if (skb->tc_redirected && skb->tc_from_ingress)
- skb->tstamp = 0;
-#endif
-
if (q->slot.slot_next) {
q->slot.packets_left--;
q->slot.bytes_left -= qdisc_pkt_len(skb);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 9cb854b05342..c37e1c2dec9d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
+ sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
}
/* Free the outqueue structure and any related pending chunks.
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 201c3b5bc96b..836727e363c4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
- /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
- if (msg_peer_stopping(hdr))
+ /* If peer is going down we want full re-establish cycle */
+ if (msg_peer_stopping(hdr)) {
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- else if ((mtyp == RESET_MSG) || !link_is_up(l))
+ break;
+ }
+ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+ if (mtyp == RESET_MSG || !link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
- if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index da66e7742282..0ef906499646 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -102,7 +102,8 @@ if [ ! -r "$INITFILE" ]; then
fi
MERGE_LIST=$*
-SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p"
+SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p"
+SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p"
TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
@@ -116,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do
echo "The merge file '$MERGE_FILE' does not exist. Exit." >&2
exit 1
fi
- CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
+ CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE)
for CFG in $CFG_LIST ; do
grep -q -w $CFG $TMP_FILE || continue
@@ -159,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
# Check all specified config values took (might have missed-dependency issues)
-for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
+for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do
REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG")
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 90c9a8ac7adb..f43a274f4f1d 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -81,11 +81,11 @@ else
cp System.map "$tmpdir/boot/System.map-$version"
cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
fi
-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
+cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
-if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
# Only some architectures with OF support have this target
- if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then
+ if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
$MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
fi
fi
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 663a7f343b42..edcad61fe3cd 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -88,6 +88,7 @@ set_debarch() {
version=$KERNELRELEASE
if [ -n "$KDEB_PKGVERSION" ]; then
packageversion=$KDEB_PKGVERSION
+ revision=${packageversion##*-}
else
revision=$(cat .version 2>/dev/null||echo 1)
packageversion=$version-$revision
@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
#!$(command -v $MAKE) -f
build:
- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
binary-arch:
- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
clean:
rm -rf debian/*tmp debian/files
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index e05646dc24dc..009147d4718e 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -12,6 +12,7 @@
# how we were called determines which rpms we build and how we build them
if [ "$1" = prebuilt ]; then
S=DEL
+ MAKE="$MAKE -f $srctree/Makefile"
else
S=
fi
@@ -78,19 +79,19 @@ $S %prep
$S %setup -q
$S
$S %build
-$S make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
+$S $MAKE %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
$S
%install
mkdir -p %{buildroot}/boot
%ifarch ia64
mkdir -p %{buildroot}/boot/efi
- cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
+ cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
%else
- cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
+ cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
%endif
-$M make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install
- make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install
+$M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+ $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
cp .config %{buildroot}/boot/config-$KERNELRELEASE
bzip2 -9 --keep vmlinux
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 79f7dd57d571..71f39410691b 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
fi
# Check for uncommitted changes
- if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
+ if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
printf '%s' -dirty
fi
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 97f49b751e6e..568575b72f2f 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
- snd_hda_gen_add_micmute_led(codec,
- update_tpacpi_micmute) > 0)
+ !snd_hda_gen_add_micmute_led(codec,
+ update_tpacpi_micmute))
removefunc = false;
}
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 12835ea0e417..378c051fa177 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,74 +14,75 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
-#define smp_store_release(p, v) \
-do { \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u8 *)__u.__c) \
- : "memory"); \
- break; \
- case 2: \
- asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u16 *)__u.__c) \
- : "memory"); \
- break; \
- case 4: \
- asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u32 *)__u.__c) \
- : "memory"); \
- break; \
- case 8: \
- asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u64 *)__u.__c) \
- : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
+#define smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (v) }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u8_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u16_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u32_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u64_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
} while (0)
-#define smp_load_acquire(p) \
-({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("ldarb %w0, %1" \
- : "=r" (*(__u8 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 2: \
- asm volatile ("ldarh %w0, %1" \
- : "=r" (*(__u16 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 4: \
- asm volatile ("ldar %w0, %1" \
- : "=r" (*(__u32 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 8: \
- asm volatile ("ldar %0, %1" \
- : "=r" (*(__u64 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
- __u.__val; \
+#define smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (*(__u8_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (*(__u16_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (*(__u32_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (*(__u64_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
+ __u.__val; \
})
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
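The rewritten macros keep the same acquire/release semantics for tools/ code while switching to the __uNN_alias_t types. As a reminder of how the pair is meant to be used, here is a self-contained C11 analogue (a stand-in for the macros, not the macros themselves): the release store publishes the data before the flag, and the acquire load on the flag orders the later data read.

#include <stdatomic.h>
#include <stdio.h>

static int data;
static atomic_int ready;

static void producer(void)
{
	data = 42;
	/* everything written above becomes visible before ready == 1 */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static int consumer(void)
{
	/* if we observe ready == 1, we are guaranteed to see data == 42 */
	if (atomic_load_explicit(&ready, memory_order_acquire))
		return data;
	return -1;
}

int main(void)
{
	producer();
	printf("%d\n", consumer());
	return 0;
}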
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 236b9b97dfdb..667c14e56031 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -55,7 +55,6 @@ counted. The following modifiers exist:
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
W - group is weak and will fallback to non-group if not schedulable,
- only supported in 'perf stat' for now.
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 3ccb4f0bf088..d95655489f7e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -387,7 +387,7 @@ SHELL = $(SHELL_PATH)
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
-arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/
+arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 10cf889c6d75..488779bc4c8d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -391,7 +391,12 @@ try_again:
ui__warning("%s\n", msg);
goto try_again;
}
-
+ if ((errno == EINVAL || errno == EBADF) &&
+ pos->leader != pos &&
+ pos->weak_group) {
+ pos = perf_evlist__reset_weak_group(evlist, pos);
+ goto try_again;
+ }
rc = -errno;
perf_evsel__open_strerror(pos, &opts->target,
errno, msg, sizeof(msg));
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d1028d7755bb..a635abfa77b6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -383,32 +383,6 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
-static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
-{
- struct perf_evsel *c2, *leader;
- bool is_open = true;
-
- leader = evsel->leader;
- pr_debug("Weak group for %s/%d failed\n",
- leader->name, leader->nr_members);
-
- /*
- * for_each_group_member doesn't work here because it doesn't
- * include the first entry.
- */
- evlist__for_each_entry(evsel_list, c2) {
- if (c2 == evsel)
- is_open = false;
- if (c2->leader == leader) {
- if (is_open)
- perf_evsel__close(c2);
- c2->leader = c2;
- c2->nr_members = 0;
- }
- }
- return leader;
-}
-
static bool is_target_alive(struct target *_target,
struct thread_map *threads)
{
@@ -477,7 +451,7 @@ try_again:
if ((errno == EINVAL || errno == EBADF) &&
counter->leader != counter &&
counter->weak_group) {
- counter = perf_evsel__reset_weak_group(counter);
+ counter = perf_evlist__reset_weak_group(evsel_list, counter);
goto try_again;
}
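The helper removed here reappears as perf_evlist__reset_weak_group() so that both 'perf stat' and 'perf record' (see the builtin-record.c hunk above) can fall back when opening a weak-group member fails: siblings opened before the failing member are closed and every member becomes its own leader before retrying. A simplified userspace model of that reset; the types and names are invented.

#include <stdbool.h>
#include <stdio.h>

struct evsel {
	const char *name;
	struct evsel *leader;
	bool opened;
};

static void reset_weak_group(struct evsel *evs, int n, struct evsel *failed)
{
	bool before_failed = true;
	int i;

	for (i = 0; i < n; i++) {
		if (&evs[i] == failed)
			before_failed = false;
		if (evs[i].leader == failed->leader) {
			if (before_failed && evs[i].opened) {
				evs[i].opened = false;		/* close it */
				printf("closed %s\n", evs[i].name);
			}
			evs[i].leader = &evs[i];		/* stands alone now */
		}
	}
}

int main(void)
{
	struct evsel evs[3] = {
		{ "cycles", NULL, true },
		{ "instructions", NULL, true },
		{ "branches", NULL, false },
	};
	int i;

	for (i = 0; i < 3; i++)
		evs[i].leader = &evs[0];
	reset_weak_group(evs, 3, &evs[2]);	/* pretend the third member failed */
	return 0;
}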
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b2838de13de0..aa0c73e57924 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1429,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
}
}
+ if (opts->branch_stack && callchain_param.enabled)
+ symbol_conf.show_branchflag_count = true;
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dc8a6c4986ce..835619476370 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -108,6 +108,7 @@ struct trace {
} stats;
unsigned int max_stack;
unsigned int min_stack;
+ bool raw_augmented_syscalls;
bool not_ev_qualifier;
bool live;
bool full_time;
@@ -1724,13 +1725,28 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
return printed;
}
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
{
void *augmented_args = NULL;
+ /*
+ * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
+ * and there we get all 6 syscall args plus the tracepoint common
+ * fields (sizeof(long)) and the syscall_nr (another long). So we check
+ * if that is the case and if so don't look after the sc->args_size,
+ * but always after the full raw_syscalls:sys_enter payload, which is
+ * fixed.
+ *
+ * We'll revisit this later to pass s->args_size to the BPF augmenter
+ * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
+ * copies only what we need for each syscall, like what happens when we
+ * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
+ * traffic to just what is needed for each syscall.
+ */
+ int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
- *augmented_args_size = sample->raw_size - sc->args_size;
+ *augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
- augmented_args = sample->raw_data + sc->args_size;
+ augmented_args = sample->raw_data + args_size;
return augmented_args;
}
@@ -1780,7 +1796,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1833,7 +1849,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
@@ -3501,7 +3517,15 @@ int cmd_trace(int argc, const char **argv)
evsel->handler = trace__sys_enter;
evlist__for_each_entry(trace.evlist, evsel) {
+ bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
+ if (raw_syscalls_sys_exit) {
+ trace.raw_augmented_syscalls = true;
+ goto init_augmented_syscall_tp;
+ }
+
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
perf_evsel__init_augmented_syscall_tp(evsel);
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
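The args_size arithmetic in syscall__augmented_args() relies on the raw_syscalls:sys_enter payload being eight longs: one for the common tracepoint fields, one for the syscall number and six argument slots, which is also the layout of struct syscall_enter_args in the BPF example added below. A two-line check of that arithmetic on a 64-bit host:

#include <stdio.h>

struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long syscall_nr;
	unsigned long args[6];
};

int main(void)
{
	/* both print 64 on LP64: the fixed prefix before the augmented data */
	printf("%zu %zu\n", 8 * sizeof(long), sizeof(struct syscall_enter_args));
	return 0;
}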
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644
index 000000000000..90a19336310b
--- /dev/null
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscalls:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace'; in the next csets we'll hook these up with the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace-like way.
+ */
+
+#include <stdio.h>
+#include <linux/socket.h>
+
+/* bpf-output associated map */
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ unsigned long args[6];
+};
+
+struct syscall_exit_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long ret;
+};
+
+struct augmented_filename {
+ unsigned int size;
+ int reserved;
+ char value[256];
+};
+
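+/* Syscall numbers below are for x86_64; other architectures use different values. */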
+#define SYS_OPEN 2
+#define SYS_OPENAT 257
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+ struct {
+ struct syscall_enter_args args;
+ struct augmented_filename filename;
+ } augmented_args;
+ unsigned int len = sizeof(augmented_args);
+ const void *filename_arg = NULL;
+
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+ /*
+ * Yonghong and Edward Cree sayz:
+ *
+ * https://www.spinics.net/lists/netdev/msg531645.html
+ *
+ * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
+ * >> 10: (bf) r1 = r6
+ * >> 11: (07) r1 += 16
+ * >> 12: (05) goto pc+2
+ * >> 15: (79) r3 = *(u64 *)(r1 +0)
+ * >> dereference of modified ctx ptr R1 off=16 disallowed
+ * > Aha, we at least got a different error message this time.
+ * > And indeed llvm has done that optimisation, rather than the more obvious
+ * > 11: r3 = *(u64 *)(r1 +16)
+ * > because it wants to have lots of reads share a single insn. You may be able
+ * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
+ * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
+ * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
+ *
+ * The optimization mostly looks like the following:
+ *
+ * br1:
+ * ...
+ * r1 += 16
+ * goto merge
+ * br2:
+ * ...
+ * r1 += 20
+ * goto merge
+ * merge:
+ * *(u64 *)(r1 + 0)
+ *
+ * The compiler tries to merge common loads. There is no easy way to
+ * stop this compiler optimization without turning off a lot of other
+ * optimizations. The easiest way is to add barriers:
+ *
+ * __asm__ __volatile__("": : :"memory")
+ *
+ * after the ctx memory access to prevent their downstream merging.
+ */
+ switch (augmented_args.args.syscall_nr) {
+ case SYS_OPEN: filename_arg = (const void *)args->args[0];
+ __asm__ __volatile__("": : :"memory");
+ break;
+ case SYS_OPENAT: filename_arg = (const void *)args->args[1];
+ break;
+ }
+
+ if (filename_arg != NULL) {
+ augmented_args.filename.reserved = 0;
+ augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
+ sizeof(augmented_args.filename.value),
+ filename_arg);
+ if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
+ len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
+ len &= sizeof(augmented_args.filename.value) - 1;
+ }
+ } else {
+ len = sizeof(augmented_args.args);
+ }
+
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+ return 0;
+}
+
+SEC("raw_syscalls:sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+ return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+}
+
+license(GPL);
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index ac1bcdc17dae..f7eb63cbbc65 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -125,7 +125,7 @@ perf_get_timestamp(void)
}
static int
-debug_cache_init(void)
+create_jit_cache_dir(void)
{
char str[32];
char *base, *p;
@@ -144,8 +144,13 @@ debug_cache_init(void)
strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
-
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
+ " is too long, please check the cwd, JITDUMPDIR, and"
+ " HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
@@ -154,20 +159,32 @@ debug_cache_init(void)
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
-
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
+ " the cwd, JITDUMPDIR, and HOME variables",
+ base, str);
+ return -1;
+ }
p = mkdtemp(jit_path);
if (p != jit_path) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
@@ -228,7 +245,7 @@ void *jvmti_open(void)
{
char dump_path[PATH_MAX];
struct jitheader header;
- int fd;
+ int fd, ret;
FILE *fp;
init_arch_timestamp();
@@ -245,12 +262,22 @@ void *jvmti_open(void)
memset(&header, 0, sizeof(header));
- debug_cache_init();
+ /*
+ * jitdump file dir
+ */
+ if (create_jit_cache_dir() < 0)
+ return NULL;
/*
* jitdump file name
*/
- scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jitdump file full path because"
+ " %s/jit-%i.dump is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", jit_path, getpid());
+ return NULL;
+ }
fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
if (fd == -1)
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 24cb0bd56afa..f278ce5ebab7 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -119,6 +119,14 @@ def dsoname(name):
return "[kernel]"
return name
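+# Index of the n'th occurrence of sub in s, e.g. findnth("a-b-c-d", "-", 2)
+# returns 3; negative if there are fewer than n occurrences.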
+def findnth(s, sub, n, offs=0):
+ pos = s.find(sub)
+ if pos < 0:
+ return pos
+ if n <= 1:
+ return offs + pos
+ return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
+
# Percent to one decimal place
def PercentToOneDP(n, d):
@@ -1464,6 +1472,317 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDialogDataItem():
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ self.glb = glb
+ self.label = label
+ self.placeholder_text = placeholder_text
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+ self.parent = parent
+
+ self.value = ""
+
+ self.widget = QLineEdit()
+ self.widget.editingFinished.connect(self.Validate)
+ self.widget.textChanged.connect(self.Invalidate)
+ self.red = False
+ self.error = ""
+ self.validated = True
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+ if self.table_name == "<timeranges>":
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ if placeholder_text:
+ self.widget.setPlaceholderText(placeholder_text)
+
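+ # Map a user-entered value to matching ids via the configured SQL
+ # table, e.g. a comm or DSO name to its id(s).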
+ def ValueToIds(self, value):
+ ids = []
+ query = QSqlQuery(self.glb.db)
+ stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+ ret = query.exec_(stmt)
+ if ret:
+ while query.next():
+ ids.append(str(query.value(0)))
+ return ids
+
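+ # Find a sample id strictly between lower_id and higher_id: the largest
+ # such id when order is "DESC", otherwise the smallest.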
+ def IdBetween(self, query, lower_id, higher_id, order):
+ QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+ if query.next():
+ return True, int(query.value(0))
+ else:
+ return False, 0
+
+ def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+ query = QSqlQuery(self.glb.db)
+ while True:
+ next_id = int((lower_id + higher_id) / 2)
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ if not query.next():
+ ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+ if not ok:
+ ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+ if not ok:
+ return str(higher_id)
+ next_id = dbid
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ next_time = int(query.value(0))
+ if get_floor:
+ if target_time > next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(higher_id)
+ else:
+ if target_time >= next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(lower_id)
+
+ def ConvertRelativeTime(self, val):
+ print "val ", val
+ mult = 1
+ suffix = val[-2:]
+ if suffix == "ms":
+ mult = 1000000
+ elif suffix == "us":
+ mult = 1000
+ elif suffix == "ns":
+ mult = 1
+ else:
+ return val
+ val = val[:-2].strip()
+ if not self.IsNumber(val):
+ return val
+ val = int(val) * mult
+ if val >= 0:
+ val += self.first_time
+ else:
+ val += self.last_time
+ return str(val)
+
+ def ConvertTimeRange(self, vrange):
+ print "vrange ", vrange
+ if vrange[0] == "":
+ vrange[0] = str(self.first_time)
+ if vrange[1] == "":
+ vrange[1] = str(self.last_time)
+ vrange[0] = self.ConvertRelativeTime(vrange[0])
+ vrange[1] = self.ConvertRelativeTime(vrange[1])
+ print "vrange2 ", vrange
+ if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return False
+ print "ok1"
+ beg_range = max(int(vrange[0]), self.first_time)
+ end_range = min(int(vrange[1]), self.last_time)
+ if beg_range > self.last_time or end_range < self.first_time:
+ return False
+ print "ok2"
+ vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+ vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+ print "vrange3 ", vrange
+ return True
+
+ def AddTimeRange(self, value, ranges):
+ print "value ", value
+ n = value.count("-")
+ if n == 1:
+ pass
+ elif n == 2:
+ if value.split("-")[1].strip() == "":
+ n = 1
+ elif n == 3:
+ n = 2
+ else:
+ return False
+ pos = findnth(value, "-", n)
+ vrange = [value[:pos].strip(), value[pos+1:].strip()]
+ if self.ConvertTimeRange(vrange):
+ ranges.append(vrange)
+ return True
+ return False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
+
+ def Invalidate(self):
+ self.validated = False
+
+ def Validate(self):
+ input_string = self.widget.text()
+ self.validated = True
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+ if not len(input_string.strip()):
+ self.error = ""
+ self.value = ""
+ return
+ if self.table_name == "<timeranges>":
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
+ elif self.table_name == "<ranges>":
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+ elif self.table_name:
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+ else:
+ self.value = input_string.strip()
+ self.error = ""
+ self.parent.ClearMessage()
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(QDialog):
+
+ def __init__(self, glb, parent=None):
+ super(SelectedBranchDialog, self).__init__(parent)
+
+ self.glb = glb
+
+ self.name = ""
+ self.where_clause = ""
+
+ self.setWindowTitle("Selected Branches")
+ self.setMinimumWidth(600)
+
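+ # Each entry: (label, placeholder text, table name or a special
+ # "<timeranges>"/"<ranges>" marker, match column, column name 1,
+ # column name 2), matching SQLTableDialogDataItem.__init__().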
+ items = (
+ ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
+ ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
+ ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
+ ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
+ ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
+ ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
+ ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
+ ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
+ ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
+ )
+ self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+
+ self.grid = QGridLayout()
+
+ for row in xrange(len(self.data_items)):
+ self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+ self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+ self.status = QLabel()
+
+ self.ok_button = QPushButton("Ok", self)
+ self.ok_button.setDefault(True)
+ self.ok_button.released.connect(self.Ok)
+ self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.cancel_button = QPushButton("Cancel", self)
+ self.cancel_button.released.connect(self.reject)
+ self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.hbox = QHBoxLayout()
+ #self.hbox.addStretch()
+ self.hbox.addWidget(self.status)
+ self.hbox.addWidget(self.ok_button)
+ self.hbox.addWidget(self.cancel_button)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addLayout(self.grid)
+ self.vbox.addLayout(self.hbox)
+
+ self.setLayout(self.vbox)
+
+ def Ok(self):
+ self.name = self.data_items[0].value
+ if not self.name:
+ self.ShowMessage("Report name is required")
+ return
+ for d in self.data_items:
+ if not d.IsValid():
+ return
+ for d in self.data_items[1:]:
+ if len(d.value):
+ if len(self.where_clause):
+ self.where_clause += " AND "
+ self.where_clause += d.value
+ if len(self.where_clause):
+ self.where_clause = " AND ( " + self.where_clause + " ) "
+ else:
+ self.ShowMessage("No selection")
+ return
+ self.accept()
+
+ def ShowMessage(self, msg):
+ self.status.setText("<font color=#FF0000>" + msg)
+
+ def ClearMessage(self):
+ self.status.setText("")
+
# Event list
def GetEventList(db):
@@ -1656,7 +1975,7 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
- self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
@@ -1765,6 +2084,149 @@ class WindowMenu():
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+ text-indent: 40px;
+}
+p.c2 {
+ text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#allbranches>1.2 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c1><a href=#tables>2. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+ Call Graph: pt_example
+Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
+v- ls
+ v- 2638:2638
+ v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
+ |- unknown unknown 1 13198 0.1 1 0.0
+ >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
+ >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3
+ v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
+ >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
+ >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
+ >- __libc_csu_init ls 1 10354 0.1 10 0.0
+ |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
+ v- main ls 1 8182043 99.6 180254 99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
+<h2 id=allbranches>1.2 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
+One exception is kcore, where the DSO long name is used (refer to dsos_view on the Tables menu),
+or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
+</ol>
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present. To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.3 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+<h3>1.3.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of the trace. Examples:
+<pre>
+ 81073085947329-81073085958238 From 81073085947329 to 81073085958238
+ 100us-200us From 100us to 200us
+ 10ms- From 10ms to the end
+ -100ns The first 100ns
+ -10ms- The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h1 id=tables>2. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(HelpWindow, self).__init__(parent)
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setWidget(self.text)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+ def __init__(self, parent=None):
+ super(HelpOnlyWindow, self).__init__(parent)
+
+ self.setMinimumSize(200, 100)
+ self.resize(800, 600)
+ self.setWindowTitle("Exported SQL Viewer Help")
+ self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setCentralWidget(self.text)
+
# Font resize
def ResizeFont(widget, diff):
@@ -1851,6 +2313,9 @@ class MainWindow(QMainWindow):
self.window_menu = WindowMenu(self.mdi_area, menu)
+ help_menu = menu.addMenu("&Help")
+ help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
@@ -1888,6 +2353,8 @@ class MainWindow(QMainWindow):
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
+ label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+ reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
@@ -1900,9 +2367,18 @@ class MainWindow(QMainWindow):
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, "", "", self)
+ def NewSelectedBranchView(self, event_id):
+ dialog = SelectedBranchDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
+ def Help(self):
+ HelpWindow(self.glb, self)
+
# XED Disassembler
class xed_state_t(Structure):
@@ -1929,7 +2405,12 @@ class XEDInstruction():
class LibXED():
def __init__(self):
- self.libxed = CDLL("libxed.so")
+ try:
+ self.libxed = CDLL("libxed.so")
+ except:
+ self.libxed = None
+ if not self.libxed:
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
@@ -2097,10 +2578,16 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>"
+ print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
raise Exception("Too few arguments")
dbname = sys.argv[1]
+ if dbname == "--help-only":
+ app = QApplication(sys.argv)
+ mainwindow = HelpOnlyWindow()
+ mainwindow.show()
+ err = app.exec_()
+ sys.exit(err)
is_sqlite3 = False
try:
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index 8a33ca4f9e1f..f0729c454f16 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -37,4 +37,3 @@ sample_freq=0
sample_period=0
freq=0
write_backward=0
-sample_id_all=0
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e88e6f9b1463..668d2a9ef0f4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1810,3 +1810,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist)
leader->forced_leader = true;
}
}
+
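+/*
+ * The weak group led by evsel's leader failed to open: close the members
+ * opened so far and make each event a group of its own so they can be
+ * retried individually.  Returns the old leader.
+ */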
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
+ struct perf_evsel *evsel)
+{
+ struct perf_evsel *c2, *leader;
+ bool is_open = true;
+
+ leader = evsel->leader;
+ pr_debug("Weak group for %s/%d failed\n",
+ leader->name, leader->nr_members);
+
+ /*
+ * for_each_group_member doesn't work here because it doesn't
+ * include the first entry.
+ */
+ evlist__for_each_entry(evsel_list, c2) {
+ if (c2 == evsel)
+ is_open = false;
+ if (c2->leader == leader) {
+ if (is_open)
+ perf_evsel__close(c2);
+ c2->leader = c2;
+ c2->nr_members = 0;
+ }
+ }
+ return leader;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index dc66436add98..9919eed6d15b 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
void perf_evlist__force_leader(struct perf_evlist *evlist);
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 6d187059a373..d37bb1566cd9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -956,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
- attr->sample_id_all = 0;
}
if (opts->no_samples)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 58f6a9ceb590..4503f3ca45ab 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
decoder->have_calc_cyc_to_tsc = false;
intel_pt_calc_cyc_to_tsc(decoder, true);
}
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
@@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
/* Walk PSB+ packets when already in sync. */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index e02bc7b166a0..5e64da270f97 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -31,6 +31,11 @@ static FILE *f;
static char log_name[MAX_LOG_NAME];
bool intel_pt_enable_logging;
+void *intel_pt_log_fp(void)
+{
+ return f;
+}
+
void intel_pt_log_enable(void)
{
intel_pt_enable_logging = true;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index 45b64f93f358..cc084937f701 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -22,6 +22,7 @@
struct intel_pt_pkt;
+void *intel_pt_log_fp(void);
void intel_pt_log_enable(void);
void intel_pt_log_disable(void);
void intel_pt_log_set_name(const char *name);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 86cc9a64e982..149ff361ca78 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
intel_pt_dump(pt, buf, len);
}
+static void intel_pt_log_event(union perf_event *event)
+{
+ FILE *f = intel_pt_log_fp();
+
+ if (!intel_pt_enable_logging || !f)
+ return;
+
+ perf_event__fprintf(event, f);
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
@@ -2010,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session,
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
- intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
- perf_event__name(event->header.type), event->header.type,
- sample->cpu, sample->time, timestamp);
+ intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+ event->header.type, sample->cpu, sample->time, timestamp);
+ intel_pt_log_event(event);
return err;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7799788f662f..7e49baad304d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
if (!is_arm_pmu_core(name)) {
pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
+ if (strcmp(pname, name))
continue;
}