@@ -12,7 +12,7 @@ obj-$(CONFIG_GENERIC_PHY) += phy/ # GPIO must come after pinctrl as gpios may need to mux pins etc obj-$(CONFIG_PINCTRL) += pinctrl/ -obj-y += gpio/ +obj-$(CONFIG_GPIOLIB) += gpio/ obj-y += pwm/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ @@ -78,7 +78,7 @@ obj-$(CONFIG_TARGET_CORE) += target/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ obj-$(CONFIG_SPMI) += spmi/ -obj-y += hsi/ +obj-$(CONFIG_HSI) += hsi/ obj-y += net/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_FUSION) += message/ @@ -122,13 +122,12 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-y += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ -obj-y += leds/ +obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ -obj-$(CONFIG_ARCH_SHMOBILE) += sh/ ifndef CONFIG_ARCH_USES_GETTIMEOFFSET obj-y += clocksource/ endif @@ -213,6 +213,10 @@ config ACPI_CPU_FREQ_PSS bool select THERMAL +config ACPI_PROCESSOR_CSTATE + def_bool y + depends on IA64 || X86 + config ACPI_PROCESSOR_IDLE bool select CPU_IDLE @@ -234,7 +238,7 @@ config ACPI_CPPC_LIB config ACPI_PROCESSOR tristate "Processor" depends on X86 || IA64 || ARM64 - select ACPI_PROCESSOR_IDLE if X86 || IA64 + select ACPI_PROCESSOR_IDLE select ACPI_CPU_FREQ_PSS if X86 || IA64 default y help @@ -291,8 +295,8 @@ config ACPI_THERMAL config ACPI_NUMA bool "NUMA support" depends on NUMA - depends on (X86 || IA64) - default y if IA64_GENERIC || IA64_SGI_SN2 + depends on (X86 || IA64 || ARM64) + default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64 config ACPI_CUSTOM_DSDT_FILE string "Custom DSDT Table file to include" @@ -311,9 +315,12 @@ config ACPI_CUSTOM_DSDT bool default ACPI_CUSTOM_DSDT_FILE != "" +config ARCH_HAS_ACPI_TABLE_UPGRADE + def_bool n + config ACPI_TABLE_UPGRADE bool "Allow upgrading ACPI tables via initrd" - depends on BLK_DEV_INITRD && X86 + depends on BLK_DEV_INITRD && ARCH_HAS_ACPI_TABLE_UPGRADE default y help This option provides functionality to upgrade arbitrary ACPI tables @@ -475,6 +482,7 @@ config ACPI_NFIT_DEBUG issue. source "drivers/acpi/apei/Kconfig" +source "drivers/acpi/dptf/Kconfig" config ACPI_EXTLOG tristate "Extended Error Log support" @@ -519,6 +527,20 @@ config XPOWER_PMIC_OPREGION help This config adds ACPI operation region support for XPower AXP288 PMIC. +config BXT_WC_PMIC_OPREGION + bool "ACPI operation region support for BXT WhiskeyCove PMIC" + depends on INTEL_SOC_PMIC + help + This config adds ACPI operation region support for BXT WhiskeyCove PMIC. + endif +config ACPI_CONFIGFS + tristate "ACPI configfs support" + select CONFIGFS_FS + help + Select this option to enable support for ACPI configuration from + userspace. The configurable ACPI groups will be visible under + /config/acpi, assuming configfs is mounted under /config. 
+
 endif # ACPI
@@ -44,7 +44,6 @@ acpi-y += acpi_lpss.o acpi_apd.o
 acpi-y += acpi_platform.o
 acpi-y += acpi_pnp.o
 acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
-acpi-y += int340x_thermal.o
 acpi-y += power.o
 acpi-y += event.o
 acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
@@ -99,5 +98,9 @@ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
 obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
 obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
 obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
+obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
+
+obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
 
 video-objs += acpi_video.o video_detect.o
+obj-y += dptf/
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
new file mode 100644
index 000000000000..146a77fb762d
--- /dev/null
+++ b/drivers/acpi/acpi_configfs.c
@@ -0,0 +1,265 @@
+/*
+ * ACPI configfs support
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI configfs: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+#include <linux/acpi.h>
+
+static struct config_group *acpi_table_group;
+
+struct acpi_table {
+	struct config_item cfg;
+	struct acpi_table_header *header;
+};
+
+static ssize_t acpi_table_aml_write(struct config_item *cfg,
+				    const void *data, size_t size)
+{
+	const struct acpi_table_header *header = data;
+	struct acpi_table *table;
+	int ret;
+
+	table = container_of(cfg, struct acpi_table, cfg);
+
+	if (table->header) {
+		pr_err("table already loaded\n");
+		return -EBUSY;
+	}
+
+	if (header->length != size) {
+		pr_err("invalid table length\n");
+		return -EINVAL;
+	}
+
+	if (memcmp(header->signature, ACPI_SIG_SSDT, 4)) {
+		pr_err("invalid table signature\n");
+		return -EINVAL;
+	}
+
+	table->header = kmemdup(header, header->length, GFP_KERNEL);
+	if (!table->header)
+		return -ENOMEM;
+
+	ret = acpi_load_table(table->header);
+	if (ret) {
+		kfree(table->header);
+		table->header = NULL;
+	}
+
+	return ret;
+}
+
+static inline struct acpi_table_header *get_header(struct config_item *cfg)
+{
+	struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
+
+	if (!table->header)
+		pr_err("table not loaded\n");
+
+	return table->header;
+}
+
+static ssize_t acpi_table_aml_read(struct config_item *cfg,
+				   void *data, size_t size)
+{
+	struct acpi_table_header *h = get_header(cfg);
+
+	if (!h)
+		return -EINVAL;
+
+	if (data)
+		memcpy(data, h, h->length);
+
+	return h->length;
+}
+
+#define MAX_ACPI_TABLE_SIZE (128 * 1024)
+
+CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
+
+struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+	&acpi_table_attr_aml,
+	NULL,
+};
+
+ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+{
+	struct acpi_table_header *h = get_header(cfg);
+
+	if (!h)
+		return -EINVAL;
+
+	return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+}
+
+ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+{
+	struct acpi_table_header *h = get_header(cfg);
+
+	if (!h)
+		return -EINVAL;
+
+	return sprintf(str, "%d\n", h->length);
+}
+
+ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+{
+	struct acpi_table_header *h = get_header(cfg);
+
+	if (!h)
+		return -EINVAL;
+
+	return sprintf(str, "%d\n",
h->revision); +} + +ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str) +{ + struct acpi_table_header *h = get_header(cfg); + + if (!h) + return -EINVAL; + + return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id); +} + +ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str) +{ + struct acpi_table_header *h = get_header(cfg); + + if (!h) + return -EINVAL; + + return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id); +} + +ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str) +{ + struct acpi_table_header *h = get_header(cfg); + + if (!h) + return -EINVAL; + + return sprintf(str, "%d\n", h->oem_revision); +} + +ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str) +{ + struct acpi_table_header *h = get_header(cfg); + + if (!h) + return -EINVAL; + + return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id); +} + +ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg, + char *str) +{ + struct acpi_table_header *h = get_header(cfg); + + if (!h) + return -EINVAL; + + return sprintf(str, "%d\n", h->asl_compiler_revision); +} + +CONFIGFS_ATTR_RO(acpi_table_, signature); +CONFIGFS_ATTR_RO(acpi_table_, length); +CONFIGFS_ATTR_RO(acpi_table_, revision); +CONFIGFS_ATTR_RO(acpi_table_, oem_id); +CONFIGFS_ATTR_RO(acpi_table_, oem_table_id); +CONFIGFS_ATTR_RO(acpi_table_, oem_revision); +CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id); +CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision); + +struct configfs_attribute *acpi_table_attrs[] = { + &acpi_table_attr_signature, + &acpi_table_attr_length, + &acpi_table_attr_revision, + &acpi_table_attr_oem_id, + &acpi_table_attr_oem_table_id, + &acpi_table_attr_oem_revision, + &acpi_table_attr_asl_compiler_id, + &acpi_table_attr_asl_compiler_revision, + NULL, +}; + +static struct config_item_type acpi_table_type = { + .ct_owner = THIS_MODULE, + .ct_bin_attrs = acpi_table_bin_attrs, + .ct_attrs = acpi_table_attrs, +}; + +static struct config_item *acpi_table_make_item(struct config_group *group, + const char *name) +{ + struct acpi_table *table; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + config_item_init_type_name(&table->cfg, name, &acpi_table_type); + return &table->cfg; +} + +struct configfs_group_operations acpi_table_group_ops = { + .make_item = acpi_table_make_item, +}; + +static struct config_item_type acpi_tables_type = { + .ct_owner = THIS_MODULE, + .ct_group_ops = &acpi_table_group_ops, +}; + +static struct config_item_type acpi_root_group_type = { + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem acpi_configfs = { + .su_group = { + .cg_item = { + .ci_namebuf = "acpi", + .ci_type = &acpi_root_group_type, + }, + }, + .su_mutex = __MUTEX_INITIALIZER(acpi_configfs.su_mutex), +}; + +static int __init acpi_configfs_init(void) +{ + int ret; + struct config_group *root = &acpi_configfs.su_group; + + config_group_init(root); + + ret = configfs_register_subsystem(&acpi_configfs); + if (ret) + return ret; + + acpi_table_group = configfs_register_default_group(root, "table", + &acpi_tables_type); + return PTR_ERR_OR_ZERO(acpi_table_group); +} +module_init(acpi_configfs_init); + +static void __exit acpi_configfs_exit(void) +{ + configfs_unregister_default_group(acpi_table_group); + configfs_unregister_subsystem(&acpi_configfs); +} +module_exit(acpi_configfs_exit); + +MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>"); +MODULE_DESCRIPTION("ACPI configfs support"); 
+MODULE_LICENSE("GPL v2"); @@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len) crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); ret = n; out: - acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret); + acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0); return ret; } @@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len) crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); ret = n; out: - acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret); + acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0); return n; } @@ -13,7 +13,7 @@ * GNU General Public License for more details. */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/acpi.h> #include <acpi/acpi_lpat.h> @@ -157,5 +157,3 @@ void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table } } EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table); - -MODULE_LICENSE("GPL"); @@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss"); #ifdef CONFIG_X86_INTEL_LPSS #include <asm/cpu_device_id.h> +#include <asm/intel-family.h> #include <asm/iosf_mbi.h> #include <asm/pmc_atom.h> @@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = { #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } static const struct x86_cpu_id lpss_cpu_ids[] = { - ICPU(0x37), /* Valleyview, Bay Trail */ - ICPU(0x4c), /* Braswell, Cherry Trail */ + ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */ + ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ {} }; @@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->throttling.duty_width = acpi_gbl_FADT.duty_width; pr->pblk = object.processor.pblk_address; - - /* - * We don't care about error returns - we just try to mark - * these reserved so that nobody else is confused into thinking - * that this region might be unused.. 
- * - * (In particular, allocating the IO range for Cardbus) - */ - request_region(pr->throttling.address, 6, "ACPI CPU throttle"); } /* @@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, } int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br) + struct acpi_video_device_brightness **dev_br, + int *pmax_level) { union acpi_object *obj = NULL; int i, max_level = 0, count = 0, level_ac_battery = 0; @@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device, br->count = count; *dev_br = br; + if (pmax_level) + *pmax_level = max_level; out: kfree(obj); @@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) struct acpi_video_device_brightness *br = NULL; int result = -EINVAL; - result = acpi_video_get_levels(device->dev, &br); + result = acpi_video_get_levels(device->dev, &br, &max_level); if (result) return result; device->brightness = br; @@ -1243,6 +1246,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video) union acpi_object *dod = NULL; union acpi_object *obj; + if (!video->cap._DOD) + return AE_NOT_EXIST; + status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer); if (!ACPI_SUCCESS(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD")); @@ -1737,7 +1743,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) mutex_lock(&video->device_list_lock); list_for_each_entry(dev, &video->video_device_list, entry) { - if (!acpi_video_device_lcd_query_levels(dev, &levels)) + if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels)) kfree(levels); } mutex_unlock(&video->device_list_lock); @@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value, static u8 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) { - u64 address; - if (!reg->access_width) { + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + max_bit_width = 32; + } + /* * Detect old register descriptors where only the bit_width field - * makes senses. The target address is copied to handle possible - * alignment issues. + * makes senses. */ - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!reg->bit_offset && reg->bit_width && + if (reg->bit_width < max_bit_width && + !reg->bit_offset && reg->bit_width && ACPI_IS_POWER_OF_TWO(reg->bit_width) && - ACPI_IS_ALIGNED(reg->bit_width, 8) && - ACPI_IS_ALIGNED(address, reg->bit_width)) { + ACPI_IS_ALIGNED(reg->bit_width, 8)) { return (reg->bit_width); - } else { - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { - return (32); - } else { - return (max_bit_width); - } } + return (max_bit_width); } else { return (1 << (reg->access_width + 2)); } @@ -311,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) { u64 address; - u8 access_width; - u32 bit_width; - u8 bit_offset; - u64 value64; - u32 new_value32, old_value32; - u8 index; acpi_status status; ACPI_FUNCTION_NAME(hw_write); @@ -328,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) return (status); } - /* Convert access_width into number of bits based */ - - access_width = acpi_hw_get_access_bit_width(reg, 32); - bit_width = reg->bit_offset + reg->bit_width; - bit_offset = reg->bit_offset; - /* * Two address spaces supported: Memory or IO. 
PCI_Config is * not supported here because the GAS structure is insufficient */ - index = 0; - while (bit_width) { - /* - * Use offset style bit reads because "Index * AccessWidth" is - * ensured to be less than 32-bits by acpi_hw_validate_register(). - */ - new_value32 = ACPI_GET_BITS(&value, index * access_width, - ACPI_MASK_BITS_ABOVE_32 - (access_width)); - - if (bit_offset >= access_width) { - bit_offset -= access_width; - } else { - /* - * Use offset style bit masks because access_width is ensured - * to be less than 32-bits by acpi_hw_validate_register() and - * bit_offset/bit_width is less than access_width here. - */ - if (bit_offset) { - new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset); - } - if (bit_width < access_width) { - new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width); - } - - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - if (bit_offset || bit_width < access_width) { - /* - * Read old values in order not to modify the bits that - * are beyond the register bit_width/bit_offset setting. - */ - status = - acpi_os_read_memory((acpi_physical_address) - address + - index * - ACPI_DIV_8 - (access_width), - &value64, - access_width); - old_value32 = (u32)value64; - - /* - * Use offset style bit masks because access_width is - * ensured to be less than 32-bits by - * acpi_hw_validate_register() and bit_offset/bit_width is - * less than access_width here. - */ - if (bit_offset) { - old_value32 &= - ACPI_MASK_BITS_ABOVE - (bit_offset); - bit_offset = 0; - } - if (bit_width < access_width) { - old_value32 &= - ACPI_MASK_BITS_BELOW - (bit_width); - } - - new_value32 |= old_value32; - } - - value64 = (u64)new_value32; - status = - acpi_os_write_memory((acpi_physical_address) - address + - index * - ACPI_DIV_8 - (access_width), - value64, access_width); - } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - - if (bit_offset || bit_width < access_width) { - /* - * Read old values in order not to modify the bits that - * are beyond the register bit_width/bit_offset setting. - */ - status = - acpi_hw_read_port((acpi_io_address) - address + - index * - ACPI_DIV_8 - (access_width), - &old_value32, - access_width); - - /* - * Use offset style bit masks because access_width is - * ensured to be less than 32-bits by - * acpi_hw_validate_register() and bit_offset/bit_width is - * less than access_width here. - */ - if (bit_offset) { - old_value32 &= - ACPI_MASK_BITS_ABOVE - (bit_offset); - bit_offset = 0; - } - if (bit_width < access_width) { - old_value32 &= - ACPI_MASK_BITS_BELOW - (bit_width); - } - - new_value32 |= old_value32; - } - - status = acpi_hw_write_port((acpi_io_address) - address + - index * - ACPI_DIV_8 - (access_width), - new_value32, - access_width); - } - } - - /* - * Index * access_width is ensured to be less than 32-bits by - * acpi_hw_validate_register(). - */ - bit_width -= - bit_width > access_width ? 
access_width : bit_width;
-		index++;
+	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+		status = acpi_os_write_memory((acpi_physical_address)
+					      address, (u64)value,
+					      reg->bit_width);
+	} else {	/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+		status = acpi_hw_write_port((acpi_io_address)
+					    address, value, reg->bit_width);
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_IO,
 			  "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-			  value, access_width, ACPI_FORMAT_UINT64(address),
+			  value, reg->bit_width, ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(reg->space_id)));
 
 	return (status);
@@ -3,4 +3,4 @@
 obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
 obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
 obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
-apei-y := apei-base.o hest.o erst.o
+apei-y := apei-base.o hest.o erst.o bert.o
@@ -1,6 +1,6 @@
 /*
  * apei-internal.h - ACPI Platform Error Interface internal
- * definations.
+ * definitions.
  */
 
 #ifndef APEI_INTERNAL_H
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
new file mode 100644
index 000000000000..a05b5c0cf181
--- /dev/null
+++ b/drivers/acpi/apei/bert.c
@@ -0,0 +1,150 @@
+/*
+ * APEI Boot Error Record Table (BERT) support
+ *
+ * Copyright 2011 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * Under normal circumstances, when a hardware error occurs, the error
+ * handler receives control and processes the error. This gives OSPM a
+ * chance to process the error condition, report it, and optionally attempt
+ * recovery. In some cases, the system is unable to process an error.
+ * For example, system firmware or a management controller may choose to
+ * reset the system or the system might experience an uncontrolled crash
+ * or reset. The boot error source is used to report unhandled errors that
+ * occurred in a previous boot. This mechanism is described in the BERT
+ * table.
+ *
+ * For more information about BERT, please refer to ACPI Specification
+ * version 4.0, section 17.3.1
+ *
+ * This file is licensed under GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "apei-internal.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "BERT: " fmt
+
+static int bert_disable;
+
+static void __init bert_print_all(struct acpi_bert_region *region,
+				  unsigned int region_len)
+{
+	struct acpi_hest_generic_status *estatus =
+		(struct acpi_hest_generic_status *)region;
+	int remain = region_len;
+	u32 estatus_len;
+
+	if (!estatus->block_status)
+		return;
+
+	while (remain > sizeof(struct acpi_bert_region)) {
+		if (cper_estatus_check(estatus)) {
+			pr_err(FW_BUG "Invalid error record.\n");
+			return;
+		}
+
+		estatus_len = cper_estatus_len(estatus);
+		if (remain < estatus_len) {
+			pr_err(FW_BUG "Truncated status block (length: %u).\n",
+			       estatus_len);
+			return;
+		}
+
+		pr_info_once("Error records from previous boot:\n");
+
+		cper_estatus_print(KERN_INFO HW_ERR, estatus);
+
+		/*
+		 * Because the boot error source is "one-time polled" type,
+		 * clear Block Status of current Generic Error Status Block,
+		 * once it's printed.
+		 */
+		estatus->block_status = 0;
+
+		estatus = (void *)estatus + estatus_len;
+		/* No more error records.
*/ + if (!estatus->block_status) + return; + + remain -= estatus_len; + } +} + +static int __init setup_bert_disable(char *str) +{ + bert_disable = 1; + + return 0; +} +__setup("bert_disable", setup_bert_disable); + +static int __init bert_check_table(struct acpi_table_bert *bert_tab) +{ + if (bert_tab->header.length < sizeof(struct acpi_table_bert) || + bert_tab->region_length < sizeof(struct acpi_bert_region)) + return -EINVAL; + + return 0; +} + +static int __init bert_init(void) +{ + struct acpi_bert_region *boot_error_region; + struct acpi_table_bert *bert_tab; + unsigned int region_len; + acpi_status status; + int rc = 0; + + if (acpi_disabled) + return 0; + + if (bert_disable) { + pr_info("Boot Error Record Table support is disabled.\n"); + return 0; + } + + status = acpi_get_table(ACPI_SIG_BERT, 0, (struct acpi_table_header **)&bert_tab); + if (status == AE_NOT_FOUND) + return 0; + + if (ACPI_FAILURE(status)) { + pr_err("get table failed, %s.\n", acpi_format_exception(status)); + return -EINVAL; + } + + rc = bert_check_table(bert_tab); + if (rc) { + pr_err(FW_BUG "table invalid.\n"); + return rc; + } + + region_len = bert_tab->region_length; + if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) { + pr_err("Can't request iomem region <%016llx-%016llx>.\n", + (unsigned long long)bert_tab->address, + (unsigned long long)bert_tab->address + region_len - 1); + return -EIO; + } + + boot_error_region = ioremap_cache(bert_tab->address, region_len); + if (boot_error_region) { + bert_print_all(boot_error_region, region_len); + iounmap(boot_error_region); + } else { + rc = -ENOMEM; + } + + release_mem_region(bert_tab->address, region_len); + + return rc; +} + +late_initcall(bert_init); @@ -33,7 +33,8 @@ #include "apei-internal.h" -#define EINJ_PFX "EINJ: " +#undef pr_fmt +#define pr_fmt(fmt) "EINJ: " fmt #define SPIN_UNIT 100 /* 100ns */ /* Firmware should respond within 1 milliseconds */ @@ -179,8 +180,7 @@ static int einj_get_available_error_type(u32 *type) static int einj_timedout(u64 *t) { if ((s64)*t < SPIN_UNIT) { - pr_warning(FW_WARN EINJ_PFX - "Firmware does not respond in time\n"); + pr_warning(FW_WARN "Firmware does not respond in time\n"); return 1; } *t -= SPIN_UNIT; @@ -307,8 +307,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { - pr_err(EINJ_PFX - "Can not request [mem %#010llx-%#010llx] for Trigger table\n", + pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n", (unsigned long long)trigger_paddr, (unsigned long long)trigger_paddr + sizeof(*trigger_tab) - 1); @@ -316,13 +315,12 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, } trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); if (!trigger_tab) { - pr_err(EINJ_PFX "Failed to map trigger table!\n"); + pr_err("Failed to map trigger table!\n"); goto out_rel_header; } rc = einj_check_trigger_header(trigger_tab); if (rc) { - pr_warning(FW_BUG EINJ_PFX - "The trigger error action table is invalid\n"); + pr_warning(FW_BUG "Invalid trigger error action table.\n"); goto out_rel_header; } @@ -336,8 +334,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, table_size - sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { - pr_err(EINJ_PFX -"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", + pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", (unsigned long long)trigger_paddr + sizeof(*trigger_tab), 
(unsigned long long)trigger_paddr + table_size - 1);
 		goto out_rel_header;
 	}
@@ -345,7 +342,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
 	iounmap(trigger_tab);
 	trigger_tab = ioremap_cache(trigger_paddr, table_size);
 	if (!trigger_tab) {
-		pr_err(EINJ_PFX "Failed to map trigger table!\n");
+		pr_err("Failed to map trigger table!\n");
 		goto out_rel_entry;
 	}
 	trigger_entry = (struct acpi_whea_header *)
@@ -695,34 +692,42 @@ static int __init einj_init(void)
 	struct dentry *fentry;
 	struct apei_exec_context ctx;
 
-	if (acpi_disabled)
+	if (acpi_disabled) {
+		pr_warn("ACPI disabled.\n");
 		return -ENODEV;
+	}
 
 	status = acpi_get_table(ACPI_SIG_EINJ, 0,
 				(struct acpi_table_header **)&einj_tab);
-	if (status == AE_NOT_FOUND)
+	if (status == AE_NOT_FOUND) {
+		pr_warn("EINJ table not found.\n");
 		return -ENODEV;
+	}
 	else if (ACPI_FAILURE(status)) {
-		const char *msg = acpi_format_exception(status);
-		pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+		pr_err("Failed to get EINJ table: %s\n",
+		       acpi_format_exception(status));
 		return -EINVAL;
 	}
 
 	rc = einj_check_table(einj_tab);
 	if (rc) {
-		pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+		pr_warn(FW_BUG "Invalid EINJ table.\n");
 		return -EINVAL;
 	}
 
 	rc = -ENOMEM;
 	einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
-	if (!einj_debug_dir)
+	if (!einj_debug_dir) {
+		pr_err("Error creating debugfs node.\n");
 		goto err_cleanup;
+	}
+
 	fentry = debugfs_create_file("available_error_type", S_IRUSR,
 				     einj_debug_dir, NULL,
 				     &available_error_type_fops);
 	if (!fentry)
		goto err_cleanup;
+
 	fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
 				     einj_debug_dir, NULL, &error_type_fops);
 	if (!fentry)
@@ -735,14 +740,22 @@ static int __init einj_init(void)
 	apei_resources_init(&einj_resources);
 	einj_exec_ctx_init(&ctx);
 	rc = apei_exec_collect_resources(&ctx, &einj_resources);
-	if (rc)
+	if (rc) {
+		pr_err("Error collecting EINJ resources.\n");
 		goto err_fini;
+	}
+
 	rc = apei_resources_request(&einj_resources, "APEI EINJ");
-	if (rc)
+	if (rc) {
+		pr_err("Error requesting memory/port resources.\n");
 		goto err_fini;
+	}
+
 	rc = apei_exec_pre_map_gars(&ctx);
-	if (rc)
+	if (rc) {
+		pr_err("Error pre-mapping GARs.\n");
 		goto err_release;
+	}
 
 	rc = -ENOMEM;
 	einj_param = einj_get_parameter_address();
@@ -787,7 +800,7 @@ static int __init einj_init(void)
 		goto err_unmap;
 	}
 
-	pr_info(EINJ_PFX "Error INJection is initialized.\n");
+	pr_info("Error INJection is initialized.\n");
 
 	return 0;
 
@@ -798,6 +811,7 @@ err_unmap:
 			sizeof(struct einj_parameter);
 
 		acpi_os_unmap_iomem(einj_param, size);
+		pr_err("Error creating param extension debugfs nodes.\n");
 	}
 	apei_exec_post_unmap_gars(&ctx);
 err_release:
@@ -805,6 +819,7 @@ err_release:
 err_fini:
 	apei_resources_fini(&einj_resources);
 err_cleanup:
+	pr_err("Error creating primary debugfs nodes.\n");
 	debugfs_remove_recursive(einj_debug_dir);
 
 	return rc;
@@ -927,7 +927,8 @@ static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
 			   struct timespec *time, char **buf,
-			   bool *compressed, struct pstore_info *psi);
+			   bool *compressed, ssize_t *ecc_notice_size,
+			   struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
 		       u64 *id, unsigned int part, int count, bool compressed,
 		       size_t size, struct pstore_info *psi);
@@ -987,7 +988,8 @@ static int erst_close_pstore(struct pstore_info *psi)
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf, - bool *compressed, struct pstore_info *psi) + bool *compressed, ssize_t *ecc_notice_size, + struct pstore_info *psi) { int rc; ssize_t len = 0; @@ -1033,6 +1035,7 @@ skip: memcpy(*buf, rcd->data, len - sizeof(*rcd)); *id = record_id; *compressed = false; + *ecc_notice_size = 0; if (uuid_le_cmp(rcd->sec_hdr.section_type, CPER_SECTION_TYPE_DMESG_Z) == 0) { *type = PSTORE_TYPE_DMESG; @@ -30,6 +30,9 @@ #include <linux/acpi.h> #include <linux/slab.h> #include <linux/regulator/machine.h> +#include <linux/workqueue.h> +#include <linux/reboot.h> +#include <linux/delay.h> #ifdef CONFIG_X86 #include <asm/mpspec.h> #endif @@ -174,22 +177,17 @@ void acpi_bus_detach_private_data(acpi_handle handle) EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data); static void acpi_print_osc_error(acpi_handle handle, - struct acpi_osc_context *context, char *error) + struct acpi_osc_context *context, char *error) { - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; int i; - if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) - printk(KERN_DEBUG "%s: %s\n", context->uuid_str, error); - else { - printk(KERN_DEBUG "%s (%s): %s\n", - (char *)buffer.pointer, context->uuid_str, error); - kfree(buffer.pointer); - } - printk(KERN_DEBUG "_OSC request data:"); + acpi_handle_debug(handle, "(%s): %s\n", context->uuid_str, error); + + pr_debug("_OSC request data:"); for (i = 0; i < context->cap.length; i += sizeof(u32)) - printk(" %x", *((u32 *)(context->cap.pointer + i))); - printk("\n"); + pr_debug(" %x", *((u32 *)(context->cap.pointer + i))); + + pr_debug("\n"); } acpi_status acpi_str_to_uuid(char *str, u8 *uuid) @@ -302,6 +300,14 @@ out_kfree: EXPORT_SYMBOL(acpi_run_osc); bool osc_sb_apei_support_acked; + +/* + * ACPI 6.0 Section 8.4.4.2 Idle State Coordination + * OSPM supports platform coordinated low power idle(LPI) states + */ +bool osc_pc_lpi_support_confirmed; +EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed); + static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; static void acpi_bus_osc_support(void) { @@ -322,6 +328,7 @@ static void acpi_bus_osc_support(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; if (!ghes_disable) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT; @@ -329,9 +336,12 @@ static void acpi_bus_osc_support(void) return; if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { u32 *capbuf_ret = context.ret.pointer; - if (context.ret.length > OSC_SUPPORT_DWORD) + if (context.ret.length > OSC_SUPPORT_DWORD) { osc_sb_apei_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; + osc_pc_lpi_support_confirmed = + capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; + } kfree(context.ret.pointer); } /* do we need to check other returned cap? 
Sounds no */ @@ -475,6 +485,56 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device) acpi_device_notify); } +/* Handle events targeting \_SB device (at present only graceful shutdown) */ + +#define ACPI_SB_NOTIFY_SHUTDOWN_REQUEST 0x81 +#define ACPI_SB_INDICATE_INTERVAL 10000 + +static void sb_notify_work(struct work_struct *dummy) +{ + acpi_handle sb_handle; + + orderly_poweroff(true); + + /* + * After initiating graceful shutdown, the ACPI spec requires OSPM + * to evaluate _OST method once every 10seconds to indicate that + * the shutdown is in progress + */ + acpi_get_handle(NULL, "\\_SB", &sb_handle); + while (1) { + pr_info("Graceful shutdown in progress.\n"); + acpi_evaluate_ost(sb_handle, ACPI_OST_EC_OSPM_SHUTDOWN, + ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS, NULL); + msleep(ACPI_SB_INDICATE_INTERVAL); + } +} + +static void acpi_sb_notify(acpi_handle handle, u32 event, void *data) +{ + static DECLARE_WORK(acpi_sb_work, sb_notify_work); + + if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) { + if (!work_busy(&acpi_sb_work)) + schedule_work(&acpi_sb_work); + } else + pr_warn("event %x is not supported by \\_SB device\n", event); +} + +static int __init acpi_setup_sb_notify_handler(void) +{ + acpi_handle sb_handle; + + if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &sb_handle))) + return -ENXIO; + + if (ACPI_FAILURE(acpi_install_notify_handler(sb_handle, ACPI_DEVICE_NOTIFY, + acpi_sb_notify, NULL))) + return -EINVAL; + + return 0; +} + /* -------------------------------------------------------------------------- Device Matching -------------------------------------------------------------------------- */ @@ -961,8 +1021,7 @@ void __init acpi_early_init(void) /** * acpi_subsystem_init - Finalize the early initialization of ACPI. * - * Switch over the platform to the ACPI mode (if possible), initialize the - * handling of ACPI events, install the interrupt and global lock handlers. + * Switch over the platform to the ACPI mode (if possible). * * Doing this too early is generally unsafe, but at the same time it needs to be * done before all things that really depend on ACPI. The right spot appears to @@ -990,6 +1049,13 @@ void __init acpi_subsystem_init(void) } } +static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context) +{ + acpi_scan_table_handler(event, table, context); + + return acpi_sysfs_table_handler(event, table, context); +} + static int __init acpi_bus_init(void) { int result; @@ -1043,6 +1109,8 @@ static int __init acpi_bus_init(void) * _PDC control method may load dynamic SSDT tables, * and we need to install the table handler before that. */ + status = acpi_install_table_handler(acpi_bus_table_handler, NULL); + acpi_sysfs_init(); acpi_early_processor_set_pdc(); @@ -1051,7 +1119,7 @@ static int __init acpi_bus_init(void) * Maybe EC region is required at bus_scan/acpi_get_devices. So it * is necessary to enable it as early as possible. 
*/ - acpi_boot_ec_enable(); + acpi_ec_dsdt_probe(); printk(KERN_INFO PREFIX "Interpreter enabled\n"); @@ -1124,6 +1192,7 @@ static int __init acpi_init(void) acpi_sleep_proc_init(); acpi_wakeup_device_init(); acpi_debugger_init(); + acpi_setup_sb_notify_handler(); return 0; } @@ -53,6 +53,10 @@ #define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" #define ACPI_BUTTON_TYPE_LID 0x05 +#define ACPI_BUTTON_LID_INIT_IGNORE 0x00 +#define ACPI_BUTTON_LID_INIT_OPEN 0x01 +#define ACPI_BUTTON_LID_INIT_METHOD 0x02 + #define _COMPONENT ACPI_BUTTON_COMPONENT ACPI_MODULE_NAME("button"); @@ -105,6 +109,7 @@ struct acpi_button { static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); static struct acpi_device *lid_device; +static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; /* -------------------------------------------------------------------------- FS Interface (/proc) @@ -113,16 +118,52 @@ static struct acpi_device *lid_device; static struct proc_dir_entry *acpi_button_dir; static struct proc_dir_entry *acpi_lid_dir; +static int acpi_lid_evaluate_state(struct acpi_device *device) +{ + unsigned long long lid_state; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_LID", NULL, &lid_state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return lid_state ? 1 : 0; +} + +static int acpi_lid_notify_state(struct acpi_device *device, int state) +{ + struct acpi_button *button = acpi_driver_data(device); + int ret; + + /* input layer checks if event is redundant */ + input_report_switch(button->input, SW_LID, !state); + input_sync(button->input); + + if (state) + pm_wakeup_event(&device->dev, 0); + + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); + if (ret == NOTIFY_DONE) + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, + device); + if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { + /* + * It is also regarded as success if the notifier_chain + * returns NOTIFY_OK or NOTIFY_DONE. + */ + ret = 0; + } + return ret; +} + static int acpi_button_state_seq_show(struct seq_file *seq, void *offset) { struct acpi_device *device = seq->private; - acpi_status status; - unsigned long long state; + int state; - status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); + state = acpi_lid_evaluate_state(device); seq_printf(seq, "state: %s\n", - ACPI_FAILURE(status) ? "unsupported" : - (state ? "open" : "closed")); + state < 0 ? "unsupported" : (state ? 
"open" : "closed")); return 0; } @@ -231,51 +272,37 @@ EXPORT_SYMBOL(acpi_lid_notifier_unregister); int acpi_lid_open(void) { - acpi_status status; - unsigned long long state; - if (!lid_device) return -ENODEV; - status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, - &state); - if (ACPI_FAILURE(status)) - return -ENODEV; - - return !!state; + return acpi_lid_evaluate_state(lid_device); } EXPORT_SYMBOL(acpi_lid_open); -static int acpi_lid_send_state(struct acpi_device *device) +static int acpi_lid_update_state(struct acpi_device *device) { - struct acpi_button *button = acpi_driver_data(device); - unsigned long long state; - acpi_status status; - int ret; - - status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); - if (ACPI_FAILURE(status)) - return -ENODEV; + int state; - /* input layer checks if event is redundant */ - input_report_switch(button->input, SW_LID, !state); - input_sync(button->input); + state = acpi_lid_evaluate_state(device); + if (state < 0) + return state; - if (state) - pm_wakeup_event(&device->dev, 0); + return acpi_lid_notify_state(device, state); +} - ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); - if (ret == NOTIFY_DONE) - ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, - device); - if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { - /* - * It is also regarded as success if the notifier_chain - * returns NOTIFY_OK or NOTIFY_DONE. - */ - ret = 0; +static void acpi_lid_initialize_state(struct acpi_device *device) +{ + switch (lid_init_state) { + case ACPI_BUTTON_LID_INIT_OPEN: + (void)acpi_lid_notify_state(device, 1); + break; + case ACPI_BUTTON_LID_INIT_METHOD: + (void)acpi_lid_update_state(device); + break; + case ACPI_BUTTON_LID_INIT_IGNORE: + default: + break; } - return ret; } static void acpi_button_notify(struct acpi_device *device, u32 event) @@ -290,7 +317,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) case ACPI_BUTTON_NOTIFY_STATUS: input = button->input; if (button->type == ACPI_BUTTON_TYPE_LID) { - acpi_lid_send_state(device); + acpi_lid_update_state(device); } else { int keycode; @@ -335,7 +362,7 @@ static int acpi_button_resume(struct device *dev) button->suspended = false; if (button->type == ACPI_BUTTON_TYPE_LID) - return acpi_lid_send_state(device); + acpi_lid_initialize_state(device); return 0; } #endif @@ -416,7 +443,7 @@ static int acpi_button_add(struct acpi_device *device) if (error) goto err_remove_fs; if (button->type == ACPI_BUTTON_TYPE_LID) { - acpi_lid_send_state(device); + acpi_lid_initialize_state(device); /* * This assumes there's only one lid device, or if there are * more we only care about the last one... 
@@ -446,4 +473,42 @@ static int acpi_button_remove(struct acpi_device *device) return 0; } +static int param_set_lid_init_state(const char *val, struct kernel_param *kp) +{ + int result = 0; + + if (!strncmp(val, "open", sizeof("open") - 1)) { + lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; + pr_info("Notify initial lid state as open\n"); + } else if (!strncmp(val, "method", sizeof("method") - 1)) { + lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; + pr_info("Notify initial lid state with _LID return value\n"); + } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { + lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; + pr_info("Do not notify initial lid state\n"); + } else + result = -EINVAL; + return result; +} + +static int param_get_lid_init_state(char *buffer, struct kernel_param *kp) +{ + switch (lid_init_state) { + case ACPI_BUTTON_LID_INIT_OPEN: + return sprintf(buffer, "open"); + case ACPI_BUTTON_LID_INIT_METHOD: + return sprintf(buffer, "method"); + case ACPI_BUTTON_LID_INIT_IGNORE: + return sprintf(buffer, "ignore"); + default: + return sprintf(buffer, "invalid"); + } + return 0; +} + +module_param_call(lid_init_state, + param_set_lid_init_state, param_get_lid_init_state, + NULL, 0644); +MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); + module_acpi_driver(acpi_button_driver); @@ -299,8 +299,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) continue; cpc_ptr = per_cpu(cpc_desc_ptr, i); - if (!cpc_ptr) - continue; + if (!cpc_ptr) { + retval = -EFAULT; + goto err_ret; + } pdomain = &(cpc_ptr->domain_info); cpumask_set_cpu(i, pr->shared_cpu_map); @@ -322,8 +324,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) continue; match_cpc_ptr = per_cpu(cpc_desc_ptr, j); - if (!match_cpc_ptr) - continue; + if (!match_cpc_ptr) { + retval = -EFAULT; + goto err_ret; + } match_pdomain = &(match_cpc_ptr->domain_info); if (match_pdomain->domain != pdomain->domain) @@ -353,8 +357,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data) continue; match_cpc_ptr = per_cpu(cpc_desc_ptr, j); - if (!match_cpc_ptr) - continue; + if (!match_cpc_ptr) { + retval = -EFAULT; + goto err_ret; + } match_pdomain = &(match_cpc_ptr->domain_info); if (match_pdomain->domain != pdomain->domain) @@ -595,9 +601,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; - /* Plug it into this CPUs CPC descriptor. */ - per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; - /* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); if (ret) @@ -610,6 +613,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } + /* Plug PSD data into this CPUs CPC descriptor. 
*/
+	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
 	/* Everything looks okay */
 	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
@@ -21,7 +21,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -33,12 +33,7 @@
 
 #include "internal.h"
 
-#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
-
 ACPI_MODULE_NAME("dock");
-MODULE_AUTHOR("Kristen Carlson Accardi");
-MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
-MODULE_LICENSE("GPL");
 
 static bool immediate_undock = 1;
 module_param(immediate_undock, bool, 0644);
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
new file mode 100644
index 000000000000..ac0a6ed0cf46
--- /dev/null
+++ b/drivers/acpi/dptf/Kconfig
@@ -0,0 +1,15 @@
+config DPTF_POWER
+	tristate "DPTF Platform Power Participant"
+	depends on X86
+	help
+	  This driver adds support for the Dynamic Platform and Thermal
+	  Framework (DPTF) Platform Power Participant device (INT3407).
+	  This participant is responsible for exposing platform telemetry:
+	      max_platform_power
+	      platform_power_source
+	      adapter_rating
+	      battery_steady_power
+	      charger_type
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called dptf_power.
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
new file mode 100644
index 000000000000..06ea8809583d
--- /dev/null
+++ b/drivers/acpi/dptf/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ACPI) += int340x_thermal.o
+obj-$(CONFIG_DPTF_POWER) += dptf_power.o
+
+ccflags-y += -Idrivers/acpi
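Once the dptf_power.c driver below binds to an INT3407 device, each telemetry value appears as a read-only attribute in the "dptf_power" sysfs group of the platform device. A small userspace sketch of reading one of them follows; it is not part of the patch, and the "INT3407:00" instance name in the path is a typical but hypothetical example (the group and attribute names come from the driver itself).

#include <stdio.h>

int main(void)
{
	/* "dptf_power" group and attribute name are created by the driver;
	 * the INT3407:00 device instance name is illustrative */
	FILE *f = fopen("/sys/bus/platform/devices/INT3407:00"
			"/dptf_power/max_platform_power_mw", "r");
	int mw;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &mw) == 1)
		printf("max platform power: %d mW\n", mw);
	fclose(f);
	return 0;
}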
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
new file mode 100644
index 000000000000..734642dc5008
--- /dev/null
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -0,0 +1,128 @@
+/*
+ * dptf_power: DPTF platform power driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+/*
+ * Presentation of attributes which are defined for INT3407. They are:
+ * PMAX : Maximum platform power
+ * PSRC : Platform power source
+ * ARTG : Adapter rating
+ * CTYP : Charger type
+ * PBSS : Battery steady power
+ */
+#define DPTF_POWER_SHOW(name, object) \
+static ssize_t name##_show(struct device *dev,\
+			   struct device_attribute *attr,\
+			   char *buf)\
+{\
+	struct platform_device *pdev = to_platform_device(dev);\
+	struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+	unsigned long long val;\
+	acpi_status status;\
+\
+	status = acpi_evaluate_integer(acpi_dev->handle, #object,\
+				       NULL, &val);\
+	if (ACPI_SUCCESS(status))\
+		return sprintf(buf, "%d\n", (int)val);\
+	else \
+		return -EINVAL;\
+}
+
+DPTF_POWER_SHOW(max_platform_power_mw, PMAX)
+DPTF_POWER_SHOW(platform_power_source, PSRC)
+DPTF_POWER_SHOW(adapter_rating_mw, ARTG)
+DPTF_POWER_SHOW(battery_steady_power_mw, PBSS)
+DPTF_POWER_SHOW(charger_type, CTYP)
+
+static DEVICE_ATTR_RO(max_platform_power_mw);
+static DEVICE_ATTR_RO(platform_power_source);
+static DEVICE_ATTR_RO(adapter_rating_mw);
+static DEVICE_ATTR_RO(battery_steady_power_mw);
+static DEVICE_ATTR_RO(charger_type);
+
+static struct attribute *dptf_power_attrs[] = {
+	&dev_attr_max_platform_power_mw.attr,
+	&dev_attr_platform_power_source.attr,
+	&dev_attr_adapter_rating_mw.attr,
+	&dev_attr_battery_steady_power_mw.attr,
+	&dev_attr_charger_type.attr,
+	NULL
+};
+
+static struct attribute_group dptf_power_attribute_group = {
+	.attrs = dptf_power_attrs,
+	.name = "dptf_power"
+};
+
+static int dptf_power_add(struct platform_device *pdev)
+{
+	struct acpi_device *acpi_dev;
+	acpi_status status;
+	unsigned long long ptype;
+	int result;
+
+	acpi_dev = ACPI_COMPANION(&(pdev->dev));
+	if (!acpi_dev)
+		return -ENODEV;
+
+	status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	if (ptype != 0x11)
+		return -ENODEV;
+
+	result = sysfs_create_group(&pdev->dev.kobj,
+				    &dptf_power_attribute_group);
+	if (result)
+		return result;
+
+	platform_set_drvdata(pdev, acpi_dev);
+
+	return 0;
+}
+
+static int dptf_power_remove(struct platform_device *pdev)
+{
+
+	sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
+
+	return 0;
+}
+
+static const struct acpi_device_id int3407_device_ids[] = {
+	{"INT3407", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
+
+static struct platform_driver dptf_power_driver = {
+	.probe = dptf_power_add,
+	.remove = dptf_power_remove,
+	.driver = {
+		.name = "DPTF Platform Power",
+		.acpi_match_table = int3407_device_ids,
+	},
+};
+
+module_platform_driver(dptf_power_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI DPTF platform power driver");
@@ -1331,8 +1331,6 @@ static int ec_install_handlers(struct acpi_ec *ec)
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
-	acpi_ec_stop(ec, false);
-
 	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
 		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
 					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
@@ -1340,6 +1338,19 @@ static void ec_remove_handlers(struct acpi_ec *ec)
 		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
 	}
 
+	/*
+	 * Stops handling the EC transactions after removing the operation
+	 * region handler. This is required because _REG(DISCONNECT)
+	 * invoked during the removal can result in new EC transactions.
+	 *
+	 * Flushes the EC requests and thus disables the GPE before
+	 * removing the GPE handler.
This is required by the current ACPICA + * GPE core. ACPICA GPE core will automatically disable a GPE when + * it is indicated but there is no way to handle it. So the drivers + * must disable the GPEs prior to removing the GPE handlers. + */ + acpi_ec_stop(ec, false); + if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) { if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler))) @@ -1348,13 +1359,9 @@ static void ec_remove_handlers(struct acpi_ec *ec) } } -static int acpi_ec_add(struct acpi_device *device) +static struct acpi_ec *acpi_ec_alloc(void) { - struct acpi_ec *ec = NULL; - int ret; - - strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_EC_CLASS); + struct acpi_ec *ec; /* Check for boot EC */ if (boot_ec) { @@ -1365,9 +1372,21 @@ static int acpi_ec_add(struct acpi_device *device) first_ec = NULL; } else { ec = make_acpi_ec(); - if (!ec) - return -ENOMEM; } + return ec; +} + +static int acpi_ec_add(struct acpi_device *device) +{ + struct acpi_ec *ec = NULL; + int ret; + + strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_EC_CLASS); + + ec = acpi_ec_alloc(); + if (!ec) + return -ENOMEM; if (ec_parse_device(device->handle, 0, ec, NULL) != AE_CTRL_TERMINATE) { kfree(ec); @@ -1446,22 +1465,41 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) return AE_OK; } -int __init acpi_boot_ec_enable(void) -{ - if (!boot_ec) - return 0; - if (!ec_install_handlers(boot_ec)) { - first_ec = boot_ec; - return 0; - } - return -EFAULT; -} - static const struct acpi_device_id ec_device_ids[] = { {"PNP0C09", 0}, {"", 0}, }; +int __init acpi_ec_dsdt_probe(void) +{ + acpi_status status; + struct acpi_ec *ec; + int ret; + + ec = acpi_ec_alloc(); + if (!ec) + return -ENOMEM; + /* + * Finding EC from DSDT if there is no ECDT EC available. When this + * function is invoked, ACPI tables have been fully loaded, we can + * walk namespace now. + */ + status = acpi_get_devices(ec_device_ids[0].id, + ec_parse_device, ec, NULL); + if (ACPI_FAILURE(status) || !ec->handle) { + ret = -ENODEV; + goto error; + } + ret = ec_install_handlers(ec); + +error: + if (ret) + kfree(ec); + else + first_ec = boot_ec = ec; + return ret; +} + #if 0 /* * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not @@ -1503,6 +1541,11 @@ static int ec_clear_on_resume(const struct dmi_system_id *id) return 0; } +/* + * Some ECDTs contain wrong register addresses. 
+ * MSI MS-171F + * https://bugzilla.kernel.org/show_bug.cgi?id=12461 + */ static int ec_correct_ecdt(const struct dmi_system_id *id) { pr_debug("Detected system needing ECDT address correction.\n"); @@ -1512,16 +1555,6 @@ static int ec_correct_ecdt(const struct dmi_system_id *id) static struct dmi_system_id ec_dmi_table[] __initdata = { { - ec_correct_ecdt, "Asus L4R", { - DMI_MATCH(DMI_BIOS_VERSION, "1008.006"), - DMI_MATCH(DMI_PRODUCT_NAME, "L4R"), - DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL}, - { - ec_correct_ecdt, "Asus M6R", { - DMI_MATCH(DMI_BIOS_VERSION, "0207"), - DMI_MATCH(DMI_PRODUCT_NAME, "M6R"), - DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL}, - { ec_correct_ecdt, "MSI MS-171F", { DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, @@ -1533,12 +1566,13 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { int __init acpi_ec_ecdt_probe(void) { - int ret = 0; + int ret; acpi_status status; struct acpi_table_ecdt *ecdt_ptr; + struct acpi_ec *ec; - boot_ec = make_acpi_ec(); - if (!boot_ec) + ec = acpi_ec_alloc(); + if (!ec) return -ENOMEM; /* * Generate a boot ec context @@ -1562,28 +1596,20 @@ int __init acpi_ec_ecdt_probe(void) pr_info("EC description table is found, configuring boot EC\n"); if (EC_FLAGS_CORRECT_ECDT) { - /* - * Asus L4R, Asus M6R - * https://bugzilla.kernel.org/show_bug.cgi?id=9399 - * MSI MS-171F - * https://bugzilla.kernel.org/show_bug.cgi?id=12461 - */ - boot_ec->command_addr = ecdt_ptr->data.address; - boot_ec->data_addr = ecdt_ptr->control.address; + ec->command_addr = ecdt_ptr->data.address; + ec->data_addr = ecdt_ptr->control.address; } else { - boot_ec->command_addr = ecdt_ptr->control.address; - boot_ec->data_addr = ecdt_ptr->data.address; + ec->command_addr = ecdt_ptr->control.address; + ec->data_addr = ecdt_ptr->data.address; } - boot_ec->gpe = ecdt_ptr->gpe; - boot_ec->handle = ACPI_ROOT_OBJECT; - ret = ec_install_handlers(boot_ec); - if (!ret) - first_ec = boot_ec; + ec->gpe = ecdt_ptr->gpe; + ec->handle = ACPI_ROOT_OBJECT; + ret = ec_install_handlers(ec); error: - if (ret) { - kfree(boot_ec); - boot_ec = NULL; - } + if (ret) + kfree(ec); + else + first_ec = boot_ec = ec; return ret; } @@ -87,6 +87,9 @@ bool acpi_queue_hotplug_work(struct work_struct *work); void acpi_device_hotplug(struct acpi_device *adev, u32 src); bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent); +acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context); +void acpi_scan_table_handler(u32 event, void *table, void *context); + /* -------------------------------------------------------------------------- Device Node Initialization / Removal -------------------------------------------------------------------------- */ @@ -181,7 +184,7 @@ typedef int (*acpi_ec_query_func) (void *data); int acpi_ec_init(void); int acpi_ec_ecdt_probe(void); -int acpi_boot_ec_enable(void); +int acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); void acpi_ec_unblock_transactions_early(void); @@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev, { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code)); + return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); } static DEVICE_ATTR_RO(format); @@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev, continue; if (nfit_dcr->dcr->code == dcr->code) continue; - rc = sprintf(buf, "%#x\n", - be16_to_cpu(nfit_dcr->dcr->code)); + rc = sprintf(buf, 
"0x%04x\n", + le16_to_cpu(nfit_dcr->dcr->code)); break; } if (rc != ENXIO) @@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, if (disable_vendor_specific) dsm_mask &= ~(1 << 8); } else { - dev_err(dev, "unknown dimm command family\n"); + dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; - return force_enable_dimms ? 0 : -ENODEV; + /* DSMs are optional, continue loading the driver... */ + return 0; } uuid = to_nfit_uuid(nfit_mem->family); @@ -53,12 +53,12 @@ enum nfit_uuids { }; /* - * Region format interface codes are stored as an array of bytes in the - * NFIT DIMM Control Region structure + * Region format interface codes are stored with the interface as the + * LSB and the function as the MSB. */ -#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */ -#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */ -#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */ +#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */ +#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */ +#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */ enum { NFIT_BLK_READ_FLUSH = 1, @@ -18,22 +18,21 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * */ + +#define pr_fmt(fmt) "ACPI: " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/acpi.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/numa.h> #include <linux/nodemask.h> #include <linux/topology.h> -#define PREFIX "ACPI: " - -#define ACPI_NUMA 0x80000000 -#define _COMPONENT ACPI_NUMA -ACPI_MODULE_NAME("numa"); - static nodemask_t nodes_found_map = NODE_MASK_NONE; /* maps to convert between proximity domain and logical node ID */ @@ -43,6 +42,7 @@ static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; unsigned char acpi_srat_revision __initdata; +int acpi_numa __initdata; int pxm_to_node(int pxm) { @@ -128,68 +128,63 @@ EXPORT_SYMBOL(acpi_map_pxm_to_online_node); static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { - - ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); - - if (!header) - return; - switch (header->type) { - case ACPI_SRAT_TYPE_CPU_AFFINITY: -#ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_cpu_affinity *p = (struct acpi_srat_cpu_affinity *)header; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", - p->apic_id, p->local_sapic_eid, - p->proximity_domain_lo, - (p->flags & ACPI_SRAT_CPU_ENABLED)? - "enabled" : "disabled")); + pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", + p->apic_id, p->local_sapic_eid, + p->proximity_domain_lo, + (p->flags & ACPI_SRAT_CPU_ENABLED) ? + "enabled" : "disabled"); } -#endif /* ACPI_DEBUG_OUTPUT */ break; case ACPI_SRAT_TYPE_MEMORY_AFFINITY: -#ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_mem_affinity *p = (struct acpi_srat_mem_affinity *)header; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", - (unsigned long)p->base_address, - (unsigned long)p->length, - p->proximity_domain, - (p->flags & ACPI_SRAT_MEM_ENABLED)? - "enabled" : "disabled", - (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)? - " hot-pluggable" : "", - (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)? 
- " non-volatile" : "")); + pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", + (unsigned long)p->base_address, + (unsigned long)p->length, + p->proximity_domain, + (p->flags & ACPI_SRAT_MEM_ENABLED) ? + "enabled" : "disabled", + (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ? + " hot-pluggable" : "", + (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ? + " non-volatile" : ""); } -#endif /* ACPI_DEBUG_OUTPUT */ break; case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: -#ifdef ACPI_DEBUG_OUTPUT { struct acpi_srat_x2apic_cpu_affinity *p = (struct acpi_srat_x2apic_cpu_affinity *)header; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "SRAT Processor (x2apicid[0x%08x]) in" - " proximity domain %d %s\n", - p->apic_id, - p->proximity_domain, - (p->flags & ACPI_SRAT_CPU_ENABLED) ? - "enabled" : "disabled")); + pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n", + p->apic_id, + p->proximity_domain, + (p->flags & ACPI_SRAT_CPU_ENABLED) ? + "enabled" : "disabled"); } -#endif /* ACPI_DEBUG_OUTPUT */ break; + + case ACPI_SRAT_TYPE_GICC_AFFINITY: + { + struct acpi_srat_gicc_affinity *p = + (struct acpi_srat_gicc_affinity *)header; + pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", + p->acpi_processor_uid, + p->proximity_domain, + (p->flags & ACPI_SRAT_GICC_ENABLED) ? + "enabled" : "disabled"); + } + break; + default: - printk(KERN_WARNING PREFIX - "Found unsupported SRAT entry (type = 0x%x)\n", - header->type); + pr_warn("Found unsupported SRAT entry (type = 0x%x)\n", + header->type); break; } } @@ -217,12 +212,117 @@ static int __init slit_valid(struct acpi_table_slit *slit) return 1; } +void __init bad_srat(void) +{ + pr_err("SRAT: SRAT not used.\n"); + acpi_numa = -1; +} + +int __init srat_disabled(void) +{ + return acpi_numa < 0; +} + +#if defined(CONFIG_X86) || defined(CONFIG_ARM64) +/* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. I/O localities are + * not supported at this point. 
+ */ +void __init acpi_numa_slit_init(struct acpi_table_slit *slit) +{ + int i, j; + + for (i = 0; i < slit->locality_count; i++) { + const int from_node = pxm_to_node(i); + + if (from_node == NUMA_NO_NODE) + continue; + + for (j = 0; j < slit->locality_count; j++) { + const int to_node = pxm_to_node(j); + + if (to_node == NUMA_NO_NODE) + continue; + + numa_set_distance(from_node, to_node, + slit->entry[slit->locality_count * i + j]); + } + } +} + +/* + * Default callback for parsing of the Proximity Domain <-> Memory + * Area mappings + */ +int __init +acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) +{ + u64 start, end; + u32 hotpluggable; + int node, pxm; + + if (srat_disabled()) + goto out_err; + if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) { + pr_err("SRAT: Unexpected header length: %d\n", + ma->header.length); + goto out_err_bad_srat; + } + if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) + goto out_err; + hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE; + if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) + goto out_err; + + start = ma->base_address; + end = start + ma->length; + pxm = ma->proximity_domain; + if (acpi_srat_revision <= 1) + pxm &= 0xff; + + node = acpi_map_pxm_to_node(pxm); + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Too many proximity domains.\n"); + goto out_err_bad_srat; + } + + if (numa_add_memblk(node, start, end) < 0) { + pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n", + node, (unsigned long long) start, + (unsigned long long) end - 1); + goto out_err_bad_srat; + } + + node_set(node, numa_nodes_parsed); + + pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n", + node, pxm, + (unsigned long long) start, (unsigned long long) end - 1, + hotpluggable ? " hotplug" : "", + ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : ""); + + /* Mark hotplug range in memblock. */ + if (hotpluggable && memblock_mark_hotplug(start, ma->length)) + pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n", + (unsigned long long)start, (unsigned long long)end - 1); + + max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1)); + + return 0; +out_err_bad_srat: + bad_srat(); +out_err: + return -EINVAL; +} +#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ + static int __init acpi_parse_slit(struct acpi_table_header *table) { struct acpi_table_slit *slit = (struct acpi_table_slit *)table; if (!slit_valid(slit)) { - printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); + pr_info("SLIT table looks invalid. 
Not used.\n"); return -EINVAL; } acpi_numa_slit_init(slit); @@ -233,12 +333,9 @@ static int __init acpi_parse_slit(struct acpi_table_header *table) void __init __weak acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) { - printk(KERN_WARNING PREFIX - "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id); - return; + pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id); } - static int __init acpi_parse_x2apic_affinity(struct acpi_subtable_header *header, const unsigned long end) @@ -275,6 +372,24 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, return 0; } +static int __init +acpi_parse_gicc_affinity(struct acpi_subtable_header *header, + const unsigned long end) +{ + struct acpi_srat_gicc_affinity *processor_affinity; + + processor_affinity = (struct acpi_srat_gicc_affinity *)header; + if (!processor_affinity) + return -EINVAL; + + acpi_table_print_srat_entry(header); + + /* let architecture-dependent part to do it */ + acpi_numa_gicc_affinity_init(processor_affinity); + + return 0; +} + static int __initdata parsed_numa_memblks; static int __init @@ -319,6 +434,9 @@ int __init acpi_numa_init(void) { int cnt = 0; + if (acpi_disabled) + return -EINVAL; + /* * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= * SRAT cpu entries could have different order with that in MADT. @@ -327,13 +445,15 @@ int __init acpi_numa_init(void) /* SRAT: Static Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { - struct acpi_subtable_proc srat_proc[2]; + struct acpi_subtable_proc srat_proc[3]; memset(srat_proc, 0, sizeof(srat_proc)); srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; srat_proc[0].handler = acpi_parse_processor_affinity; srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY; srat_proc[1].handler = acpi_parse_x2apic_affinity; + srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY; + srat_proc[2].handler = acpi_parse_gicc_affinity; acpi_table_parse_entries_array(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), @@ -347,8 +467,6 @@ int __init acpi_numa_init(void) /* SLIT: System Locality Information Table */ acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); - acpi_numa_arch_fixup(); - if (cnt < 0) return cnt; else if (!parsed_numa_memblks) @@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq) { struct acpi_pci_link *link; int penalty = 0; + int i; list_for_each_entry(link, &acpi_link_list, list) { /* @@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq) */ if (link->irq.active && link->irq.active == irq) penalty += PIRQ_PENALTY_PCI_USING; - else { - int i; - - /* - * If a link is inactive, penalize the IRQs it - * might use, but not as severely. - */ - for (i = 0; i < link->irq.possible_count; i++) - if (link->irq.possible[i] == irq) - penalty += PIRQ_PENALTY_PCI_POSSIBLE / - link->irq.possible_count; - } + + /* + * penalize the IRQs PCI might use, but not as severely. + */ + for (i = 0; i < link->irq.possible_count; i++) + if (link->irq.possible[i] == irq) + penalty += PIRQ_PENALTY_PCI_POSSIBLE / + link->irq.possible_count; } return penalty; @@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq) { int penalty = 0; - if (irq < ACPI_MAX_ISA_IRQS) - penalty += acpi_isa_irq_penalty[irq]; - /* * Penalize IRQ used by ACPI SCI. 
If ACPI SCI pin attributes conflict * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be @@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq) penalty += PIRQ_PENALTY_PCI_USING; } + if (irq < ACPI_MAX_ISA_IRQS) + return penalty + acpi_isa_irq_penalty[irq]; + penalty += acpi_irq_pci_sharing_penalty(irq); return penalty; } +int __init acpi_irq_penalty_init(void) +{ + struct acpi_pci_link *link; + int i; + + /* + * Update penalties to facilitate IRQ balancing. + */ + list_for_each_entry(link, &acpi_link_list, list) { + + /* + * reflect the possible and active irqs in the penalty table -- + * useful for breaking ties. + */ + if (link->irq.possible_count) { + int penalty = + PIRQ_PENALTY_PCI_POSSIBLE / + link->irq.possible_count; + + for (i = 0; i < link->irq.possible_count; i++) { + if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS) + acpi_isa_irq_penalty[link->irq. + possible[i]] += + penalty; + } + + } else if (link->irq.active && + (link->irq.active < ACPI_MAX_ISA_IRQS)) { + acpi_isa_irq_penalty[link->irq.active] += + PIRQ_PENALTY_PCI_POSSIBLE; + } + } + + return 0; +} + static int acpi_irq_balance = -1; /* 0: static, 1: balance */ static int acpi_pci_link_allocate(struct acpi_pci_link *link) @@ -839,7 +872,7 @@ void acpi_penalize_isa_irq(int irq, int active) { if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + - active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING; + (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); } bool acpi_isa_irq_available(int irq) @@ -22,8 +22,9 @@ * General Public License for more details. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> @@ -33,30 +34,11 @@ #include <linux/dmi.h> #include <linux/pci-acpi.h> -static bool debug; static int check_sta_before_sun; -#define DRIVER_VERSION "0.1" -#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>" -#define DRIVER_DESC "ACPI PCI Slot Detection Driver" -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); -MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); -module_param(debug, bool, 0644); - #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME("pci_slot"); -#define MY_NAME "pci_slot" -#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg) -#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg) -#define dbg(format, arg...) 
\ - do { \ - if (debug) \ - pr_debug("%s: " format, MY_NAME , ## arg); \ - } while (0) - #define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */ struct acpi_pci_slot { @@ -76,7 +58,7 @@ check_slot(acpi_handle handle, unsigned long long *sun) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - dbg("Checking slot on path: %s\n", (char *)buffer.pointer); + pr_debug("Checking slot on path: %s\n", (char *)buffer.pointer); if (check_sta_before_sun) { /* If SxFy doesn't have _STA, we just assume it's there */ @@ -87,14 +69,16 @@ check_slot(acpi_handle handle, unsigned long long *sun) status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); if (ACPI_FAILURE(status)) { - dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer); + pr_debug("_ADR returned %d on %s\n", + status, (char *)buffer.pointer); goto out; } /* No _SUN == not a slot == bail */ status = acpi_evaluate_integer(handle, "_SUN", NULL, sun); if (ACPI_FAILURE(status)) { - dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer); + pr_debug("_SUN returned %d on %s\n", + status, (char *)buffer.pointer); goto out; } @@ -132,15 +116,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) } slot = kmalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) { - err("%s: cannot allocate memory\n", __func__); + if (!slot) return AE_OK; - } snprintf(name, sizeof(name), "%llu", sun); pci_slot = pci_create_slot(pci_bus, device, name, NULL); if (IS_ERR(pci_slot)) { - err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); + pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); kfree(slot); return AE_OK; } @@ -150,8 +132,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) get_device(&pci_bus->dev); - dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n", - pci_slot, pci_bus->number, device, name); + pr_debug("%p, pci_bus: %x, device: %d, name: %s\n", + pci_slot, pci_bus->number, device, name); return AE_OK; } @@ -186,7 +168,8 @@ void acpi_pci_slot_remove(struct pci_bus *bus) static int do_sta_before_sun(const struct dmi_system_id *d) { - info("%s detected: will evaluate _STA before calling _SUN\n", d->ident); + pr_info("%s detected: will evaluate _STA before calling _SUN\n", + d->ident); check_sta_before_sun = 1; return 0; } @@ -13,7 +13,7 @@ * GNU General Public License for more details. 
*/ -#include <linux/module.h> +#include <linux/export.h> #include <linux/acpi.h> #include <linux/regmap.h> #include <acpi/acpi_lpat.h> @@ -21,12 +21,19 @@ #define PMIC_POWER_OPREGION_ID 0x8d #define PMIC_THERMAL_OPREGION_ID 0x8c +#define PMIC_REGS_OPREGION_ID 0x8f + +struct intel_pmic_regs_handler_ctx { + unsigned int val; + u16 addr; +}; struct intel_pmic_opregion { struct mutex lock; struct acpi_lpat_conversion_table *lpat_table; struct regmap *regmap; struct intel_pmic_opregion_data *data; + struct intel_pmic_regs_handler_ctx ctx; }; static int pmic_get_reg_bit(int address, struct pmic_table *table, @@ -131,7 +138,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg, } static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg, - u32 function, u64 *value) + int bit, u32 function, u64 *value) { struct intel_pmic_opregion_data *d = opregion->data; struct regmap *regmap = opregion->regmap; @@ -140,12 +147,12 @@ static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg, return -ENXIO; if (function == ACPI_READ) - return d->get_policy(regmap, reg, value); + return d->get_policy(regmap, reg, bit, value); if (*value != 0 && *value != 1) return -EINVAL; - return d->update_policy(regmap, reg, *value); + return d->update_policy(regmap, reg, bit, *value); } static bool pmic_thermal_is_temp(int address) @@ -170,13 +177,13 @@ static acpi_status intel_pmic_thermal_handler(u32 function, { struct intel_pmic_opregion *opregion = region_context; struct intel_pmic_opregion_data *d = opregion->data; - int reg, result; + int reg, bit, result; if (bits != 32 || !value64) return AE_BAD_PARAMETER; result = pmic_get_reg_bit(address, d->thermal_table, - d->thermal_table_count, ®, NULL); + d->thermal_table_count, ®, &bit); if (result == -ENOENT) return AE_BAD_PARAMETER; @@ -187,7 +194,8 @@ static acpi_status intel_pmic_thermal_handler(u32 function, else if (pmic_thermal_is_aux(address)) result = pmic_thermal_aux(opregion, reg, function, value64); else if (pmic_thermal_is_pen(address)) - result = pmic_thermal_pen(opregion, reg, function, value64); + result = pmic_thermal_pen(opregion, reg, bit, + function, value64); else result = -EINVAL; @@ -203,6 +211,48 @@ static acpi_status intel_pmic_thermal_handler(u32 function, return AE_OK; } +static acpi_status intel_pmic_regs_handler(u32 function, + acpi_physical_address address, u32 bits, u64 *value64, + void *handler_context, void *region_context) +{ + struct intel_pmic_opregion *opregion = region_context; + int result = 0; + + switch (address) { + case 0: + return AE_OK; + case 1: + opregion->ctx.addr |= (*value64 & 0xff) << 8; + return AE_OK; + case 2: + opregion->ctx.addr |= *value64 & 0xff; + return AE_OK; + case 3: + opregion->ctx.val = *value64 & 0xff; + return AE_OK; + case 4: + if (*value64) { + result = regmap_write(opregion->regmap, opregion->ctx.addr, + opregion->ctx.val); + } else { + result = regmap_read(opregion->regmap, opregion->ctx.addr, + &opregion->ctx.val); + if (result == 0) + *value64 = opregion->ctx.val; + } + memset(&opregion->ctx, 0x00, sizeof(opregion->ctx)); + } + + if (result < 0) { + if (result == -EINVAL) + return AE_BAD_PARAMETER; + else + return AE_ERROR; + } + + return AE_OK; +} + int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d) @@ -242,16 +292,30 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, acpi_remove_address_space_handler(handle, 
PMIC_POWER_OPREGION_ID, intel_pmic_power_handler); ret = -ENODEV; - goto out_error; + goto out_remove_power_handler; + } + + status = acpi_install_address_space_handler(handle, + PMIC_REGS_OPREGION_ID, intel_pmic_regs_handler, NULL, + opregion); + if (ACPI_FAILURE(status)) { + ret = -ENODEV; + goto out_remove_thermal_handler; } opregion->data = d; return 0; +out_remove_thermal_handler: + acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID, + intel_pmic_thermal_handler); + +out_remove_power_handler: + acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID, + intel_pmic_power_handler); + out_error: acpi_lpat_free_conversion_table(opregion->lpat_table); return ret; } EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler); - -MODULE_LICENSE("GPL"); @@ -12,8 +12,8 @@ struct intel_pmic_opregion_data { int (*update_power)(struct regmap *r, int reg, int bit, bool on); int (*get_raw_temp)(struct regmap *r, int reg); int (*update_aux)(struct regmap *r, int reg, int raw_temp); - int (*get_policy)(struct regmap *r, int reg, u64 *value); - int (*update_policy)(struct regmap *r, int reg, int enable); + int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value); + int (*update_policy)(struct regmap *r, int reg, int bit, int enable); struct pmic_table *power_table; int power_table_count; struct pmic_table *thermal_table; diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c new file mode 100644 index 000000000000..90011aad4d20 --- /dev/null +++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c @@ -0,0 +1,420 @@ +/* + * intel_pmic_bxtwc.c - Intel BXT WhiskeyCove PMIC operation region driver + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/mfd/intel_soc_pmic.h> +#include <linux/regmap.h> +#include <linux/platform_device.h> +#include "intel_pmic.h" + +#define WHISKEY_COVE_ALRT_HIGH_BIT_MASK 0x0F +#define WHISKEY_COVE_ADC_HIGH_BIT(x) (((x & 0x0F) << 8)) +#define WHISKEY_COVE_ADC_CURSRC(x) (((x & 0xF0) >> 4)) +#define VR_MODE_DISABLED 0 +#define VR_MODE_AUTO BIT(0) +#define VR_MODE_NORMAL BIT(1) +#define VR_MODE_SWITCH BIT(2) +#define VR_MODE_ECO (BIT(0)|BIT(1)) +#define VSWITCH2_OUTPUT BIT(5) +#define VSWITCH1_OUTPUT BIT(4) +#define VUSBPHY_CHARGE BIT(1) + +static struct pmic_table power_table[] = { + { + .address = 0x0, + .reg = 0x63, + .bit = VR_MODE_AUTO, + }, /* VDD1 -> VDD1CNT */ + { + .address = 0x04, + .reg = 0x65, + .bit = VR_MODE_AUTO, + }, /* VDD2 -> VDD2CNT */ + { + .address = 0x08, + .reg = 0x67, + .bit = VR_MODE_AUTO, + }, /* VDD3 -> VDD3CNT */ + { + .address = 0x0c, + .reg = 0x6d, + .bit = VR_MODE_AUTO, + }, /* VLFX -> VFLEXCNT */ + { + .address = 0x10, + .reg = 0x6f, + .bit = VR_MODE_NORMAL, + }, /* VP1A -> VPROG1ACNT */ + { + .address = 0x14, + .reg = 0x70, + .bit = VR_MODE_NORMAL, + }, /* VP1B -> VPROG1BCNT */ + { + .address = 0x18, + .reg = 0x71, + .bit = VR_MODE_NORMAL, + }, /* VP1C -> VPROG1CCNT */ + { + .address = 0x1c, + .reg = 0x72, + .bit = VR_MODE_NORMAL, + }, /* VP1D -> VPROG1DCNT */ + { + .address = 0x20, + .reg = 0x73, + .bit = VR_MODE_NORMAL, + }, /* VP2A -> VPROG2ACNT */ + { + .address = 0x24, + .reg = 0x74, + .bit = VR_MODE_NORMAL, + }, /* VP2B -> VPROG2BCNT */ + { + .address = 0x28, + .reg = 0x75, + .bit = VR_MODE_NORMAL, + }, /* VP2C -> VPROG2CCNT */ + { + .address = 0x2c, + .reg = 0x76, + .bit = VR_MODE_NORMAL, + }, /* VP3A -> VPROG3ACNT */ + { + .address = 0x30, + .reg = 0x77, + .bit = VR_MODE_NORMAL, + }, /* VP3B -> VPROG3BCNT */ + { + .address = 0x34, + .reg = 0x78, + .bit = VSWITCH2_OUTPUT, + }, /* VSW2 -> VLD0CNT Bit 5*/ + { + .address = 0x38, + .reg = 0x78, + .bit = VSWITCH1_OUTPUT, + }, /* VSW1 -> VLD0CNT Bit 4 */ + { + .address = 0x3c, + .reg = 0x78, + .bit = VUSBPHY_CHARGE, + }, /* VUPY -> VLDOCNT Bit 1 */ + { + .address = 0x40, + .reg = 0x7b, + .bit = VR_MODE_NORMAL, + }, /* VRSO -> VREFSOCCNT*/ + { + .address = 0x44, + .reg = 0xA0, + .bit = VR_MODE_NORMAL, + }, /* VP1E -> VPROG1ECNT */ + { + .address = 0x48, + .reg = 0xA1, + .bit = VR_MODE_NORMAL, + }, /* VP1F -> VPROG1FCNT */ + { + .address = 0x4c, + .reg = 0xA2, + .bit = VR_MODE_NORMAL, + }, /* VP2D -> VPROG2DCNT */ + { + .address = 0x50, + .reg = 0xA3, + .bit = VR_MODE_NORMAL, + }, /* VP4A -> VPROG4ACNT */ + { + .address = 0x54, + .reg = 0xA4, + .bit = VR_MODE_NORMAL, + }, /* VP4B -> VPROG4BCNT */ + { + .address = 0x58, + .reg = 0xA5, + .bit = VR_MODE_NORMAL, + }, /* VP4C -> VPROG4CCNT */ + { + .address = 0x5c, + .reg = 0xA6, + .bit = VR_MODE_NORMAL, + }, /* VP4D -> VPROG4DCNT */ + { + .address = 0x60, + .reg = 0xA7, + .bit = VR_MODE_NORMAL, + }, /* VP5A -> VPROG5ACNT */ + { + .address = 0x64, + .reg = 0xA8, + .bit = VR_MODE_NORMAL, + }, /* VP5B -> VPROG5BCNT */ + { + .address = 0x68, + .reg = 0xA9, + .bit = VR_MODE_NORMAL, + }, /* VP6A -> VPROG6ACNT */ + { + .address = 0x6c, + .reg = 0xAA, + .bit = VR_MODE_NORMAL, + }, /* VP6B -> VPROG6BCNT */ + { + .address = 0x70, + .reg = 0x36, + .bit = BIT(2), + }, /* SDWN_N -> MODEMCTRL Bit 2 */ + { + .address = 0x74, + .reg = 0x36, + .bit = BIT(0), + } /* MOFF -> MODEMCTRL Bit 0 */ +}; + +static struct pmic_table thermal_table[] = { + { + .address = 0x00, + .reg = 0x4F39 + }, + { + .address = 0x04, + .reg = 0x4F24 + 
}, + { + .address = 0x08, + .reg = 0x4F26 + }, + { + .address = 0x0c, + .reg = 0x4F3B + }, + { + .address = 0x10, + .reg = 0x4F28 + }, + { + .address = 0x14, + .reg = 0x4F2A + }, + { + .address = 0x18, + .reg = 0x4F3D + }, + { + .address = 0x1c, + .reg = 0x4F2C + }, + { + .address = 0x20, + .reg = 0x4F2E + }, + { + .address = 0x24, + .reg = 0x4F3F + }, + { + .address = 0x28, + .reg = 0x4F30 + }, + { + .address = 0x30, + .reg = 0x4F41 + }, + { + .address = 0x34, + .reg = 0x4F32 + }, + { + .address = 0x3c, + .reg = 0x4F43 + }, + { + .address = 0x40, + .reg = 0x4F34 + }, + { + .address = 0x48, + .reg = 0x4F6A, + .bit = 0, + }, + { + .address = 0x4C, + .reg = 0x4F6A, + .bit = 1 + }, + { + .address = 0x50, + .reg = 0x4F6A, + .bit = 2 + }, + { + .address = 0x54, + .reg = 0x4F6A, + .bit = 4 + }, + { + .address = 0x58, + .reg = 0x4F6A, + .bit = 5 + }, + { + .address = 0x5C, + .reg = 0x4F6A, + .bit = 3 + }, +}; + +static int intel_bxtwc_pmic_get_power(struct regmap *regmap, int reg, + int bit, u64 *value) +{ + int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = (data & bit) ? 1 : 0; + return 0; +} + +static int intel_bxtwc_pmic_update_power(struct regmap *regmap, int reg, + int bit, bool on) +{ + u8 val, mask = bit; + + if (on) + val = 0xFF; + else + val = 0x0; + + return regmap_update_bits(regmap, reg, mask, val); +} + +static int intel_bxtwc_pmic_get_raw_temp(struct regmap *regmap, int reg) +{ + unsigned int val, adc_val, reg_val; + u8 temp_l, temp_h, cursrc; + unsigned long rlsb; + static const unsigned long rlsb_array[] = { + 0, 260420, 130210, 65100, 32550, 16280, + 8140, 4070, 2030, 0, 260420, 130210 }; + + if (regmap_read(regmap, reg, &val)) + return -EIO; + temp_l = (u8) val; + + if (regmap_read(regmap, (reg - 1), &val)) + return -EIO; + temp_h = (u8) val; + + reg_val = temp_l | WHISKEY_COVE_ADC_HIGH_BIT(temp_h); + cursrc = WHISKEY_COVE_ADC_CURSRC(temp_h); + rlsb = rlsb_array[cursrc]; + adc_val = reg_val * rlsb / 1000; + + return adc_val; +} + +static int +intel_bxtwc_pmic_update_aux(struct regmap *regmap, int reg, int raw) +{ + u32 bsr_num; + u16 resi_val, count = 0, thrsh = 0; + u8 alrt_h, alrt_l, cursel = 0; + + bsr_num = raw; + bsr_num /= (1 << 5); + + count = fls(bsr_num) - 1; + + cursel = clamp_t(s8, (count - 7), 0, 7); + thrsh = raw / (1 << (4 + cursel)); + + resi_val = (cursel << 9) | thrsh; + alrt_h = (resi_val >> 8) & WHISKEY_COVE_ALRT_HIGH_BIT_MASK; + if (regmap_update_bits(regmap, + reg - 1, + WHISKEY_COVE_ALRT_HIGH_BIT_MASK, + alrt_h)) + return -EIO; + + alrt_l = (u8)resi_val; + return regmap_write(regmap, reg, alrt_l); +} + +static int +intel_bxtwc_pmic_get_policy(struct regmap *regmap, int reg, int bit, u64 *value) +{ + u8 mask = BIT(bit); + unsigned int val; + + if (regmap_read(regmap, reg, &val)) + return -EIO; + + *value = (val & mask) >> bit; + return 0; +} + +static int +intel_bxtwc_pmic_update_policy(struct regmap *regmap, + int reg, int bit, int enable) +{ + u8 mask = BIT(bit), val = enable << bit; + + return regmap_update_bits(regmap, reg, mask, val); +} + +static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = { + .get_power = intel_bxtwc_pmic_get_power, + .update_power = intel_bxtwc_pmic_update_power, + .get_raw_temp = intel_bxtwc_pmic_get_raw_temp, + .update_aux = intel_bxtwc_pmic_update_aux, + .get_policy = intel_bxtwc_pmic_get_policy, + .update_policy = intel_bxtwc_pmic_update_policy, + .power_table = power_table, + .power_table_count = ARRAY_SIZE(power_table), + .thermal_table = thermal_table, + .thermal_table_count = 
ARRAY_SIZE(thermal_table), +}; + +static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); + + return intel_pmic_install_opregion_handler(&pdev->dev, + ACPI_HANDLE(pdev->dev.parent), + pmic->regmap, + &intel_bxtwc_pmic_opregion_data); +} + +static struct platform_device_id bxt_wc_opregion_id_table[] = { + { .name = "bxt_wcove_region" }, + {}, +}; + +static struct platform_driver intel_bxtwc_pmic_opregion_driver = { + .probe = intel_bxtwc_pmic_opregion_probe, + .driver = { + .name = "bxt_whiskey_cove_pmic", + }, + .id_table = bxt_wc_opregion_id_table, +}; + +static int __init intel_bxtwc_pmic_opregion_driver_init(void) +{ + return platform_driver_register(&intel_bxtwc_pmic_opregion_driver); +} +device_initcall(intel_bxtwc_pmic_opregion_driver_init); @@ -141,7 +141,8 @@ static int intel_crc_pmic_update_aux(struct regmap *regmap, int reg, int raw) regmap_update_bits(regmap, reg - 1, 0x3, raw >> 8) ? -EIO : 0; } -static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value) +static int intel_crc_pmic_get_policy(struct regmap *regmap, + int reg, int bit, u64 *value) { int pen; @@ -152,7 +153,7 @@ static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value) } static int intel_crc_pmic_update_policy(struct regmap *regmap, - int reg, int enable) + int reg, int bit, int enable) { int alert0; @@ -13,7 +13,7 @@ * GNU General Public License for more details. */ -#include <linux/module.h> +#include <linux/init.h> #include <linux/acpi.h> #include <linux/mfd/axp20x.h> #include <linux/regmap.h> @@ -262,7 +262,4 @@ static int __init intel_xpower_pmic_opregion_driver_init(void) { return platform_driver_register(&intel_xpower_pmic_opregion_driver); } -module_init(intel_xpower_pmic_opregion_driver_init); - -MODULE_DESCRIPTION("XPower AXP288 ACPI operation region driver"); -MODULE_LICENSE("GPL"); +device_initcall(intel_xpower_pmic_opregion_driver_init); @@ -108,13 +108,12 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, return -EINVAL; } -static phys_cpuid_t map_madt_entry(int type, u32 acpi_id) +static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, + int type, u32 acpi_id) { unsigned long madt_end, entry; phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ - struct acpi_table_madt *madt; - madt = get_madt_table(); if (!madt) return phys_id; @@ -145,6 +144,25 @@ static phys_cpuid_t map_madt_entry(int type, u32 acpi_id) return phys_id; } +phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id) +{ + struct acpi_table_madt *madt = NULL; + acpi_size tbl_size; + phys_cpuid_t rv; + + acpi_get_table_with_size(ACPI_SIG_MADT, 0, + (struct acpi_table_header **)&madt, + &tbl_size); + if (!madt) + return PHYS_CPUID_INVALID; + + rv = map_madt_entry(madt, 1, acpi_id); + + early_acpi_os_unmap_memory(madt, tbl_size); + + return rv; +} + static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -185,7 +203,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) phys_id = map_mat_entry(handle, type, acpi_id); if (invalid_phys_cpuid(phys_id)) - phys_id = map_madt_entry(type, acpi_id); + phys_id = map_madt_entry(get_madt_table(), type, acpi_id); return phys_id; } @@ -90,7 +90,7 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) pr->performance_platform_limit); break; case ACPI_PROCESSOR_NOTIFY_POWER: - acpi_processor_cst_has_changed(pr); 
+ acpi_processor_power_state_has_changed(pr); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; @@ -59,6 +59,12 @@ module_param(latency_factor, uint, 0644); static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); +struct cpuidle_driver acpi_idle_driver = { + .name = "acpi_idle", + .owner = THIS_MODULE, +}; + +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); @@ -297,7 +303,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *cst; - if (nocst) return -ENODEV; @@ -570,7 +575,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) return (working); } -static int acpi_processor_get_power_info(struct acpi_processor *pr) +static int acpi_processor_get_cstate_info(struct acpi_processor *pr) { unsigned int i; int result; @@ -804,36 +809,12 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev, acpi_idle_do_entry(cx); } -struct cpuidle_driver acpi_idle_driver = { - .name = "acpi_idle", - .owner = THIS_MODULE, -}; - -/** - * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE - * device i.e. per-cpu data - * - * @pr: the ACPI processor - * @dev : the cpuidle device - */ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, struct cpuidle_device *dev) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; - if (!pr->flags.power_setup_done) - return -EINVAL; - - if (pr->flags.power == 0) { - return -EINVAL; - } - - if (!dev) - return -EINVAL; - - dev->cpu = pr->id; - if (max_cstate == 0) max_cstate = 1; @@ -856,31 +837,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, return 0; } -/** - * acpi_processor_setup_cpuidle states- prepares and configures cpuidle - * global state data i.e. 
idle routines - * - * @pr: the ACPI processor - */ -static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) +static int acpi_processor_setup_cstates(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_driver *drv = &acpi_idle_driver; - if (!pr->flags.power_setup_done) - return -EINVAL; - - if (pr->flags.power == 0) - return -EINVAL; - - drv->safe_state_index = -1; - for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { - drv->states[i].name[0] = '\0'; - drv->states[i].desc[0] = '\0'; - } - if (max_cstate == 0) max_cstate = 1; @@ -892,7 +855,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); - strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; state->target_residency = cx->latency * latency_factor; state->enter = acpi_idle_enter; @@ -925,6 +888,450 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) return 0; } +static inline void acpi_processor_cstate_first_run_checks(void) +{ + acpi_status status; + static int first_run; + + if (first_run) + return; + dmi_check_system(processor_power_dmi_table); + max_cstate = acpi_processor_cstate_check(max_cstate); + if (max_cstate < ACPI_C_STATES_MAX) + pr_notice("ACPI: processor limited to max C-state %d\n", + max_cstate); + first_run++; + + if (acpi_gbl_FADT.cst_control && !nocst) { + status = acpi_os_write_port(acpi_gbl_FADT.smi_command, + acpi_gbl_FADT.cst_control, 8); + if (ACPI_FAILURE(status)) + ACPI_EXCEPTION((AE_INFO, status, + "Notifying BIOS of _CST ability failed")); + } +} +#else + +static inline int disabled_by_idle_boot_param(void) { return 0; } +static inline void acpi_processor_cstate_first_run_checks(void) { } +static int acpi_processor_get_cstate_info(struct acpi_processor *pr) +{ + return -ENODEV; +} + +static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, + struct cpuidle_device *dev) +{ + return -EINVAL; +} + +static int acpi_processor_setup_cstates(struct acpi_processor *pr) +{ + return -EINVAL; +} + +#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ + +struct acpi_lpi_states_array { + unsigned int size; + unsigned int composite_states_size; + struct acpi_lpi_state *entries; + struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER]; +}; + +static int obj_get_integer(union acpi_object *obj, u32 *value) +{ + if (obj->type != ACPI_TYPE_INTEGER) + return -EINVAL; + + *value = obj->integer.value; + return 0; +} + +static int acpi_processor_evaluate_lpi(acpi_handle handle, + struct acpi_lpi_states_array *info) +{ + acpi_status status; + int ret = 0; + int pkg_count, state_idx = 1, loop; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *lpi_data; + struct acpi_lpi_state *lpi_state; + + status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n")); + return -ENODEV; + } + + lpi_data = buffer.pointer; + + /* There must be at least 4 elements = 3 elements + 1 package */ + if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE || + lpi_data->package.count < 4) { + pr_debug("not enough elements in _LPI\n"); + ret = -ENODATA; + goto end; + } + + pkg_count = lpi_data->package.elements[2].integer.value; + + /* Validate number of power states. 
*/ + if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) { + pr_debug("count given by _LPI is not valid\n"); + ret = -ENODATA; + goto end; + } + + lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL); + if (!lpi_state) { + ret = -ENOMEM; + goto end; + } + + info->size = pkg_count; + info->entries = lpi_state; + + /* LPI States start at index 3 */ + for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) { + union acpi_object *element, *pkg_elem, *obj; + + element = &lpi_data->package.elements[loop]; + if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7) + continue; + + pkg_elem = element->package.elements; + + obj = pkg_elem + 6; + if (obj->type == ACPI_TYPE_BUFFER) { + struct acpi_power_register *reg; + + reg = (struct acpi_power_register *)obj->buffer.pointer; + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && + reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) + continue; + + lpi_state->address = reg->address; + lpi_state->entry_method = + reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ? + ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO; + } else if (obj->type == ACPI_TYPE_INTEGER) { + lpi_state->entry_method = ACPI_CSTATE_INTEGER; + lpi_state->address = obj->integer.value; + } else { + continue; + } + + /* elements[7,8] skipped for now i.e. Residency/Usage counter*/ + + obj = pkg_elem + 9; + if (obj->type == ACPI_TYPE_STRING) + strlcpy(lpi_state->desc, obj->string.pointer, + ACPI_CX_DESC_LEN); + + lpi_state->index = state_idx; + if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) { + pr_debug("No min. residency found, assuming 10 us\n"); + lpi_state->min_residency = 10; + } + + if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) { + pr_debug("No wakeup residency found, assuming 10 us\n"); + lpi_state->wake_latency = 10; + } + + if (obj_get_integer(pkg_elem + 2, &lpi_state->flags)) + lpi_state->flags = 0; + + if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags)) + lpi_state->arch_flags = 0; + + if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq)) + lpi_state->res_cnt_freq = 1; + + if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state)) + lpi_state->enable_parent_state = 0; + } + + acpi_handle_debug(handle, "Found %d power states\n", state_idx); +end: + kfree(buffer.pointer); + return ret; +} + +/* + * flat_state_cnt - the number of composite LPI states after the process of flattening + */ +static int flat_state_cnt; + +/** + * combine_lpi_states - combine local and parent LPI states to form a composite LPI state + * + * @local: local LPI state + * @parent: parent LPI state + * @result: composite LPI state + */ +static bool combine_lpi_states(struct acpi_lpi_state *local, + struct acpi_lpi_state *parent, + struct acpi_lpi_state *result) +{ + if (parent->entry_method == ACPI_CSTATE_INTEGER) { + if (!parent->address) /* 0 means autopromotable */ + return false; + result->address = local->address + parent->address; + } else { + result->address = parent->address; + } + + result->min_residency = max(local->min_residency, parent->min_residency); + result->wake_latency = local->wake_latency + parent->wake_latency; + result->enable_parent_state = parent->enable_parent_state; + result->entry_method = local->entry_method; + + result->flags = parent->flags; + result->arch_flags = parent->arch_flags; + result->index = parent->index; + + strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); + strlcat(result->desc, "+", ACPI_CX_DESC_LEN); + strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); + return true; +} + 
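Illustrative sketch, not part of the patch: the merge rules in combine_lpi_states() above are easy to misread, so the fragment below walks one composition through them. The state names and numbers are invented; only the field semantics come from the code above — min_residency takes the max of the two levels, wake_latency accumulates, and the entry method follows the local (deeper) state.

static void __maybe_unused lpi_combine_example(void)
{
	struct acpi_lpi_state cpu = {
		.min_residency = 100,		/* us */
		.wake_latency = 5,		/* us */
		.entry_method = ACPI_CSTATE_FFH,
		.enable_parent_state = 1,	/* may combine with parent index 1 */
		.desc = "cpu-retention",
	};
	struct acpi_lpi_state cluster = {
		.min_residency = 1500,
		.wake_latency = 40,
		.index = 1,
		.desc = "cluster-off",
	};
	struct acpi_lpi_state composite;

	/*
	 * flatten_lpi_states() only attempts this combination when
	 * cluster.index <= cpu.enable_parent_state, as is the case here.
	 */
	if (combine_lpi_states(&cpu, &cluster, &composite)) {
		/*
		 * composite.min_residency == 1500 (max of the two),
		 * composite.wake_latency  == 45  (5 + 40),
		 * composite.entry_method  == ACPI_CSTATE_FFH (from cpu),
		 * composite.desc          == "cpu-retention+cluster-off"
		 */
		pr_debug("composite LPI: %s\n", composite.desc);
	}
}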
+#define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0) + +static void stash_composite_state(struct acpi_lpi_states_array *curr_level, + struct acpi_lpi_state *t) +{ + curr_level->composite_states[curr_level->composite_states_size++] = t; +} + +static int flatten_lpi_states(struct acpi_processor *pr, + struct acpi_lpi_states_array *curr_level, + struct acpi_lpi_states_array *prev_level) +{ + int i, j, state_count = curr_level->size; + struct acpi_lpi_state *p, *t = curr_level->entries; + + curr_level->composite_states_size = 0; + for (j = 0; j < state_count; j++, t++) { + struct acpi_lpi_state *flpi; + + if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED)) + continue; + + if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) { + pr_warn("Limiting number of LPI states to max (%d)\n", + ACPI_PROCESSOR_MAX_POWER); + pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); + break; + } + + flpi = &pr->power.lpi_states[flat_state_cnt]; + + if (!prev_level) { /* leaf/processor node */ + memcpy(flpi, t, sizeof(*t)); + stash_composite_state(curr_level, flpi); + flat_state_cnt++; + continue; + } + + for (i = 0; i < prev_level->composite_states_size; i++) { + p = prev_level->composite_states[i]; + if (t->index <= p->enable_parent_state && + combine_lpi_states(p, t, flpi)) { + stash_composite_state(curr_level, flpi); + flat_state_cnt++; + flpi++; + } + } + } + + kfree(curr_level->entries); + return 0; +} + +static int acpi_processor_get_lpi_info(struct acpi_processor *pr) +{ + int ret, i; + acpi_status status; + acpi_handle handle = pr->handle, pr_ahandle; + struct acpi_device *d = NULL; + struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + + if (!osc_pc_lpi_support_confirmed) + return -EOPNOTSUPP; + + if (!acpi_has_method(handle, "_LPI")) + return -EINVAL; + + flat_state_cnt = 0; + prev = &info[0]; + curr = &info[1]; + handle = pr->handle; + ret = acpi_processor_evaluate_lpi(handle, prev); + if (ret) + return ret; + flatten_lpi_states(pr, prev, NULL); + + status = acpi_get_parent(handle, &pr_ahandle); + while (ACPI_SUCCESS(status)) { + acpi_bus_get_device(pr_ahandle, &d); + handle = pr_ahandle; + + if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) + break; + + /* can be optional ? */ + if (!acpi_has_method(handle, "_LPI")) + break; + + ret = acpi_processor_evaluate_lpi(handle, curr); + if (ret) + break; + + /* flatten all the LPI states in this level of hierarchy */ + flatten_lpi_states(pr, curr, prev); + + tmp = prev, prev = curr, curr = tmp; + + status = acpi_get_parent(handle, &pr_ahandle); + } + + pr->power.count = flat_state_cnt; + /* reset the index after flattening */ + for (i = 0; i < pr->power.count; i++) + pr->power.lpi_states[i].index = i; + + /* Tell driver that _LPI is supported. 
*/ + pr->flags.has_lpi = 1; + pr->flags.power = 1; + + return 0; +} + +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -ENODEV; +} + +int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + return -ENODEV; +} + +/** + * acpi_idle_lpi_enter - enters an ACPI any LPI state + * @dev: the target CPU + * @drv: cpuidle driver containing cpuidle state info + * @index: index of target state + * + * Return: 0 for success or negative value for error + */ +static int acpi_idle_lpi_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct acpi_processor *pr; + struct acpi_lpi_state *lpi; + + pr = __this_cpu_read(processors); + + if (unlikely(!pr)) + return -EINVAL; + + lpi = &pr->power.lpi_states[index]; + if (lpi->entry_method == ACPI_CSTATE_FFH) + return acpi_processor_ffh_lpi_enter(lpi); + + return -EINVAL; +} + +static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) +{ + int i; + struct acpi_lpi_state *lpi; + struct cpuidle_state *state; + struct cpuidle_driver *drv = &acpi_idle_driver; + + if (!pr->flags.has_lpi) + return -EOPNOTSUPP; + + for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) { + lpi = &pr->power.lpi_states[i]; + + state = &drv->states[i]; + snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); + strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); + state->exit_latency = lpi->wake_latency; + state->target_residency = lpi->min_residency; + if (lpi->arch_flags) + state->flags |= CPUIDLE_FLAG_TIMER_STOP; + state->enter = acpi_idle_lpi_enter; + drv->safe_state_index = i; + } + + drv->state_count = i; + + return 0; +} + +/** + * acpi_processor_setup_cpuidle_states- prepares and configures cpuidle + * global state data i.e. idle routines + * + * @pr: the ACPI processor + */ +static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) +{ + int i; + struct cpuidle_driver *drv = &acpi_idle_driver; + + if (!pr->flags.power_setup_done || !pr->flags.power) + return -EINVAL; + + drv->safe_state_index = -1; + for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { + drv->states[i].name[0] = '\0'; + drv->states[i].desc[0] = '\0'; + } + + if (pr->flags.has_lpi) + return acpi_processor_setup_lpi_states(pr); + + return acpi_processor_setup_cstates(pr); +} + +/** + * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE + * device i.e. 
per-cpu data + * + * @pr: the ACPI processor + * @dev : the cpuidle device + */ +static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, + struct cpuidle_device *dev) +{ + if (!pr->flags.power_setup_done || !pr->flags.power || !dev) + return -EINVAL; + + dev->cpu = pr->id; + if (pr->flags.has_lpi) + return acpi_processor_ffh_lpi_probe(pr->id); + + return acpi_processor_setup_cpuidle_cx(pr, dev); +} + +static int acpi_processor_get_power_info(struct acpi_processor *pr) +{ + int ret; + + ret = acpi_processor_get_lpi_info(pr); + if (ret) + ret = acpi_processor_get_cstate_info(pr); + + return ret; +} + int acpi_processor_hotplug(struct acpi_processor *pr) { int ret = 0; @@ -933,18 +1340,15 @@ int acpi_processor_hotplug(struct acpi_processor *pr) if (disabled_by_idle_boot_param()) return 0; - if (nocst) - return -ENODEV; - if (!pr->flags.power_setup_done) return -ENODEV; dev = per_cpu(acpi_cpuidle_device, pr->id); cpuidle_pause_and_lock(); cpuidle_disable_device(dev); - acpi_processor_get_power_info(pr); - if (pr->flags.power) { - acpi_processor_setup_cpuidle_cx(pr, dev); + ret = acpi_processor_get_power_info(pr); + if (!ret && pr->flags.power) { + acpi_processor_setup_cpuidle_dev(pr, dev); ret = cpuidle_enable_device(dev); } cpuidle_resume_and_unlock(); @@ -952,7 +1356,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr) return ret; } -int acpi_processor_cst_has_changed(struct acpi_processor *pr) +int acpi_processor_power_state_has_changed(struct acpi_processor *pr) { int cpu; struct acpi_processor *_pr; @@ -961,9 +1365,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) if (disabled_by_idle_boot_param()) return 0; - if (nocst) - return -ENODEV; - if (!pr->flags.power_setup_done) return -ENODEV; @@ -1000,7 +1401,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) acpi_processor_get_power_info(_pr); if (_pr->flags.power) { dev = per_cpu(acpi_cpuidle_device, cpu); - acpi_processor_setup_cpuidle_cx(_pr, dev); + acpi_processor_setup_cpuidle_dev(_pr, dev); cpuidle_enable_device(dev); } } @@ -1015,35 +1416,16 @@ static int acpi_processor_registered; int acpi_processor_power_init(struct acpi_processor *pr) { - acpi_status status; int retval; struct cpuidle_device *dev; - static int first_run; if (disabled_by_idle_boot_param()) return 0; - if (!first_run) { - dmi_check_system(processor_power_dmi_table); - max_cstate = acpi_processor_cstate_check(max_cstate); - if (max_cstate < ACPI_C_STATES_MAX) - printk(KERN_NOTICE - "ACPI: processor limited to max C-state %d\n", - max_cstate); - first_run++; - } - - if (acpi_gbl_FADT.cst_control && !nocst) { - status = - acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Notifying BIOS of _CST ability failed")); - } - } + acpi_processor_cstate_first_run_checks(); - acpi_processor_get_power_info(pr); - pr->flags.power_setup_done = 1; + if (!acpi_processor_get_power_info(pr)) + pr->flags.power_setup_done = 1; /* * Install the idle handler if processor power management is supported. @@ -1066,7 +1448,7 @@ int acpi_processor_power_init(struct acpi_processor *pr) return -ENOMEM; per_cpu(acpi_cpuidle_device, pr->id) = dev; - acpi_processor_setup_cpuidle_cx(pr, dev); + acpi_processor_setup_cpuidle_dev(pr, dev); /* Register per-cpu cpuidle_device. 
Cpuidle driver * must already be registered before registering device @@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) if (!pr->flags.throttling) return -ENODEV; + /* + * We don't care about error returns - we just try to mark + * these reserved so that nobody else is confused into thinking + * that this region might be unused.. + * + * (In particular, allocating the IO range for Cardbus) + */ + request_region(pr->throttling.address, 6, "ACPI CPU throttle"); + pr->throttling.state = 0; duty_mask = pr->throttling.state_count - 1; @@ -46,6 +46,13 @@ DEFINE_MUTEX(acpi_device_lock); LIST_HEAD(acpi_wakeup_device_list); static DEFINE_MUTEX(acpi_hp_context_lock); +/* + * The UART device described by the SPCR table is the only object which needs + * special-casing. Everything else is covered by ACPI namespace paths in STAO + * table. + */ +static u64 spcr_uart_addr; + struct acpi_dep_data { struct list_head node; acpi_handle master; @@ -494,6 +501,8 @@ static void acpi_device_del(struct acpi_device *device) device_del(&device->dev); } +static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain); + static LIST_HEAD(acpi_device_del_list); static DEFINE_MUTEX(acpi_device_del_lock); @@ -514,6 +523,9 @@ static void acpi_device_del_work_fn(struct work_struct *work_not_used) mutex_unlock(&acpi_device_del_lock); + blocking_notifier_call_chain(&acpi_reconfig_chain, + ACPI_RECONFIG_DEVICE_REMOVE, adev); + acpi_device_del(adev); /* * Drop references to all power resources that might have been @@ -1406,7 +1418,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, acpi_bus_get_flags(device); device->flags.match_driver = false; device->flags.initialized = true; - device->flags.visited = false; + acpi_device_clear_enumerated(device); device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); @@ -1453,6 +1465,41 @@ static int acpi_add_single_object(struct acpi_device **child, return 0; } +static acpi_status acpi_get_resource_memory(struct acpi_resource *ares, + void *context) +{ + struct resource *res = context; + + if (acpi_dev_resource_memory(ares, res)) + return AE_CTRL_TERMINATE; + + return AE_OK; +} + +static bool acpi_device_should_be_hidden(acpi_handle handle) +{ + acpi_status status; + struct resource res; + + /* Check if it should ignore the UART device */ + if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS))) + return false; + + /* + * The UART device described in SPCR table is assumed to have only one + * memory resource present. So we only look for the first one here. 
+ */ + status = acpi_walk_resources(handle, METHOD_NAME__CRS, + acpi_get_resource_memory, &res); + if (ACPI_FAILURE(status) || res.start != spcr_uart_addr) + return false; + + acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n", + &res.start); + + return true; +} + static int acpi_bus_type_and_status(acpi_handle handle, int *type, unsigned long long *sta) { @@ -1466,6 +1513,9 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, switch (acpi_type) { case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ case ACPI_TYPE_DEVICE: + if (acpi_device_should_be_hidden(handle)) + return -ENODEV; + *type = ACPI_BUS_TYPE_DEVICE; status = acpi_bus_get_status_handle(handle, sta); if (ACPI_FAILURE(status)) @@ -1676,15 +1726,20 @@ static void acpi_default_enumeration(struct acpi_device *device) bool is_spi_i2c_slave = false; /* - * Do not enemerate SPI/I2C slaves as they will be enuerated by their + * Do not enumerate SPI/I2C slaves as they will be enumerated by their * respective parents. */ INIT_LIST_HEAD(&resource_list); acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, &is_spi_i2c_slave); acpi_dev_free_resource_list(&resource_list); - if (!is_spi_i2c_slave) + if (!is_spi_i2c_slave) { acpi_create_platform_device(device); + acpi_device_set_enumerated(device); + } else { + blocking_notifier_call_chain(&acpi_reconfig_chain, + ACPI_RECONFIG_DEVICE_ADD, device); + } } static const struct acpi_device_id generic_device_ids[] = { @@ -1751,7 +1806,7 @@ static void acpi_bus_attach(struct acpi_device *device) acpi_bus_get_status(device); /* Skip devices that are not present. */ if (!acpi_device_is_present(device)) { - device->flags.visited = false; + acpi_device_clear_enumerated(device); device->flags.power_manageable = 0; return; } @@ -1766,7 +1821,7 @@ static void acpi_bus_attach(struct acpi_device *device) device->flags.initialized = true; } - device->flags.visited = false; + ret = acpi_scan_attach_handler(device); if (ret < 0) return; @@ -1780,7 +1835,6 @@ static void acpi_bus_attach(struct acpi_device *device) if (!ret && device->pnp.type.platform_id) acpi_default_enumeration(device); } - device->flags.visited = true; ok: list_for_each_entry(child, &device->children, node) @@ -1872,7 +1926,7 @@ void acpi_bus_trim(struct acpi_device *adev) */ acpi_device_set_power(adev, ACPI_STATE_D3_COLD); adev->flags.initialized = false; - adev->flags.visited = false; + acpi_device_clear_enumerated(adev); } EXPORT_SYMBOL_GPL(acpi_bus_trim); @@ -1916,9 +1970,26 @@ static int acpi_bus_scan_fixed(void) return result < 0 ? result : 0; } +static void __init acpi_get_spcr_uart_addr(void) +{ + acpi_status status; + struct acpi_table_spcr *spcr_ptr; + + status = acpi_get_table(ACPI_SIG_SPCR, 0, + (struct acpi_table_header **)&spcr_ptr); + if (ACPI_SUCCESS(status)) + spcr_uart_addr = spcr_ptr->serial_port.address; + else + printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n"); +} + +static bool acpi_scan_initialized; + int __init acpi_scan_init(void) { int result; + acpi_status status; + struct acpi_table_stao *stao_ptr; acpi_pci_root_init(); acpi_pci_link_init(); @@ -1934,6 +2005,20 @@ int __init acpi_scan_init(void) acpi_scan_add_handler(&generic_device_handler); + /* + * If there is STAO table, check whether it needs to ignore the UART + * device in SPCR table. 
+ */ + status = acpi_get_table(ACPI_SIG_STAO, 0, + (struct acpi_table_header **)&stao_ptr); + if (ACPI_SUCCESS(status)) { + if (stao_ptr->header.length > sizeof(struct acpi_table_stao)) + printk(KERN_INFO PREFIX "STAO Name List not yet supported."); + + if (stao_ptr->ignore_uart) + acpi_get_spcr_uart_addr(); + } + mutex_lock(&acpi_scan_lock); /* * Enumerate devices in the ACPI namespace. @@ -1960,6 +2045,8 @@ int __init acpi_scan_init(void) acpi_update_all_gpes(); + acpi_scan_initialized = true; + out: mutex_unlock(&acpi_scan_lock); return result; @@ -2003,3 +2090,57 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) return count; } + +struct acpi_table_events_work { + struct work_struct work; + void *table; + u32 event; +}; + +static void acpi_table_events_fn(struct work_struct *work) +{ + struct acpi_table_events_work *tew; + + tew = container_of(work, struct acpi_table_events_work, work); + + if (tew->event == ACPI_TABLE_EVENT_LOAD) { + acpi_scan_lock_acquire(); + acpi_bus_scan(ACPI_ROOT_OBJECT); + acpi_scan_lock_release(); + } + + kfree(tew); +} + +void acpi_scan_table_handler(u32 event, void *table, void *context) +{ + struct acpi_table_events_work *tew; + + if (!acpi_scan_initialized) + return; + + if (event != ACPI_TABLE_EVENT_LOAD) + return; + + tew = kmalloc(sizeof(*tew), GFP_KERNEL); + if (!tew) + return; + + INIT_WORK(&tew->work, acpi_table_events_fn); + tew->table = table; + tew->event = event; + + schedule_work(&tew->work); +} + +int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_reconfig_chain, nb); +} +EXPORT_SYMBOL(acpi_reconfig_notifier_register); + +int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb); +} +EXPORT_SYMBOL(acpi_reconfig_notifier_unregister); @@ -47,15 +47,32 @@ static void acpi_sleep_tts_switch(u32 acpi_state) } } -static int tts_notify_reboot(struct notifier_block *this, +static void acpi_sleep_pts_switch(u32 acpi_state) +{ + acpi_status status; + + status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state); + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + /* + * OS can't evaluate the _PTS object correctly. Some warning + * message will be printed. But it won't break anything. + */ + printk(KERN_NOTICE "Failure in evaluating _PTS object\n"); + } +} + +static int sleep_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { acpi_sleep_tts_switch(ACPI_STATE_S5); + + acpi_sleep_pts_switch(ACPI_STATE_S5); + return NOTIFY_DONE; } -static struct notifier_block tts_notifier = { - .notifier_call = tts_notify_reboot, +static struct notifier_block sleep_notifier = { + .notifier_call = sleep_notify_reboot, .next = NULL, .priority = 0, }; @@ -899,9 +916,9 @@ int __init acpi_sleep_init(void) pr_info(PREFIX "(supports%s)\n", supported); /* - * Register the tts_notifier to reboot notifier list so that the _TTS - * object can also be evaluated when the system enters S5. + * Register the sleep_notifier to reboot notifier list so that the _TTS + * and _PTS object can also be evaluated when the system enters S5. 
*/ - register_reboot_notifier(&tts_notifier); + register_reboot_notifier(&sleep_notifier); return 0; } @@ -378,8 +378,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, return; } -static acpi_status -acpi_sysfs_table_handler(u32 event, void *table, void *context) +acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) { struct acpi_table_attr *table_attr; @@ -452,9 +451,8 @@ static int acpi_tables_sysfs_init(void) kobject_uevent(tables_kobj, KOBJ_ADD); kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); - status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); - return ACPI_FAILURE(status) ? -EINVAL : 0; + return 0; err_dynamic_tables: kobject_put(tables_kobj); err: @@ -34,6 +34,8 @@ #include <linux/bootmem.h> #include <linux/earlycpio.h> #include <linux/memblock.h> +#include <linux/initrd.h> +#include <linux/acpi.h> #include "internal.h" #ifdef CONFIG_ACPI_CUSTOM_DSDT @@ -481,8 +483,10 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES); #define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT) -static void __init acpi_table_initrd_init(void *data, size_t size) +void __init acpi_table_upgrade(void) { + void *data = (void *)initrd_start; + size_t size = initrd_end - initrd_start; int sig, no, table_nr = 0, total_offset = 0; long offset = 0; struct acpi_table_header *table; @@ -540,7 +544,7 @@ static void __init acpi_table_initrd_init(void *data, size_t size) return; acpi_tables_addr = - memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT, + memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS, all_tables_size, PAGE_SIZE); if (!acpi_tables_addr) { WARN_ON(1); @@ -578,10 +582,10 @@ static void __init acpi_table_initrd_init(void *data, size_t size) clen = size; if (clen > MAP_CHUNK_SIZE - slop) clen = MAP_CHUNK_SIZE - slop; - dest_p = early_ioremap(dest_addr & PAGE_MASK, - clen + slop); + dest_p = early_memremap(dest_addr & PAGE_MASK, + clen + slop); memcpy(dest_p + slop, src_p, clen); - early_iounmap(dest_p, clen + slop); + early_memunmap(dest_p, clen + slop); src_p += clen; dest_addr += clen; size -= clen; @@ -696,10 +700,6 @@ next_table: } } #else -static void __init acpi_table_initrd_init(void *data, size_t size) -{ -} - static acpi_status acpi_table_initrd_override(struct acpi_table_header *existing_table, acpi_physical_address *address, @@ -742,11 +742,6 @@ acpi_os_table_override(struct acpi_table_header *existing_table, return AE_OK; } -void __init early_acpi_table_init(void *data, size_t size) -{ - acpi_table_initrd_init(data, size); -} - /* * acpi_table_init() * @@ -1259,7 +1259,8 @@ static int __init acpi_thermal_init(void) return -ENODEV; } - acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm"); + acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm", + WQ_HIGHPRI | WQ_MEM_RECLAIM, 0); if (!acpi_thermal_pm_queue) return -ENODEV; @@ -167,6 +167,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), }, }, + { + .callback = video_detect_force_video, + .ident = "ThinkPad X201T", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"), + }, + }, /* The native backlight controls do not work on some older machines */ { @@ -98,12 +98,12 @@ config SATA_AHCI_PLATFORM If unsure, say N. 
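Sketch, not part of the series: the Broadcom AHCI changes below distinguish SoC generations by carrying an enum in the .data field of the driver's of_device_id table, which is why the match table moves above brcm_ahci_probe() in the ahci_brcm.c hunks. The fragment below shows that pattern in isolation; every name in it is invented. Starting the enum at 1 keeps the matched .data cookie non-NULL, just as BRCM_SATA_BCM7425 = 1 does below.

#include <linux/of.h>

enum example_version {
	EXAMPLE_V1 = 1,		/* non-zero so a matched .data is never NULL */
	EXAMPLE_V2,
};

static const struct of_device_id example_of_match[] = {
	/* .data smuggles the hardware revision through the match table */
	{ .compatible = "vendor,example-v1", .data = (void *)EXAMPLE_V1 },
	{ .compatible = "vendor,example-v2", .data = (void *)EXAMPLE_V2 },
	{ /* sentinel */ },
};

static enum example_version example_get_version(struct device_node *np)
{
	const struct of_device_id *of_id = of_match_node(example_of_match, np);

	/* fall back to the oldest revision if the node fails to match */
	return of_id ? (enum example_version)(unsigned long)of_id->data
		     : EXAMPLE_V1;
}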
-config AHCI_BRCMSTB - tristate "Broadcom STB AHCI SATA support" - depends on ARCH_BRCMSTB || BMIPS_GENERIC +config AHCI_BRCM + tristate "Broadcom AHCI SATA support" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP help This option enables support for the AHCI SATA3 controller found on - STB SoC's. + Broadcom SoCs. If unsure, say N. @@ -11,7 +11,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o obj-$(CONFIG_SATA_SIL24) += sata_sil24.o obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o -obj-$(CONFIG_AHCI_BRCMSTB) += ahci_brcmstb.o libahci.o libahci_platform.o +obj-$(CONFIG_AHCI_BRCM) += ahci_brcm.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o @@ -580,7 +580,7 @@ static struct pci_driver ahci_pci_driver = { }, }; -#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) +#if IS_ENABLED(CONFIG_PATA_MARVELL) static int marvell_enable; #else static int marvell_enable = 1; @@ -71,6 +71,12 @@ (DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \ (MMIO_ENDIAN << MMIO_ENDIAN_SHIFT)) +enum brcm_ahci_version { + BRCM_SATA_BCM7425 = 1, + BRCM_SATA_BCM7445, + BRCM_SATA_NSP, +}; + enum brcm_ahci_quirks { BRCM_AHCI_QUIRK_NO_NCQ = BIT(0), BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1), @@ -81,6 +87,7 @@ struct brcm_ahci_priv { void __iomem *top_ctrl; u32 port_mask; u32 quirks; + enum brcm_ahci_version version; }; static const struct ata_port_info ahci_brcm_port_info = { @@ -247,9 +254,19 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev, static void brcm_sata_init(struct brcm_ahci_priv *priv) { + void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL; + /* Configure endianness */ - brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, - priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL); + if (priv->version == BRCM_SATA_NSP) { + u32 data = brcm_sata_readreg(ctrl); + + data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) | + (0x03 << DMADESC_ENDIAN_SHIFT)); + data |= (0x02 << DMADATA_ENDIAN_SHIFT) | + (0x02 << DMADESC_ENDIAN_SHIFT); + brcm_sata_writereg(data, ctrl); + } else + brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl); } #ifdef CONFIG_PM_SLEEP @@ -282,8 +299,17 @@ static struct scsi_host_template ahci_platform_sht = { AHCI_SHT(DRV_NAME), }; +static const struct of_device_id ahci_of_match[] = { + {.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425}, + {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445}, + {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP}, + {}, +}; +MODULE_DEVICE_TABLE(of, ahci_of_match); + static int brcm_ahci_probe(struct platform_device *pdev) { + const struct of_device_id *of_id; struct device *dev = &pdev->dev; struct brcm_ahci_priv *priv; struct ahci_host_priv *hpriv; @@ -293,6 +319,12 @@ static int brcm_ahci_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + + of_id = of_match_node(ahci_of_match, pdev->dev.of_node); + if (!of_id) + return -ENODEV; + + priv->version = (enum brcm_ahci_version)of_id->data; priv->dev = dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl"); @@ -300,7 +332,8 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(priv->top_ctrl)) return PTR_ERR(priv->top_ctrl); - if (of_device_is_compatible(dev->of_node, "brcm,bcm7425-ahci")) { + if ((priv->version == 
BRCM_SATA_BCM7425) || + (priv->version == BRCM_SATA_NSP)) { priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; } @@ -354,13 +387,6 @@ static int brcm_ahci_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id ahci_of_match[] = { - {.compatible = "brcm,bcm7425-ahci"}, - {.compatible = "brcm,bcm7445-ahci"}, - {}, -}; -MODULE_DEVICE_TABLE(of, ahci_of_match); - static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume); static struct platform_driver brcm_ahci_driver = { @@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info( u32 val; plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL); - if (IS_ERR(plat_data)) + if (!plat_data) return &ahci_port_info; plat_data->sgpio_ctrl = devm_ioremap_resource(dev, @@ -1975,7 +1975,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) */ pp->active_link = qc->dev->link; - if (qc->tf.protocol == ATA_PROT_NCQ) + if (ata_is_ncq(qc->tf.protocol)) writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { @@ -2392,12 +2392,20 @@ static int ahci_port_start(struct ata_port *ap) static void ahci_port_stop(struct ata_port *ap) { const char *emsg = NULL; + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *host_mmio = hpriv->mmio; int rc; /* de-initialize port */ rc = ahci_deinit_port(ap, &emsg); if (rc) ata_port_warn(ap, "%s (%d)\n", emsg, rc); + + /* + * Clear GHC.IS to prevent stuck INTx after disabling MSI and + * re-enabling INTx. + */ + writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT); } void ahci_print_info(struct ata_host *host, const char *scc_s) @@ -69,6 +69,7 @@ #include <asm/unaligned.h> #include <linux/cdrom.h> #include <linux/ratelimit.h> +#include <linux/leds.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> @@ -1238,7 +1239,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) } else tf.command = ATA_CMD_READ_NATIVE_MAX; - tf.protocol |= ATA_PROT_NODATA; + tf.protocol = ATA_PROT_NODATA; tf.device |= ATA_LBA; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); @@ -1297,7 +1298,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) tf.device |= (new_sectors >> 24) & 0xf; } - tf.protocol |= ATA_PROT_NODATA; + tf.protocol = ATA_PROT_NODATA; tf.device |= ATA_LBA; tf.lbal = (new_sectors >> 0) & 0xff; @@ -4314,6 +4315,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { */ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + /* + * Device times out with higher max sects. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671 + */ + { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ @@ -4842,7 +4849,7 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) { struct ata_link *link = qc->dev->link; - if (qc->tf.protocol == ATA_PROT_NCQ) { + if (ata_is_ncq(qc->tf.protocol)) { if (!ata_tag_valid(link->active_tag)) return 0; } else { @@ -5007,7 +5014,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) ata_sg_clean(qc); /* command should be marked inactive atomically with qc completion */ - if (qc->tf.protocol == ATA_PROT_NCQ) { + if (ata_is_ncq(qc->tf.protocol)) { link->sactive &= ~(1 << qc->tag); if (!link->sactive) ap->nr_active_links--; @@ -5044,7 +5051,7 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) { struct ata_device *dev = qc->dev; - if (ata_is_nodata(qc->tf.protocol)) + if (!ata_is_data(qc->tf.protocol)) return; if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) @@ -5072,6 +5079,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + /* Trigger the LED (if available) */ + ledtrig_disk_activity(); + /* XXX: New EH and old EH use different mechanisms to * synchronize EH with regular execution path. * @@ -5127,7 +5137,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc) switch (qc->tf.command) { case ATA_CMD_SET_FEATURES: if (qc->tf.feature != SETFEATURES_WC_ON && - qc->tf.feature != SETFEATURES_WC_OFF) + qc->tf.feature != SETFEATURES_WC_OFF && + qc->tf.feature != SETFEATURES_RA_ON && + qc->tf.feature != SETFEATURES_RA_OFF) break; /* fall through */ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ @@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host) ata_scsi_port_error_handler(host, ap); /* finish or retry handled scmd's and clean up */ - WARN_ON(host->host_failed || !list_empty(&eh_work_q)); + WARN_ON(!list_empty(&eh_work_q)); DPRINTK("EXIT\n"); } @@ -2607,9 +2607,13 @@ static void ata_eh_link_report(struct ata_link *link) [DMA_FROM_DEVICE] = "in", }; static const char *prot_str[] = { + [ATA_PROT_UNKNOWN] = "unknown", + [ATA_PROT_NODATA] = "nodata", [ATA_PROT_PIO] = "pio", [ATA_PROT_DMA] = "dma", - [ATA_PROT_NCQ] = "ncq", + [ATA_PROT_NCQ] = "ncq dma", + [ATA_PROT_NCQ_NODATA] = "ncq nodata", + [ATAPI_PROT_NODATA] = "nodata", [ATAPI_PROT_PIO] = "pio", [ATAPI_PROT_DMA] = "dma", }; @@ -3177,7 +3181,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) } tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; - tf.protocol |= ATA_PROT_NODATA; + tf.protocol = ATA_PROT_NODATA; err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); if (park && (err_mask || tf.lbal != 0xc4)) { ata_dev_err(dev, "head unload failed!\n"); @@ -304,7 +304,7 @@ static void ata_scsi_set_invalid_field(struct ata_device *dev, struct scsi_cmnd *cmd, u16 field, u8 bit) { ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0); - /* "Invalid field in cbd" */ + /* "Invalid field in CDB" */ scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, field, bit, 1); } @@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq) if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) return 0; - if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) + if (!blk_rq_bytes(rq) || op_is_write(req_op(rq))) return 0; return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; @@ -2075,8 +2075,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 0x03, 0x20, /* SBC-2 (no 
version claimed) */ - 0x02, - 0x60 /* SPC-3 (no version claimed) */ + 0x03, + 0x00 /* SPC-3 (no version claimed) */ }; const u8 versions_zbc[] = { 0x00, @@ -2097,7 +2097,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 0, 0x5, /* claim SPC-3 version compatibility */ 2, - 95 - 4 + 95 - 4, + 0, + 0, + 2 }; VPRINTK("ENTER\n"); @@ -2109,8 +2112,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) hdr[1] |= (1 << 7); - if (args->dev->class == ATA_DEV_ZAC) + if (args->dev->class == ATA_DEV_ZAC) { hdr[0] = TYPE_ZBC; + hdr[2] = 0x7; /* claim SPC-5 version compatibility */ + } memcpy(rbuf, hdr, sizeof(hdr)); memcpy(&rbuf[8], "ATA ", 8); @@ -2314,7 +2319,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. */ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * 512 / 8, &rbuf[36]); + put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } @@ -2424,15 +2429,17 @@ static void modecpy(u8 *dest, const u8 *src, int n, bool changeable) static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) { modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable); - if (changeable || ata_id_wcache_enabled(id)) - buf[2] |= (1 << 2); /* write cache enable */ - if (!changeable && !ata_id_rahead_enabled(id)) - buf[12] |= (1 << 5); /* disable read ahead */ + if (changeable) { + buf[2] |= (1 << 2); /* ata_mselect_caching() */ + } else { + buf[2] |= (ata_id_wcache_enabled(id) << 2); /* write cache enable */ + buf[12] |= (!ata_id_rahead_enabled(id) << 5); /* disable read ahead */ + } return sizeof(def_cache_mpage); } /** - * ata_msense_ctl_mode - Simulate MODE SENSE control mode page + * ata_msense_control - Simulate MODE SENSE control mode page * @dev: ATA device of interest * @buf: output buffer * @changeable: whether changeable parameters are requested @@ -2442,12 +2449,17 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) * LOCKING: * None. 
*/ -static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf, +static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, bool changeable) { modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable); - if (changeable && (dev->flags & ATA_DFLAG_D_SENSE)) - buf[2] |= (1 << 2); /* Descriptor sense requested */ + if (changeable) { + buf[2] |= (1 << 2); /* ata_mselect_control() */ + } else { + bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); + + buf[2] |= (d_sense << 2); /* descriptor format sense data */ + } return sizeof(def_control_mpage); } @@ -2566,13 +2578,13 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) break; case CONTROL_MPAGE: - p += ata_msense_ctl_mode(args->dev, p, page_control == 1); + p += ata_msense_control(args->dev, p, page_control == 1); break; case ALL_MPAGES: p += ata_msense_rw_recovery(p, page_control == 1); p += ata_msense_caching(args->id, p, page_control == 1); - p += ata_msense_ctl_mode(args->dev, p, page_control == 1); + p += ata_msense_control(args->dev, p, page_control == 1); break; default: /* invalid page code */ @@ -3077,6 +3089,9 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) goto invalid_fld; } + if (ata_is_ncq(tf->protocol) && (cdb[2] & 0x3) == 0) + tf->protocol = ATA_PROT_NCQ_NODATA; + /* enable LBA */ tf->flags |= ATA_TFLAG_LBA; @@ -3125,8 +3140,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) tf->command = cdb[9]; } - /* For NCQ commands with FPDMA protocol, copy the tag value */ - if (tf->protocol == ATA_PROT_NCQ) + /* For NCQ commands copy the tag value */ + if (ata_is_ncq(tf->protocol)) tf->nsect = qc->tag << 3; /* enforce correct master/slave bit */ @@ -3305,7 +3320,13 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) goto invalid_param_len; buf = page_address(sg_page(scsi_sglist(scmd))); - size = ata_set_lba_range_entries(buf, 512, block, n_block); + + if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) { + size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block); + } else { + fp = 2; + goto invalid_fld; + } if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { /* Newer devices support queued TRIM commands */ @@ -3454,7 +3475,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) goto invalid_param_len; } sect = n_block / 512; - options = cdb[14]; + options = cdb[14] & 0xbf; if (ata_ncq_enabled(qc->dev) && ata_fpdma_zac_mgmt_in_supported(qc->dev)) { @@ -3464,7 +3485,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) tf->nsect = qc->tag << 3; tf->feature = sect & 0xff; tf->hob_feature = (sect >> 8) & 0xff; - tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; + tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); } else { tf->command = ATA_CMD_ZAC_MGMT_IN; tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; @@ -3506,7 +3527,7 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; const u8 *cdb = scmd->cmnd; - u8 reset_all, sa; + u8 all, sa; u64 block; u32 n_block; u16 fp = (u16)-1; @@ -3533,20 +3554,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) if (block > dev->n_sectors) goto out_of_range; - reset_all = cdb[14] & 0x1; + all = cdb[14] & 0x1; if (ata_ncq_enabled(qc->dev) && ata_fpdma_zac_mgmt_out_supported(qc->dev)) { - tf->protocol = ATA_PROT_NCQ; + tf->protocol = ATA_PROT_NCQ_NODATA; tf->command = ATA_CMD_NCQ_NON_DATA; - 
tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; + tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; tf->nsect = qc->tag << 3; - tf->auxiliary = sa | (reset_all & 0x1) << 8; + tf->auxiliary = sa | ((u16)all << 8); } else { tf->protocol = ATA_PROT_NODATA; tf->command = ATA_CMD_ZAC_MGMT_OUT; tf->feature = sa; - tf->hob_feature = reset_all & 0x1; + tf->hob_feature = all; } tf->lbah = (block >> 16) & 0xff; tf->lbam = (block >> 8) & 0xff; @@ -3667,7 +3688,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc, /* * Check that read-only bits are not modified. */ - ata_msense_ctl_mode(dev, mpage, false); + ata_msense_control(dev, mpage, false); for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { if (i == 0) continue; @@ -4039,11 +4060,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) args.done = cmd->scsi_done; switch(scsicmd[0]) { - /* TODO: worth improving? */ - case FORMAT_UNIT: - ata_scsi_invalid_field(dev, cmd, 0); - break; - case INQUIRY: if (scsicmd[1] & 2) /* is CmdDt set? */ ata_scsi_invalid_field(dev, cmd, 1); @@ -495,12 +495,13 @@ struct ata_show_ering_arg { static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg) { struct ata_show_ering_arg* arg = void_arg; - struct timespec time; + u64 seconds; + u32 rem; - jiffies_to_timespec(ent->timestamp,&time); + seconds = div_u64_rem(ent->timestamp, HZ, &rem); arg->written += sprintf(arg->buf + arg->written, - "[%5lu.%06lu]", - time.tv_sec, time.tv_nsec); + "[%5llu.%09lu]", seconds, + rem * NSEC_PER_SEC / HZ); arg->written += get_ata_err_names(ent->err_mask, arg->buf + arg->written); return 0; @@ -565,7 +565,7 @@ chan_request_fail: qc->ap->hsm_task_state = HSM_ST_ERR; cf_ctrl_reset(acdev); - spin_unlock_irqrestore(qc->ap->lock, flags); + spin_unlock_irqrestore(&acdev->host->lock, flags); sff_intr: dma_complete(acdev); } @@ -183,8 +183,8 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev) * We must now look at the PIO mode situation. 
We may need to * adjust the PIO mode to keep the timings acceptable */ - if (adev->dma_mode >= XFER_MW_DMA_2) - wanted_pio = 4; + if (adev->dma_mode >= XFER_MW_DMA_2) + wanted_pio = 4; else if (adev->dma_mode == XFER_MW_DMA_1) wanted_pio = 3; else if (adev->dma_mode == XFER_MW_DMA_0) @@ -368,7 +368,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) /* PCI clocking determines the ATA timing values to use */ /* info_hpt366 is safe against re-entry so we can scribble on it */ - switch ((reg1 & 0x700) >> 8) { + switch ((reg1 & 0xf00) >> 8) { case 9: hpriv = &hpt366_40; break; @@ -146,7 +146,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i if (pdev->device == 0x6101) ppi[1] = &ata_dummy_port_info; -#if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE) +#if IS_ENABLED(CONFIG_SATA_AHCI) if (!marvell_pata_active(pdev)) { printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n"); return -ENODEV; @@ -259,11 +259,8 @@ static int sata_dwc_dma_init_old(struct platform_device *pdev, /* Get physical SATA DMA register base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hsdev->dma->regs)) { - dev_err(&pdev->dev, - "ioremap failed for AHBDMA register address\n"); + if (IS_ERR(hsdev->dma->regs)) return PTR_ERR(hsdev->dma->regs); - } /* Initialize AHB DMAC */ return dw_dma_probe(hsdev->dma); @@ -281,7 +278,7 @@ static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev) static const char *get_prot_descript(u8 protocol) { - switch ((enum ata_tf_protocols)protocol) { + switch (protocol) { case ATA_PROT_NODATA: return "ATA no data"; case ATA_PROT_PIO: @@ -290,6 +287,8 @@ static const char *get_prot_descript(u8 protocol) return "ATA DMA"; case ATA_PROT_NCQ: return "ATA NCQ"; + case ATA_PROT_NCQ_NODATA: + return "ATA NCQ no data"; case ATAPI_PROT_NODATA: return "ATAPI no data"; case ATAPI_PROT_PIO: @@ -1225,11 +1224,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) /* Ioremap SATA registers */ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&ofdev->dev, res); - if (IS_ERR(base)) { - dev_err(&ofdev->dev, - "ioremap failed for SATA register address\n"); + if (IS_ERR(base)) return PTR_ERR(base); - } dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); /* Synopsys DWC SATA specific Registers */ @@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) * Looks like a lot of fuss, but it avoids an unnecessary * +1 usec read-after-write delay for unaffected registers. */ - laddr = (long)addr & 0xffff; + laddr = (unsigned long)addr & 0xffff; if (laddr >= 0x300 && laddr <= 0x33c) { laddr &= 0x000f; if (laddr == 0x4 || laddr == 0xc) { @@ -181,13 +181,17 @@ static char *res_strings[] = { "reserved 27", "reserved 28", "reserved 29", - "reserved 30", + "reserved 30", /* FIXME: The strings between 30-40 might be wrong. 
*/ "reassembly abort: no buffers", "receive buffer overflow", "change in GFC", "receive buffer full", "low priority discard - no receive descriptor", "low priority discard - missing end of packet", + "reserved 37", + "reserved 38", + "reserved 39", + "reseverd 40", "reserved 41", "reserved 42", "reserved 43", @@ -2795,9 +2795,7 @@ static int hrz_probe(struct pci_dev *pci_dev, dev->atm_dev->ci_range.vpi_bits = vpi_bits; dev->atm_dev->ci_range.vci_bits = 10-vpi_bits; - init_timer(&dev->housekeeping); - dev->housekeeping.function = do_housekeeping; - dev->housekeeping.data = (unsigned long) dev; + setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev); mod_timer(&dev->housekeeping, jiffies); out: @@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev) /* make the ptr point to the corresponding buffer desc entry */ buf_desc_ptr += desc; if (!desc || (desc > iadev->num_rx_desc) || - ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { + ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) { free_desc(dev, desc); IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) return -1; @@ -874,7 +874,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd) scq->skb = kmalloc(sizeof(struct sk_buff *) * (size / NS_SCQE_SIZE), GFP_KERNEL); if (!scq->skb) { - kfree(scq->org); + dma_free_coherent(&card->pcidev->dev, + 2 * size, scq->org, scq->dma); kfree(scq); return NULL; } @@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o -obj-$(CONFIG_ISA) += isa.o +obj-$(CONFIG_ISA_BUS_API) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o @@ -180,4 +180,4 @@ static int __init isa_bus_init(void) return error; } -device_initcall(isa_bus_init); +postcore_initcall(isa_bus_init); @@ -391,6 +391,7 @@ static ssize_t show_valid_zones(struct device *dev, unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; struct page *first_page; struct zone *zone; + int zone_shift = 0; start_pfn = section_nr_to_pfn(mem->start_section_nr); end_pfn = start_pfn + nr_pages; @@ -402,21 +403,26 @@ static ssize_t show_valid_zones(struct device *dev, zone = page_zone(first_page); - if (zone_idx(zone) == ZONE_MOVABLE - 1) { - /*The mem block is the last memoryblock of this zone.*/ - if (end_pfn == zone_end_pfn(zone)) - return sprintf(buf, "%s %s\n", - zone->name, (zone + 1)->name); + /* MMOP_ONLINE_KEEP */ + sprintf(buf, "%s", zone->name); + + /* MMOP_ONLINE_KERNEL */ + zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL); + if (zone_shift) { + strcat(buf, " "); + strcat(buf, (zone + zone_shift)->name); } - if (zone_idx(zone) == ZONE_MOVABLE) { - /*The mem block is the first memoryblock of ZONE_MOVABLE.*/ - if (start_pfn == zone->zone_start_pfn) - return sprintf(buf, "%s %s\n", - zone->name, (zone - 1)->name); + /* MMOP_ONLINE_MOVABLE */ + zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE); + if (zone_shift) { + strcat(buf, " "); + strcat(buf, (zone + zone_shift)->name); } - return sprintf(buf, "%s\n", zone->name); + strcat(buf, "\n"); + + return strlen(buf); } static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); #endif @@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv) static void module_create_drivers_dir(struct module_kobject *mk) { - if (!mk || mk->drivers_dir) - return; + static DEFINE_MUTEX(drivers_dir_mutex); - mk->drivers_dir = 
kobject_create_and_add("drivers", &mk->kobj); + mutex_lock(&drivers_dir_mutex); + if (mk && !mk->drivers_dir) + mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); + mutex_unlock(&drivers_dir_mutex); } void module_add_driver(struct module *mod, struct device_driver *drv) @@ -56,6 +56,7 @@ static ssize_t node_read_meminfo(struct device *dev, { int n; int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); struct sysinfo i; si_meminfo_node(&i, nid); @@ -74,16 +75,16 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(i.totalram), nid, K(i.freeram), nid, K(i.totalram - i.freeram), - nid, K(node_page_state(nid, NR_ACTIVE_ANON) + - node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON) + - node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_ACTIVE_ANON)), - nid, K(node_page_state(nid, NR_INACTIVE_ANON)), - nid, K(node_page_state(nid, NR_ACTIVE_FILE)), - nid, K(node_page_state(nid, NR_INACTIVE_FILE)), - nid, K(node_page_state(nid, NR_UNEVICTABLE)), - nid, K(node_page_state(nid, NR_MLOCK))); + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + + node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + + node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_UNEVICTABLE)), + nid, K(sum_zone_node_page_state(nid, NR_MLOCK))); #ifdef CONFIG_HIGHMEM n += sprintf(buf + n, @@ -113,30 +114,34 @@ static ssize_t node_read_meminfo(struct device *dev, "Node %d SUnreclaim: %8lu kB\n" #ifdef CONFIG_TRANSPARENT_HUGEPAGE "Node %d AnonHugePages: %8lu kB\n" + "Node %d ShmemHugePages: %8lu kB\n" + "Node %d ShmemPmdMapped: %8lu kB\n" #endif , - nid, K(node_page_state(nid, NR_FILE_DIRTY)), - nid, K(node_page_state(nid, NR_WRITEBACK)), - nid, K(node_page_state(nid, NR_FILE_PAGES)), - nid, K(node_page_state(nid, NR_FILE_MAPPED)), - nid, K(node_page_state(nid, NR_ANON_PAGES)), + nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), + nid, K(node_page_state(pgdat, NR_WRITEBACK)), + nid, K(node_page_state(pgdat, NR_FILE_PAGES)), + nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), + nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), nid, K(i.sharedram), - nid, node_page_state(nid, NR_KERNEL_STACK) * - THREAD_SIZE / 1024, - nid, K(node_page_state(nid, NR_PAGETABLE)), - nid, K(node_page_state(nid, NR_UNSTABLE_NFS)), - nid, K(node_page_state(nid, NR_BOUNCE)), - nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)), - nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + - node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), - nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), + nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB), + nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), + nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)), + nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), + nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) + + sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) - , nid, - K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR)); + nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(node_page_state(pgdat, NR_ANON_THPS) * + HPAGE_PMD_NR), 
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * + HPAGE_PMD_NR)); #else - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); + nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); #endif n += hugetlb_report_node_meminfo(nid, buf + n); return n; @@ -155,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev, "interleave_hit %lu\n" "local_node %lu\n" "other_node %lu\n", - node_page_state(dev->id, NUMA_HIT), - node_page_state(dev->id, NUMA_MISS), - node_page_state(dev->id, NUMA_FOREIGN), - node_page_state(dev->id, NUMA_INTERLEAVE_HIT), - node_page_state(dev->id, NUMA_LOCAL), - node_page_state(dev->id, NUMA_OTHER)); + sum_zone_node_page_state(dev->id, NUMA_HIT), + sum_zone_node_page_state(dev->id, NUMA_MISS), + sum_zone_node_page_state(dev->id, NUMA_FOREIGN), + sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_node_page_state(dev->id, NUMA_LOCAL), + sum_zone_node_page_state(dev->id, NUMA_OTHER)); } static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); @@ -168,12 +173,18 @@ static ssize_t node_read_vmstat(struct device *dev, struct device_attribute *attr, char *buf) { int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); int i; int n = 0; for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], - node_page_state(nid, i)); + sum_zone_node_page_state(nid, i)); + + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + n += sprintf(buf+n, "%s %lu\n", + vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], + node_page_state(pgdat, i)); return n; } @@ -121,6 +121,7 @@ int pm_clk_add(struct device *dev, const char *con_id) { return __pm_clk_add(dev, con_id, NULL); } +EXPORT_SYMBOL_GPL(pm_clk_add); /** * pm_clk_add_clk - Start using a device clock for power management. @@ -136,9 +137,42 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk) { return __pm_clk_add(dev, NULL, clk); } +EXPORT_SYMBOL_GPL(pm_clk_add_clk); /** + * of_pm_clk_add_clk - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. + * @name: Name of clock that is going to be used for power management. + * + * Add the clock described in the 'clocks' device-tree node that matches + * with the 'name' provided, to the list of clocks used for the power + * management of @dev. On success, returns 0. Returns a negative error + * code if the clock is not found or cannot be added. + */ +int of_pm_clk_add_clk(struct device *dev, const char *name) +{ + struct clk *clk; + int ret; + + if (!dev || !dev->of_node || !name) + return -EINVAL; + + clk = of_clk_get_by_name(dev->of_node, name); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = pm_clk_add_clk(dev, clk); + if (ret) { + clk_put(clk); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); + +/** * of_pm_clk_add_clks - Start using device clock(s) for power management. * @dev: Device whose clock(s) is going to be used for power management. * @@ -192,6 +226,7 @@ error: return ret; } +EXPORT_SYMBOL_GPL(of_pm_clk_add_clks); /** * __pm_clk_remove - Destroy PM clock entry. @@ -252,6 +287,7 @@ void pm_clk_remove(struct device *dev, const char *con_id) __pm_clk_remove(ce); } +EXPORT_SYMBOL_GPL(pm_clk_remove); /** * pm_clk_remove_clk - Stop using a device clock for power management. 
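The of_pm_clk_add_clk() helper added and exported above is intended for driver probe paths; a hedged usage sketch follows (the foo_probe() driver and the "bus" clock name are hypothetical, error handling is minimal):

/* Hypothetical probe: look up the clock named "bus" in this device's
 * DT clock bindings and hand it over to PM clock management, so the
 * pm_clk_suspend()/pm_clk_resume() callbacks can gate it. */
#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	ret = of_pm_clk_add_clk(&pdev->dev, "bus");
	if (ret)
		pm_clk_destroy(&pdev->dev);

	return ret;
}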
@@ -285,6 +321,7 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk) __pm_clk_remove(ce); } +EXPORT_SYMBOL_GPL(pm_clk_remove_clk); /** * pm_clk_init - Initialize a device's list of power management clocks. @@ -299,6 +336,7 @@ void pm_clk_init(struct device *dev) if (psd) INIT_LIST_HEAD(&psd->clock_list); } +EXPORT_SYMBOL_GPL(pm_clk_init); /** * pm_clk_create - Create and initialize a device's list of PM clocks. @@ -311,6 +349,7 @@ int pm_clk_create(struct device *dev) { return dev_pm_get_subsys_data(dev); } +EXPORT_SYMBOL_GPL(pm_clk_create); /** * pm_clk_destroy - Destroy a device's list of power management clocks. @@ -345,6 +384,7 @@ void pm_clk_destroy(struct device *dev) __pm_clk_remove(ce); } } +EXPORT_SYMBOL_GPL(pm_clk_destroy); /** * pm_clk_suspend - Disable clocks in a device's PM clock list. @@ -375,6 +415,7 @@ int pm_clk_suspend(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(pm_clk_suspend); /** * pm_clk_resume - Enable clocks in a device's PM clock list. @@ -400,6 +441,7 @@ int pm_clk_resume(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(pm_clk_resume); /** * pm_clk_notify - Notify routine for device addition and removal. @@ -480,6 +522,7 @@ int pm_clk_runtime_suspend(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend); int pm_clk_runtime_resume(struct device *dev) { @@ -495,6 +538,7 @@ int pm_clk_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } +EXPORT_SYMBOL_GPL(pm_clk_runtime_resume); #else /* !CONFIG_PM_CLK */ @@ -598,3 +642,4 @@ void pm_clk_add_notifier(struct bus_type *bus, clknb->nb.notifier_call = pm_clk_notify; bus_register_notifier(bus, &clknb->nb); } +EXPORT_SYMBOL_GPL(pm_clk_add_notifier); @@ -187,8 +187,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) struct gpd_link *link; int ret = 0; - if (genpd->status == GPD_STATE_ACTIVE - || (genpd->prepared_count > 0 && genpd->suspend_power_off)) + if (genpd->status == GPD_STATE_ACTIVE) return 0; /* @@ -735,82 +734,24 @@ static int pm_genpd_prepare(struct device *dev) mutex_lock(&genpd->lock); - if (genpd->prepared_count++ == 0) { + if (genpd->prepared_count++ == 0) genpd->suspended_count = 0; - genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; - } mutex_unlock(&genpd->lock); - if (genpd->suspend_power_off) - return 0; - - /* - * The PM domain must be in the GPD_STATE_ACTIVE state at this point, - * so genpd_poweron() will return immediately, but if the device - * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need - * to make it operational. - */ - pm_runtime_resume(dev); - __pm_runtime_disable(dev, false); - ret = pm_generic_prepare(dev); if (ret) { mutex_lock(&genpd->lock); - if (--genpd->prepared_count == 0) - genpd->suspend_power_off = false; + genpd->prepared_count--; mutex_unlock(&genpd->lock); - pm_runtime_enable(dev); } return ret; } /** - * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. - * @dev: Device to suspend. - * - * Suspend a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a PM domain consisting of I/O devices. - */ -static int pm_genpd_suspend(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); -} - -/** - * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. 
- * @dev: Device to suspend. - * - * Carry out a late suspend of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a PM domain consisting of I/O devices. - */ -static int pm_genpd_suspend_late(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); -} - -/** * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. * @dev: Device to suspend. * @@ -820,6 +761,7 @@ static int pm_genpd_suspend_late(struct device *dev) static int pm_genpd_suspend_noirq(struct device *dev) { struct generic_pm_domain *genpd; + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -827,11 +769,14 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off - || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) return 0; - genpd_stop_dev(genpd, dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start) { + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + } /* * Since all of the "noirq" callbacks are executed sequentially, it is @@ -853,6 +798,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) static int pm_genpd_resume_noirq(struct device *dev) { struct generic_pm_domain *genpd; + int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -860,8 +806,7 @@ static int pm_genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off - || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) return 0; /* @@ -872,93 +817,10 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev); -} - -/** - * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. - * @dev: Device to resume. - * - * Carry out an early resume of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a power domain consisting of I/O - * devices. - */ -static int pm_genpd_resume_early(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); -} - -/** - * pm_genpd_resume - Resume of device in an I/O PM domain. - * @dev: Device to resume. - * - * Resume a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a power domain consisting of I/O devices. - */ -static int pm_genpd_resume(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); -} - -/** - * pm_genpd_freeze - Freezing a device in an I/O PM domain. - * @dev: Device to freeze. - * - * Freeze a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a power domain consisting of I/O devices. 
- */ -static int pm_genpd_freeze(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); -} + if (genpd->dev_ops.stop && genpd->dev_ops.start) + ret = pm_runtime_force_resume(dev); -/** - * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. - * @dev: Device to freeze. - * - * Carry out a late freeze of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a power domain consisting of I/O - * devices. - */ -static int pm_genpd_freeze_late(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); + return ret; } /** @@ -973,6 +835,7 @@ static int pm_genpd_freeze_late(struct device *dev) static int pm_genpd_freeze_noirq(struct device *dev) { struct generic_pm_domain *genpd; + int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -980,7 +843,10 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start) + ret = pm_runtime_force_suspend(dev); + + return ret; } /** @@ -993,6 +859,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) static int pm_genpd_thaw_noirq(struct device *dev) { struct generic_pm_domain *genpd; + int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -1000,51 +867,10 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? - 0 : genpd_start_dev(genpd, dev); -} - -/** - * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. - * @dev: Device to thaw. - * - * Carry out an early thaw of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a power domain consisting of I/O - * devices. - */ -static int pm_genpd_thaw_early(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); -} - -/** - * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. - * @dev: Device to thaw. - * - * Thaw a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a power domain consisting of I/O devices. - */ -static int pm_genpd_thaw(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); + if (genpd->dev_ops.stop && genpd->dev_ops.start) + ret = pm_runtime_force_resume(dev); - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 
0 : pm_generic_thaw(dev); + return ret; } /** @@ -1057,6 +883,7 @@ static int pm_genpd_thaw(struct device *dev) static int pm_genpd_restore_noirq(struct device *dev) { struct generic_pm_domain *genpd; + int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -1072,30 +899,20 @@ static int pm_genpd_restore_noirq(struct device *dev) * At this point suspended_count == 0 means we are being run for the * first time for the given domain in the present cycle. */ - if (genpd->suspended_count++ == 0) { + if (genpd->suspended_count++ == 0) /* * The boot kernel might put the domain into arbitrary state, * so make it appear as powered off to pm_genpd_sync_poweron(), * so that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; - if (genpd->suspend_power_off) { - /* - * If the domain was off before the hibernation, make - * sure it will be off going forward. - */ - genpd_power_off(genpd, true); - - return 0; - } - } - - if (genpd->suspend_power_off) - return 0; pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start) + ret = pm_runtime_force_resume(dev); + + return ret; } /** @@ -1110,7 +927,6 @@ static int pm_genpd_restore_noirq(struct device *dev) static void pm_genpd_complete(struct device *dev) { struct generic_pm_domain *genpd; - bool run_complete; dev_dbg(dev, "%s()\n", __func__); @@ -1118,20 +934,15 @@ static void pm_genpd_complete(struct device *dev) if (IS_ERR(genpd)) return; + pm_generic_complete(dev); + mutex_lock(&genpd->lock); - run_complete = !genpd->suspend_power_off; - if (--genpd->prepared_count == 0) - genpd->suspend_power_off = false; + genpd->prepared_count--; + if (!genpd->prepared_count) + genpd_queue_power_off_work(genpd); mutex_unlock(&genpd->lock); - - if (run_complete) { - pm_generic_complete(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - pm_request_idle(dev); - } } /** @@ -1173,18 +984,10 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); #else /* !CONFIG_PM_SLEEP */ #define pm_genpd_prepare NULL -#define pm_genpd_suspend NULL -#define pm_genpd_suspend_late NULL #define pm_genpd_suspend_noirq NULL -#define pm_genpd_resume_early NULL #define pm_genpd_resume_noirq NULL -#define pm_genpd_resume NULL -#define pm_genpd_freeze NULL -#define pm_genpd_freeze_late NULL #define pm_genpd_freeze_noirq NULL -#define pm_genpd_thaw_early NULL #define pm_genpd_thaw_noirq NULL -#define pm_genpd_thaw NULL #define pm_genpd_restore_noirq NULL #define pm_genpd_complete NULL @@ -1455,12 +1258,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); * @genpd: PM domain object to initialize. * @gov: PM domain governor to associate with the domain (may be NULL). * @is_off: Initial value of the domain's power_is_off field. + * + * Returns 0 on successful initialization, else a negative error code. 
*/ -void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off) +int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) { if (IS_ERR_OR_NULL(genpd)) - return; + return -EINVAL; INIT_LIST_HEAD(&genpd->master_links); INIT_LIST_HEAD(&genpd->slave_links); @@ -1476,24 +1281,24 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; - genpd->domain.ops.suspend = pm_genpd_suspend; - genpd->domain.ops.suspend_late = pm_genpd_suspend_late; + genpd->domain.ops.suspend = pm_generic_suspend; + genpd->domain.ops.suspend_late = pm_generic_suspend_late; genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; - genpd->domain.ops.resume_early = pm_genpd_resume_early; - genpd->domain.ops.resume = pm_genpd_resume; - genpd->domain.ops.freeze = pm_genpd_freeze; - genpd->domain.ops.freeze_late = pm_genpd_freeze_late; + genpd->domain.ops.resume_early = pm_generic_resume_early; + genpd->domain.ops.resume = pm_generic_resume; + genpd->domain.ops.freeze = pm_generic_freeze; + genpd->domain.ops.freeze_late = pm_generic_freeze_late; genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; - genpd->domain.ops.thaw_early = pm_genpd_thaw_early; - genpd->domain.ops.thaw = pm_genpd_thaw; - genpd->domain.ops.poweroff = pm_genpd_suspend; - genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; + genpd->domain.ops.thaw_early = pm_generic_thaw_early; + genpd->domain.ops.thaw = pm_generic_thaw; + genpd->domain.ops.poweroff = pm_generic_poweroff; + genpd->domain.ops.poweroff_late = pm_generic_poweroff_late; genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; - genpd->domain.ops.restore_early = pm_genpd_resume_early; - genpd->domain.ops.restore = pm_genpd_resume; + genpd->domain.ops.restore_early = pm_generic_restore_early; + genpd->domain.ops.restore = pm_generic_restore; genpd->domain.ops.complete = pm_genpd_complete; if (genpd->flags & GENPD_FLAG_PM_CLK) { @@ -1518,6 +1323,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd, mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); + + return 0; } EXPORT_SYMBOL_GPL(pm_genpd_init); @@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, } /* Mark opp-table as multiple CPUs are sharing it now */ - opp_table->shared_opp = true; + opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; } unlock: mutex_unlock(&opp_table_lock); @@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); * * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. * - * Returns -ENODEV if OPP table isn't already present. + * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP + * table's status is access-unknown. * * Locking: The internal opp_table and opp structures are RCU protected. 
* Hence this function internally uses RCU updater strategy with mutex locks @@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) goto unlock; } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { + ret = -EINVAL; + goto unlock; + } + cpumask_clear(cpumask); - if (opp_table->shared_opp) { + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { list_for_each_entry(opp_dev, &opp_table->dev_list, node) cpumask_set_cpu(opp_dev->dev->id, cpumask); } else { @@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. */ - return opp_table->shared_opp ? opp_table : NULL; + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) + return opp_table; + + return NULL; } } @@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) } opp_table->np = opp_np; - opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); + if (of_property_read_bool(opp_np, "opp-shared")) + opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; + else + opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; mutex_unlock(&opp_table_lock); @@ -119,6 +119,12 @@ struct opp_device { #endif }; +enum opp_table_access { + OPP_TABLE_ACCESS_UNKNOWN = 0, + OPP_TABLE_ACCESS_EXCLUSIVE = 1, + OPP_TABLE_ACCESS_SHARED = 2, +}; + /** * struct opp_table - Device opp structure * @node: table node - contains the devices with OPPs that @@ -166,7 +172,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1; - bool shared_opp; + enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; unsigned int *supported_hw; @@ -1045,10 +1045,14 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) */ if (!parent->power.disable_depth && !parent->power.ignore_children - && parent->power.runtime_status != RPM_ACTIVE) + && parent->power.runtime_status != RPM_ACTIVE) { + dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", + dev_name(dev), + dev_name(parent)); error = -EBUSY; - else if (dev->power.runtime_status == RPM_SUSPENDED) + } else if (dev->power.runtime_status == RPM_SUSPENDED) { atomic_inc(&parent->power.child_count); + } spin_unlock(&parent->power.lock); @@ -1256,7 +1260,7 @@ void pm_runtime_allow(struct device *dev) dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) - rpm_idle(dev, RPM_AUTO); + rpm_idle(dev, RPM_AUTO | RPM_ASYNC); out: spin_unlock_irq(&dev->power.lock); @@ -1506,6 +1510,9 @@ int pm_runtime_force_resume(struct device *dev) goto out; } + if (!pm_runtime_status_suspended(dev)) + goto out; + ret = pm_runtime_set_active(dev); if (ret) goto out; @@ -259,7 +259,7 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, { if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) return ®map_i2c; - else if (config->reg_bits == 8 && + else if (config->val_bits == 8 && config->reg_bits == 8 && i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) return ®map_i2c_smbus_i2c_block; @@ -268,13 +268,16 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) bool handled = false; u32 reg; + if (chip->handle_pre_irq) + chip->handle_pre_irq(chip->irq_drv_data); + if (chip->runtime_pm) { ret = pm_runtime_get_sync(map->dev); if (ret < 0) { dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret); pm_runtime_put(map->dev); - return IRQ_NONE; + goto 
exit; } } @@ -296,7 +299,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); - return IRQ_NONE; + goto exit; } for (i = 0; i < data->chip->num_regs; i++) { @@ -312,7 +315,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) break; default: BUG(); - return IRQ_NONE; + goto exit; } } @@ -329,7 +332,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) ret); if (chip->runtime_pm) pm_runtime_put(map->dev); - return IRQ_NONE; + goto exit; } } } @@ -365,6 +368,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) if (chip->runtime_pm) pm_runtime_put(map->dev); +exit: + if (chip->handle_post_irq) + chip->handle_post_irq(chip->irq_drv_data); + if (handled) return IRQ_HANDLED; else @@ -1777,8 +1777,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, size_t val_bytes = map->format.val_bytes; size_t total_size = val_bytes * val_count; - if (map->bus && !map->format.parse_inplace) - return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; @@ -1789,7 +1787,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, * * The first if block is used for memory mapped io. It does not allow * val_bytes of 3 for example. - * The second one is used for busses which do not have this limitation + * The second one is for busses that do not provide raw I/O. + * The third one is used for busses which do not have these limitations * and can write arbitrary value lengths. */ if (!map->bus) { @@ -1825,6 +1824,32 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, } out: map->unlock(map->lock_arg); + } else if (map->bus && !map->format.parse_inplace) { + const u8 *u8 = val; + const u16 *u16 = val; + const u32 *u32 = val; + unsigned int ival; + + for (i = 0; i < val_count; i++) { + switch (map->format.val_bytes) { + case 4: + ival = u32[i]; + break; + case 2: + ival = u16[i]; + break; + case 1: + ival = u8[i]; + break; + default: + return -EINVAL; + } + + ret = regmap_write(map, reg + (i * map->reg_stride), + ival); + if (ret) + return ret; + } } else if (map->use_single_write || (map->max_raw_write && map->max_raw_write < total_size)) { int chunk_stride = map->reg_stride; @@ -77,6 +77,14 @@ static DEVICE_ATTR_RO(book_siblings); static DEVICE_ATTR_RO(book_siblings_list); #endif +#ifdef CONFIG_SCHED_DRAWER +define_id_show_func(drawer_id); +static DEVICE_ATTR_RO(drawer_id); +define_siblings_show_func(drawer_siblings, drawer_cpumask); +static DEVICE_ATTR_RO(drawer_siblings); +static DEVICE_ATTR_RO(drawer_siblings_list); +#endif + static struct attribute *default_attrs[] = { &dev_attr_physical_package_id.attr, &dev_attr_core_id.attr, @@ -89,6 +97,11 @@ static struct attribute *default_attrs[] = { &dev_attr_book_siblings.attr, &dev_attr_book_siblings_list.attr, #endif +#ifdef CONFIG_SCHED_DRAWER + &dev_attr_drawer_id.attr, + &dev_attr_drawer_siblings.attr, + &dev_attr_drawer_siblings_list.attr, +#endif NULL }; @@ -76,9 +76,16 @@ config BCMA_PFLASH default y config BCMA_SFLASH - bool - depends on BCMA_DRIVER_MIPS + bool "ChipCommon-attached serial flash support" + depends on BCMA_HOST_SOC default y + help + Some cheap devices have a serial flash connected to the ChipCommon + instead of an independent SPI controller. This requires a separate + driver that implements the ChipCommon-specific interface. + + Enabling this symbol will let bcma recognize the serial flash and + register it as a platform device.
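The handle_pre_irq()/handle_post_irq() hooks called from regmap_irq_thread() above let a chip driver bracket the status reads, e.g. to pause interrupt aggregation in hardware while statuses are collected. A sketch of how a chip description might wire them up; everything named foo_* is hypothetical, and the int return type is assumed from the call sites shown in the hunk:

/* Hypothetical hooks: each receives the chip's irq_drv_data pointer,
 * exactly as regmap_irq_thread() passes it above. */
#include <linux/regmap.h>

struct foo_chip {
	bool irqs_paused;	/* hypothetical device state */
};

static int foo_handle_pre_irq(void *irq_drv_data)
{
	struct foo_chip *foo = irq_drv_data;

	foo->irqs_paused = true;	/* stand-in for real register I/O */
	return 0;
}

static int foo_handle_post_irq(void *irq_drv_data)
{
	struct foo_chip *foo = irq_drv_data;

	foo->irqs_paused = false;
	return 0;
}

static struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.handle_pre_irq = foo_handle_pre_irq,
	.handle_post_irq = foo_handle_post_irq,
	/* .irq_drv_data is assumed to be set before registration */
};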
config BCMA_NFLASH bool @@ -8,8 +8,6 @@ #include <linux/bcma/bcma.h> #include <linux/delay.h> -#define BCMA_CORE_SIZE 0x1000 - #define bcma_err(bus, fmt, ...) \ pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_warn(bus, fmt, ...) \ @@ -33,11 +33,12 @@ static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask, void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value) { struct bcma_bus *bus = ccb->core->bus; + void __iomem *mii = ccb->mii; - writel(offset, ccb->mii + 0x00); - bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100); - writel(value, ccb->mii + 0x04); - bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100); + writel(offset, mii + BCMA_CCB_MII_MNG_CTL); + bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100); + writel(value, mii + BCMA_CCB_MII_MNG_CMD_DATA); + bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100); } EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write); @@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, @@ -1750,7 +1750,7 @@ aoecmd_init(void) int ret; /* get_zeroed_page returns page with ref count 1 */ - p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); + p = (void *) get_zeroed_page(GFP_KERNEL); if (!p) return -ENOMEM; empty_page = virt_to_page(p); @@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) goto io_error; - if (unlikely(bio->bi_rw & REQ_DISCARD)) { + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) || bio->bi_iter.bi_size & ~PAGE_MASK) goto io_error; @@ -347,9 +347,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) goto out; } - rw = bio_rw(bio); - if (rw == READA) - rw = READ; + rw = bio_data_dir(bio); bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; @@ -509,7 +507,9 @@ static struct brd_device *brd_alloc(int i) blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX); brd->brd_queue->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); - +#ifdef CONFIG_BLK_DEV_RAM_DAX + queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue); +#endif disk = brd->brd_disk = alloc_disk(max_part); if (!disk) goto out_free_queue; @@ -1951,7 +1951,6 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, if (cciss_create_ld_sysfs_entry(h, drv_index)) goto cleanup_queue; disk->private_data = h->drv[drv_index]; - disk->driverfs_dev = &h->drv[drv_index]->dev; /* Set up queue information */ blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); @@ -1973,7 +1972,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, /* allows the interrupt handler to start the queue */ wmb(); h->drv[drv_index]->queue = disk->queue; - add_disk(disk); + device_add_disk(&h->drv[drv_index]->dev, disk); return 0; cleanup_queue: @@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b static int _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, 
int rw) + sector_t sector, int op) { struct bio *bio; /* we do all our meta data IO in aligned 4k blocks. */ const int size = 4096; - int err; + int err, op_flags = 0; device->md_io.done = 0; device->md_io.error = -ENODEV; - if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags)) - rw |= REQ_FUA | REQ_FLUSH; - rw |= REQ_SYNC | REQ_NOIDLE; + if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags)) + op_flags |= REQ_FUA | REQ_PREFLUSH; + op_flags |= REQ_SYNC | REQ_NOIDLE; bio = bio_alloc_drbd(GFP_NOIO); bio->bi_bdev = bdev->md_bdev; @@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, goto out; bio->bi_private = device; bio->bi_end_io = drbd_md_endio; - bio->bi_rw = rw; + bio_set_op_attrs(bio, op, op_flags); - if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL) + if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL) /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */ ; else if (!get_ldev_if_state(device, D_ATTACHING)) { @@ -174,10 +174,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, bio_get(bio); /* one bio_put() is in the completion handler */ atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ device->md_io.submit_jif = jiffies; - if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) + if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) bio_io_error(bio); else - submit_bio(rw, bio); + submit_bio(bio); wait_until_done_or_force_detached(device, bdev, &device->md_io.done); if (!bio->bi_error) err = device->md_io.error; @@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, } int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - sector_t sector, int rw) + sector_t sector, int op) { int err; D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); @@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", current->comm, current->pid, __func__, - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", + (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", (void*)_RET_IP_ ); if (sector < drbd_md_first_sector(bdev) || sector + 7 > drbd_md_last_sector(bdev)) drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", current->comm, current->pid, __func__, - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); + (unsigned long long)sector, + (op == REQ_OP_WRITE) ? "WRITE" : "READ"); - err = _drbd_md_sync_page_io(device, bdev, sector, rw); + err = _drbd_md_sync_page_io(device, bdev, sector, op); if (err) { drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", - (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); + (unsigned long long)sector, + (op == REQ_OP_WRITE) ? "WRITE" : "READ", err); } return err; } @@ -256,7 +258,7 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); unsigned last = i->size == 0 ? 
first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9); - D_ASSERT(device, (unsigned)(last - first) <= 1); + D_ASSERT(device, first <= last); D_ASSERT(device, atomic_read(&device->local_cnt) > 0); /* FIXME figure out a fast path for bios crossing AL extent boundaries */ @@ -339,6 +341,8 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact i = 0; + drbd_bm_reset_al_hints(device); + /* Even though no one can start to change this list * once we set the LC_LOCKED -- from drbd_al_begin_io(), * lc_try_lock_for_transaction() --, someone may still @@ -768,10 +772,18 @@ static bool lazy_bitmap_update_due(struct drbd_device *device) static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done) { - if (rs_done) - set_bit(RS_DONE, &device->flags); - /* and also set RS_PROGRESS below */ - else if (!lazy_bitmap_update_due(device)) + if (rs_done) { + struct drbd_connection *connection = first_peer_device(device)->connection; + if (connection->agreed_pro_version <= 95 || + is_sync_target_state(device->state.conn)) + set_bit(RS_DONE, &device->flags); + /* and also set RS_PROGRESS below */ + + /* Else: rather wait for explicit notification via receive_state, + * to avoid uuids-rotated-too-fast causing full resync + * in next handshake, in case the replication link breaks + * at the most unfortunate time... */ + } else if (!lazy_bitmap_update_due(device)) return; drbd_device_post_work(device, RS_PROGRESS); @@ -830,6 +842,13 @@ static int update_sync_bits(struct drbd_device *device, return count; } +static bool plausible_request_size(int size) +{ + return size > 0 + && size <= DRBD_MAX_BATCH_BIO_SIZE + && IS_ALIGNED(size, 512); +} + /* clear the bit corresponding to the piece of storage in question: * size bytes of data starting from sector. Only clear the bits of the affected * one or more _aligned_ BM_BLOCK_SIZE blocks. @@ -845,11 +864,11 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, unsigned long count = 0; sector_t esector, nr_sectors; - /* This would be an empty REQ_FLUSH, be silent. */ + /* This would be an empty REQ_PREFLUSH, be silent. */ if ((mode == SET_OUT_OF_SYNC) && size == 0) return 0; - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { + if (!plausible_request_size(size)) { drbd_err(device, "%s: sector=%llus size=%d nonsense!\n", drbd_change_sync_fname[mode], (unsigned long long)sector, size); @@ -96,6 +96,13 @@ struct drbd_bitmap { struct page **bm_pages; spinlock_t bm_lock; + /* exclusively to be used by __al_write_transaction(), * drbd_bm_mark_for_writeout() and * drbd_bm_write_hinted() -> bm_rw() called from there. */ + unsigned int n_bitmap_hints; + unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION]; + /* see LIMITATIONS: above */ unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ @@ -242,6 +249,11 @@ static void bm_set_page_need_writeout(struct page *page) set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); } +void drbd_bm_reset_al_hints(struct drbd_device *device) +{ + device->bitmap->n_bitmap_hints = 0; +} + /** * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout * @device: DRBD device.
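
/* Illustration only, not part of the patch: the hint bookkeeping introduced
 * above, reduced to a minimal self-contained C sketch. All names here
 * (hint_list, MAX_HINTS, record_hint, page_hinted) are hypothetical
 * stand-ins; the kernel code uses n_bitmap_hints/al_bitmap_hints[] and the
 * per-page BM_PAGE_HINT_WRITEOUT bit via test_and_set_bit(). */
#include <assert.h>
#include <stdbool.h>

#define MAX_HINTS 64                    /* AL_UPDATES_PER_TRANSACTION upstream */

struct hint_list {
        unsigned int n;
        unsigned int idx[MAX_HINTS];
};

static bool page_hinted[1024];          /* stands in for the page-private flag bit */

/* Record a page number at most once, no matter how many AL extents map to
 * the same bitmap page: the flag doubles as the dedup test, so the hinted
 * writeout pass can walk idx[] instead of scanning every bitmap page. */
static void record_hint(struct hint_list *h, unsigned int page_nr)
{
        if (page_hinted[page_nr])       /* test_and_set_bit() upstream */
                return;
        page_hinted[page_nr] = true;
        assert(h->n < MAX_HINTS);       /* BUG_ON() upstream */
        h->idx[h->n++] = page_nr;
}
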
@@ -253,6 +265,7 @@ static void bm_set_page_need_writeout(struct page *page) */ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr) { + struct drbd_bitmap *b = device->bitmap; struct page *page; if (page_nr >= device->bitmap->bm_number_of_pages) { drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n", @@ -260,7 +273,9 @@ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr) return; } page = device->bitmap->bm_pages[page_nr]; - set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)); + BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints)); + if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page))) + b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr; } static int bm_test_page_unchanged(struct page *page) @@ -427,8 +442,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) } /* - * called on driver init only. TODO call when a device is created. - * allocates the drbd_bitmap, and stores it in device->bitmap. + * allocates the drbd_bitmap and stores it in device->bitmap. */ int drbd_bm_init(struct drbd_device *device) { @@ -633,7 +647,8 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi unsigned long bits, words, owords, obits; unsigned long want, have, onpages; /* number of pages */ struct page **npages, **opages = NULL; - int err = 0, growing; + int err = 0; + bool growing; if (!expect(b)) return -ENOMEM; @@ -980,7 +995,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; - unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE; + unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; @@ -1011,12 +1026,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho bio_add_page(bio, page, len, 0); bio->bi_private = ctx; bio->bi_end_io = drbd_bm_endio; + bio_set_op_attrs(bio, op, 0); - if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { - bio->bi_rw |= rw; + if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { bio_io_error(bio); } else { - submit_bio(rw, bio); + submit_bio(bio); /* this should not count as user activity and cause the * resync to throttle -- see drbd_rs_should_slow_down(). */ atomic_add(len >> 9, &device->rs_sect_ev); @@ -1030,7 +1045,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned { struct drbd_bm_aio_ctx *ctx; struct drbd_bitmap *b = device->bitmap; - int num_pages, i, count = 0; + unsigned int num_pages, i, count = 0; unsigned long now; char ppb[10]; int err = 0; @@ -1078,16 +1093,37 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned now = jiffies; /* let the layers below us try to merge these bios... */ - for (i = 0; i < num_pages; i++) { - /* ignore completely unchanged pages */ - if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) - break; - if (!(flags & BM_AIO_READ)) { - if ((flags & BM_AIO_WRITE_HINTED) && - !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT, - &page_private(b->bm_pages[i]))) - continue; + if (flags & BM_AIO_READ) { + for (i = 0; i < num_pages; i++) { + atomic_inc(&ctx->in_flight); + bm_page_io_async(ctx, i); + ++count; + cond_resched(); + } + } else if (flags & BM_AIO_WRITE_HINTED) { + /* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. 
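+ * Only pages recorded in al_bitmap_hints[] are visited in this
+ * branch; a BM_AIO_WRITE_ALL_PAGES request must take the full
+ * per-page loop in the else branch below instead.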
*/ + unsigned int hint; + for (hint = 0; hint < b->n_bitmap_hints; hint++) { + i = b->al_bitmap_hints[hint]; + if (i >= num_pages) /* == -1U: no hint here. */ + continue; + /* Several AL-extents may point to the same page. */ + if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT, + &page_private(b->bm_pages[i]))) + continue; + /* Has it even changed? */ + if (bm_test_page_unchanged(b->bm_pages[i])) + continue; + atomic_inc(&ctx->in_flight); + bm_page_io_async(ctx, i); + ++count; + } + } else { + for (i = 0; i < num_pages; i++) { + /* ignore completely unchanged pages */ + if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) + break; if (!(flags & BM_AIO_WRITE_ALL_PAGES) && bm_test_page_unchanged(b->bm_pages[i])) { dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i); @@ -1100,11 +1136,11 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i); continue; } + atomic_inc(&ctx->in_flight); + bm_page_io_async(ctx, i); + ++count; + cond_resched(); } - atomic_inc(&ctx->in_flight); - bm_page_io_async(ctx, i); - ++count; - cond_resched(); } /* @@ -1121,10 +1157,14 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); /* summary for global bitmap IO */ - if (flags == 0) - drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n", - (flags & BM_AIO_READ) ? "READ" : "WRITE", - count, jiffies - now); + if (flags == 0) { + unsigned int ms = jiffies_to_msecs(jiffies - now); + if (ms > 5) { + drbd_info(device, "bitmap %s of %u pages took %u ms\n", + (flags & BM_AIO_READ) ? "READ" : "WRITE", + count, ms); + } + } if (ctx->error) { drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n"); @@ -237,14 +237,9 @@ static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_re seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C"); seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync"); - if (f & EE_IS_TRIM) { - seq_putc(m, sep); - sep = '|'; - if (f & EE_IS_TRIM_USE_ZEROOUT) - seq_puts(m, "zero-out"); - else - seq_puts(m, "trim"); - } + if (f & EE_IS_TRIM) + __seq_print_rq_state_bit(m, f & EE_IS_TRIM_USE_ZEROOUT, &sep, "zero-out", "trim"); + seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same"); seq_putc(m, '\n'); } @@ -430,9 +425,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo /* Are we still linked, * or has debugfs_remove() already been called? 
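* (Presumably the open file pins f_path.dentry, so the parent cannot be
* NULL or negative here; the check removed below was considered dead code.)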
*/ parent = file->f_path.dentry->d_parent; - /* not sure if this can happen: */ - if (!parent || d_really_is_negative(parent)) - goto out; /* serialize with d_delete() */ inode_lock(d_inode(parent)); /* Make sure the object is still alive */ @@ -445,7 +437,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo if (ret) kref_put(kref, release); } -out: return ret; } @@ -908,7 +899,7 @@ static int drbd_version_open(struct inode *inode, struct file *file) return single_open(file, drbd_version_show, NULL); } -static struct file_operations drbd_version_fops = { +static const struct file_operations drbd_version_fops = { .owner = THIS_MODULE, .open = drbd_version_open, .llseek = seq_lseek, @@ -468,9 +468,15 @@ enum { /* this is/was a write request */ __EE_WRITE, + /* this is/was a write same request */ + __EE_WRITE_SAME, + /* this originates from application on peer * (not some resync or verify or other DRBD internal request) */ __EE_APPLICATION, + + /* If it contains only 0 bytes, send back P_RS_DEALLOCATED */ + __EE_RS_THIN_REQ, }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) @@ -484,7 +490,9 @@ enum { #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) #define EE_SUBMITTED (1<<__EE_SUBMITTED) #define EE_WRITE (1<<__EE_WRITE) +#define EE_WRITE_SAME (1<<__EE_WRITE_SAME) #define EE_APPLICATION (1<<__EE_APPLICATION) +#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ) /* flag bits per device */ enum { @@ -1123,6 +1131,7 @@ extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int extern int drbd_send_bitmap(struct drbd_device *device); extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode); extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode); +extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *); extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev); extern void drbd_device_cleanup(struct drbd_device *device); void drbd_print_uuids(struct drbd_device *device, const char *text); @@ -1327,14 +1336,14 @@ struct bm_extent { #endif #endif -/* BIO_MAX_SIZE is 256 * PAGE_SIZE, +/* Estimate max bio size as 256 * PAGE_SIZE, * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. * Since we may live in a mixed-platform cluster, * we limit us to a platform agnostic constant here for now. * A followup commit may allow even bigger BIO sizes, * once we thought that through. */ #define DRBD_MAX_BIO_SIZE (1U << 20) -#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE +#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT) #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE #endif #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ @@ -1342,11 +1351,11 @@ struct bm_extent { #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ -/* For now, don't allow more than one activity log extent worth of data - * to be discarded in one go. We may need to rework drbd_al_begin_io() - * to allow for even larger discard ranges */ -#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE -#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9) +/* For now, don't allow more than half of what we can "activate" in one + * activity log transaction to be discarded in one go. 
We may need to rework + * drbd_al_begin_io() to allow for even larger discard ranges */ +#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE) +#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9) extern int drbd_bm_init(struct drbd_device *device); extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); @@ -1369,6 +1378,7 @@ extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr); extern int drbd_bm_read(struct drbd_device *device) __must_hold(local); extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr); extern int drbd_bm_write(struct drbd_device *device) __must_hold(local); +extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local); extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local); extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local); extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local); @@ -1483,12 +1493,14 @@ enum determine_dev_size { extern enum determine_dev_size drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local); extern void resync_after_online_grow(struct drbd_device *); -extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev); +extern void drbd_reconsider_queue_parameters(struct drbd_device *device, + struct drbd_backing_dev *bdev, struct o_qlim *o); extern enum drbd_state_rv drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force); extern bool conn_try_outdate_peer(struct drbd_connection *connection); extern void conn_try_outdate_peer_async(struct drbd_connection *connection); +extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd); extern int drbd_khelper(struct drbd_device *device, char *cmd); /* drbd_worker.c */ @@ -1507,7 +1519,7 @@ extern int drbd_resync_finished(struct drbd_device *device); extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); extern void drbd_md_put_buffer(struct drbd_device *device); extern int drbd_md_sync_page_io(struct drbd_device *device, - struct drbd_backing_dev *bdev, sector_t sector, int rw); + struct drbd_backing_dev *bdev, sector_t sector, int op); extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); extern void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, unsigned int *done); @@ -1548,6 +1560,8 @@ extern void start_resync_timer_fn(unsigned long data); extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); /* drbd_receiver.c */ +extern int drbd_issue_discard_or_zero_out(struct drbd_device *device, + sector_t start, unsigned int nr_sectors, bool discard); extern int drbd_receiver(struct drbd_thread *thi); extern int drbd_ack_receiver(struct drbd_thread *thi); extern void drbd_send_ping_wf(struct work_struct *ws); @@ -1557,11 +1571,11 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector bool throttle_if_app_is_waiting); extern int drbd_submit_peer_request(struct drbd_device *, struct drbd_peer_request *, const unsigned, - const int); + const unsigned, const int); extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, sector_t, unsigned int, - bool, + unsigned int, gfp_t) __must_hold(local); extern void 
__drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, int); @@ -1635,8 +1649,6 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin /* drbd_proc.c */ extern struct proc_dir_entry *drbd_proc; extern const struct file_operations drbd_proc_fops; -extern const char *drbd_conn_str(enum drbd_conns s); -extern const char *drbd_role_str(enum drbd_role s); /* drbd_actlog.c */ extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); @@ -2095,13 +2107,22 @@ static inline void _sub_unacked(struct drbd_device *device, int n, const char *f ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); } +static inline bool is_sync_target_state(enum drbd_conns connection_state) +{ + return connection_state == C_SYNC_TARGET || + connection_state == C_PAUSED_SYNC_T; +} + +static inline bool is_sync_source_state(enum drbd_conns connection_state) +{ + return connection_state == C_SYNC_SOURCE || + connection_state == C_PAUSED_SYNC_S; +} + static inline bool is_sync_state(enum drbd_conns connection_state) { - return - (connection_state == C_SYNC_SOURCE - || connection_state == C_SYNC_TARGET - || connection_state == C_PAUSED_SYNC_S - || connection_state == C_PAUSED_SYNC_T); + return is_sync_source_state(connection_state) || + is_sync_target_state(connection_state); } /** @@ -6,13 +6,13 @@ struct drbd_interval { struct rb_node rb; - sector_t sector; /* start sector of the interval */ - unsigned int size; /* size in bytes */ - sector_t end; /* highest interval end in subtree */ - int local:1 /* local or remote request? */; - int waiting:1; /* someone is waiting for this to complete */ - int completed:1; /* this has been completed already; - * ignore for conflict detection */ + sector_t sector; /* start sector of the interval */ + unsigned int size; /* size in bytes */ + sector_t end; /* highest interval end in subtree */ + unsigned int local:1 /* local or remote request? 
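+ * (unsigned, because a plain int:1 bitfield can only hold 0 and -1,
+ * which makes comparisons against 1 error-prone)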
*/; + unsigned int waiting:1; /* someone is waiting for completion */ + unsigned int completed:1; /* this has been completed already; + * ignore for conflict detection */ }; static inline void drbd_clear_interval(struct drbd_interval *i) @@ -31,7 +31,7 @@ #include <linux/module.h> #include <linux/jiffies.h> #include <linux/drbd.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/types.h> #include <net/sock.h> #include <linux/ctype.h> @@ -920,6 +920,31 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device) } } +/* communicated if (agreed_features & DRBD_FF_WSAME) */ +void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q) +{ + if (q) { + p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); + p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q)); + p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q)); + p->qlim->io_min = cpu_to_be32(queue_io_min(q)); + p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); + p->qlim->discard_enabled = blk_queue_discard(q); + p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q); + p->qlim->write_same_capable = !!q->limits.max_write_same_sectors; + } else { + q = device->rq_queue; + p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); + p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q)); + p->qlim->alignment_offset = 0; + p->qlim->io_min = cpu_to_be32(queue_io_min(q)); + p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); + p->qlim->discard_enabled = 0; + p->qlim->discard_zeroes_data = 0; + p->qlim->write_same_capable = 0; + } +} + int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags) { struct drbd_device *device = peer_device->device; @@ -928,29 +953,37 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu sector_t d_size, u_size; int q_order_type; unsigned int max_bio_size; + unsigned int packet_size; + + sock = &peer_device->connection->data; + p = drbd_prepare_command(peer_device, sock); + if (!p) + return -EIO; + packet_size = sizeof(*p); + if (peer_device->connection->agreed_features & DRBD_FF_WSAME) + packet_size += sizeof(p->qlim[0]); + + memset(p, 0, packet_size); if (get_ldev_if_state(device, D_NEGOTIATING)) { - D_ASSERT(device, device->ldev->backing_bdev); + struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); d_size = drbd_get_max_capacity(device->ldev); rcu_read_lock(); u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; rcu_read_unlock(); q_order_type = drbd_queue_order_type(device); - max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9; + max_bio_size = queue_max_hw_sectors(q) << 9; max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE); + assign_p_sizes_qlim(device, p, q); put_ldev(device); } else { d_size = 0; u_size = 0; q_order_type = QUEUE_ORDERED_NONE; max_bio_size = DRBD_MAX_BIO_SIZE; /* ... 
multiple BIOs per peer_request */ + assign_p_sizes_qlim(device, p, NULL); } - sock = &peer_device->connection->data; - p = drbd_prepare_command(peer_device, sock); - if (!p) - return -EIO; - if (peer_device->connection->agreed_pro_version <= 94) max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET); else if (peer_device->connection->agreed_pro_version < 100) @@ -962,7 +995,8 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu p->max_bio_size = cpu_to_be32(max_bio_size); p->queue_order_type = cpu_to_be16(q_order_type); p->dds_flags = cpu_to_be16(flags); - return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0); + + return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0); } /** @@ -1377,6 +1411,22 @@ int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd, cpu_to_be64(block_id)); } +int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device, + struct drbd_peer_request *peer_req) +{ + struct drbd_socket *sock; + struct p_block_desc *p; + + sock = &peer_device->connection->data; + p = drbd_prepare_command(peer_device, sock); + if (!p) + return -EIO; + p->sector = cpu_to_be64(peer_req->i.sector); + p->blksize = cpu_to_be32(peer_req->i.size); + p->pad = 0; + return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0); +} + int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd, sector_t sector, int size, u64 block_id) { @@ -1561,6 +1611,9 @@ static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio) ? 0 : MSG_MORE); if (err) return err; + /* REQ_OP_WRITE_SAME has only one segment */ + if (bio_op(bio) == REQ_OP_WRITE_SAME) + break; } return 0; } @@ -1579,6 +1632,9 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b bio_iter_last(bvec, iter) ? 0 : MSG_MORE); if (err) return err; + /* REQ_OP_WRITE_SAME has only one segment */ + if (bio_op(bio) == REQ_OP_WRITE_SAME) + break; } return 0; } @@ -1603,15 +1659,17 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device, return 0; } -static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw) +static u32 bio_flags_to_wire(struct drbd_connection *connection, + struct bio *bio) { if (connection->agreed_pro_version >= 95) - return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | - (bi_rw & REQ_FUA ? DP_FUA : 0) | - (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | - (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); + return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | + (bio->bi_rw & REQ_FUA ? DP_FUA : 0) | + (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) | + (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) | + (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0); else - return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; + return bio->bi_rw & REQ_SYNC ? 
DP_RW_SYNC : 0; } /* Used to send write or TRIM aka REQ_DISCARD requests @@ -1622,6 +1680,8 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * struct drbd_device *device = peer_device->device; struct drbd_socket *sock; struct p_data *p; + struct p_wsame *wsame = NULL; + void *digest_out; unsigned int dp_flags = 0; int digest_size; int err; @@ -1636,7 +1696,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * p->sector = cpu_to_be64(req->i.sector); p->block_id = (unsigned long)req; p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); - dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw); + dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio); if (device->state.conn >= C_SYNC_SOURCE && device->state.conn <= C_PAUSED_SYNC_T) dp_flags |= DP_MAY_SET_IN_SYNC; @@ -1657,12 +1717,29 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0); goto out; } + if (dp_flags & DP_WSAME) { + /* this will only work if DRBD_FF_WSAME is set AND the + * handshake agreed that all nodes and backend devices are + * WRITE_SAME capable and agree on logical_block_size */ + wsame = (struct p_wsame*)p; + digest_out = wsame + 1; + wsame->size = cpu_to_be32(req->i.size); + } else + digest_out = p + 1; /* our digest is still only over the payload. * TRIM does not carry any payload. */ if (digest_size) - drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1); - err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size); + drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out); + if (wsame) { + err = + __send_command(peer_device->connection, device->vnr, sock, P_WSAME, + sizeof(*wsame) + digest_size, NULL, + bio_iovec(req->master_bio).bv_len); + } else + err = + __send_command(peer_device->connection, device->vnr, sock, P_DATA, + sizeof(*p) + digest_size, NULL, req->i.size); if (!err) { /* For protocol A, we have to memcpy the payload into * socket buffers, as we may complete right away @@ -3061,7 +3138,7 @@ void drbd_md_write(struct drbd_device *device, void *b) D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); sector = device->ldev->md.md_offset; - if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { + if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) { /* this was a try anyways ... */ drbd_err(device, "meta data update failed!\n"); drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); @@ -3263,7 +3340,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). 
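* (8 sectors is one aligned 4k block, the unit all DRBD meta data IO is
* done in; the placeholder is replaced by the real on-disk size once the
* superblock has been read and validated.)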
*/ bdev->md.md_size_sect = 8; - if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { + if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, + REQ_OP_READ)) { /* NOTE: can't do normal error processing here as this is called BEFORE disk is attached */ drbd_err(device, "Error while reading metadata.\n"); @@ -3505,7 +3583,12 @@ static int w_bitmap_io(struct drbd_work *w, int unused) struct bm_io_work *work = &device->bm_io_work; int rv = -EIO; - D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0); + if (work->flags != BM_LOCKED_CHANGE_ALLOWED) { + int cnt = atomic_read(&device->ap_bio_cnt); + if (cnt) + drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n", + cnt, work->why); + } if (get_ldev(device)) { drbd_bm_lock(device, work->why, work->flags); @@ -3585,18 +3668,20 @@ void drbd_queue_bitmap_io(struct drbd_device *device, int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *), char *why, enum bm_flag flags) { + /* Only suspend io, if some operation is supposed to be locked out */ + const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST); int rv; D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); - if ((flags & BM_LOCKED_SET_ALLOWED) == 0) + if (do_suspend_io) drbd_suspend_io(device); drbd_bm_lock(device, why, flags); rv = io_fn(device); drbd_bm_unlock(device); - if ((flags & BM_LOCKED_SET_ALLOWED) == 0) + if (do_suspend_io) drbd_resume_io(device); return rv; @@ -3635,6 +3720,8 @@ const char *cmdname(enum drbd_packet cmd) * one PRO_VERSION */ static const char *cmdnames[] = { [P_DATA] = "Data", + [P_WSAME] = "WriteSame", + [P_TRIM] = "Trim", [P_DATA_REPLY] = "DataReply", [P_RS_DATA_REPLY] = "RSDataReply", [P_BARRIER] = "Barrier", @@ -3679,6 +3766,8 @@ const char *cmdname(enum drbd_packet cmd) [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply", [P_RETRY_WRITE] = "retry_write", [P_PROTOCOL_UPDATE] = "protocol_update", + [P_RS_THIN_REQ] = "rs_thin_req", + [P_RS_DEALLOCATED] = "rs_deallocated", /* enum drbd_packet, but not commands - obsoleted flags: * P_MAY_IGNORE @@ -343,7 +343,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd) (char[20]) { }, /* address family */ (char[60]) { }, /* address */ NULL }; - char mb[12]; + char mb[14]; char *argv[] = {usermode_helper, cmd, mb, NULL }; struct drbd_connection *connection = first_peer_device(device)->connection; struct sib_info sib; @@ -352,7 +352,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd) if (current == connection->worker.task) set_bit(CALLBACK_PENDING, &connection->flags); - snprintf(mb, 12, "minor-%d", device_to_minor(device)); + snprintf(mb, 14, "minor-%d", device_to_minor(device)); setup_khelper_env(connection, envp); /* The helper may take some time. @@ -387,7 +387,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd) return ret; } -static int conn_khelper(struct drbd_connection *connection, char *cmd) +enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd) { char *envp[] = { "HOME=/", "TERM=linux", @@ -442,19 +442,17 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec } rcu_read_unlock(); - if (fp == FP_NOT_AVAIL) { - /* IO Suspending works on the whole resource. - Do it only for one device. 
*/ - vnr = 0; - peer_device = idr_get_next(&connection->peer_devices, &vnr); - drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0)); - } - return fp; } +static bool resource_is_suspended(struct drbd_resource *resource) +{ + return resource->susp || resource->susp_fen || resource->susp_nod; +} + bool conn_try_outdate_peer(struct drbd_connection *connection) { + struct drbd_resource * const resource = connection->resource; unsigned int connect_cnt; union drbd_state mask = { }; union drbd_state val = { }; @@ -462,21 +460,41 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) char *ex_to_string; int r; - spin_lock_irq(&connection->resource->req_lock); + spin_lock_irq(&resource->req_lock); if (connection->cstate >= C_WF_REPORT_PARAMS) { drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); - spin_unlock_irq(&connection->resource->req_lock); + spin_unlock_irq(&resource->req_lock); return false; } connect_cnt = connection->connect_cnt; - spin_unlock_irq(&connection->resource->req_lock); + spin_unlock_irq(&resource->req_lock); fp = highest_fencing_policy(connection); switch (fp) { case FP_NOT_AVAIL: drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n"); - goto out; + spin_lock_irq(&resource->req_lock); + if (connection->cstate < C_WF_REPORT_PARAMS) { + _conn_request_state(connection, + (union drbd_state) { { .susp_fen = 1 } }, + (union drbd_state) { { .susp_fen = 0 } }, + CS_VERBOSE | CS_HARD | CS_DC_SUSP); + /* We are no longer suspended due to the fencing policy. + * We may still be suspended due to the on-no-data-accessible policy. + * If that was OND_IO_ERROR, fail pending requests. */ + if (!resource_is_suspended(resource)) + _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING); + } + /* Else: in case we raced with a connection handshake, + * let the handshake figure out if we maybe can RESEND, + * and do not resume/fail pending requests here. + * Worst case is we stay suspended for now, which may be + * resolved by either re-establishing the replication link, or + * the next link failure, or eventually the administrator. */ + spin_unlock_irq(&resource->req_lock); + return false; + case FP_DONT_CARE: return true; default: ; @@ -485,17 +503,17 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) r = conn_khelper(connection, "fence-peer"); switch ((r>>8) & 0xff) { - case 3: /* peer is inconsistent */ + case P_INCONSISTENT: /* peer is inconsistent */ ex_to_string = "peer is inconsistent or worse"; mask.pdsk = D_MASK; val.pdsk = D_INCONSISTENT; break; - case 4: /* peer got outdated, or was already outdated */ + case P_OUTDATED: /* peer got outdated, or was already outdated */ ex_to_string = "peer was fenced"; mask.pdsk = D_MASK; val.pdsk = D_OUTDATED; break; - case 5: /* peer was down */ + case P_DOWN: /* peer was down */ if (conn_highest_disk(connection) == D_UP_TO_DATE) { /* we will(have) create(d) a new UUID anyways... */ ex_to_string = "peer is unreachable, assumed to be dead"; @@ -505,7 +523,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; } break; - case 6: /* Peer is primary, voluntarily outdate myself. + case P_PRIMARY: /* Peer is primary, voluntarily outdate myself. * This is useful when an unconnected R_SECONDARY is asked to * become R_PRIMARY, but finds the other peer being active.
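* (The fence-peer helper exit codes, previously bare integers 3..7, are
* now compared against the symbolic enum drbd_peer_state values, which is
* also why conn_khelper() was re-typed above.)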
*/ ex_to_string = "peer is active"; @@ -513,7 +531,9 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) mask.disk = D_MASK; val.disk = D_OUTDATED; break; - case 7: + case P_FENCING: + /* THINK: do we need to handle this + * like case 4, or more like case 5? */ if (fp != FP_STONITH) drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n"); ex_to_string = "peer was stonithed"; @@ -529,13 +549,11 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) drbd_info(connection, "fence-peer helper returned %d (%s)\n", (r>>8) & 0xff, ex_to_string); - out: - /* Not using conn_request_state(connection, mask, val, CS_VERBOSE); here, because we might were able to re-establish the connection in the meantime. */ - spin_lock_irq(&connection->resource->req_lock); + spin_lock_irq(&resource->req_lock); if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) { if (connection->connect_cnt != connect_cnt) /* In case the connection was established and droped @@ -544,7 +562,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) else _conn_request_state(connection, mask, val, CS_VERBOSE); } - spin_unlock_irq(&connection->resource->req_lock); + spin_unlock_irq(&resource->req_lock); return conn_highest_pdsk(connection) <= D_OUTDATED; } @@ -1154,51 +1172,160 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc) return 0; } +static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity) +{ + q->limits.discard_granularity = granularity; +} + +static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection) +{ + /* when we introduced REQ_WRITE_SAME support, we also bumped + * our maximum supported batch bio size used for discards. */ + if (connection->agreed_features & DRBD_FF_WSAME) + return DRBD_MAX_BBIO_SECTORS; + /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */ + return AL_EXTENT_SIZE >> 9; +} + +static void decide_on_discard_support(struct drbd_device *device, + struct request_queue *q, + struct request_queue *b, + bool discard_zeroes_if_aligned) +{ + /* q = drbd device queue (device->rq_queue) + * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue), + * or NULL if diskless + */ + struct drbd_connection *connection = first_peer_device(device)->connection; + bool can_do = b ? blk_queue_discard(b) : true; + + if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) { + can_do = false; + drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n"); + } + if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) { + can_do = false; + drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n"); + } + if (can_do) { + /* We don't care for the granularity, really. + * Stacking limits below should fix it for the local + * device. Whether or not it is a suitable granularity + * on the remote device is not our problem, really. If + * you care, you need to use devices with similar + * topology on all peers. 
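+ * (512 bytes is simply the smallest granularity the block layer
+ * can express; nothing finer is being promised here.)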
*/ + blk_queue_discard_granularity(q, 512); + q->limits.max_discard_sectors = drbd_max_discard_sectors(connection); + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + } else { + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_discard_granularity(q, 0); + q->limits.max_discard_sectors = 0; + } +} + +static void fixup_discard_if_not_supported(struct request_queue *q) +{ + /* To avoid confusion, if this queue does not support discard, clear + * max_discard_sectors, which is what lsblk -D reports to the user. + * Older kernels got this wrong in "stack limits". + * */ + if (!blk_queue_discard(q)) { + blk_queue_max_discard_sectors(q, 0); + blk_queue_discard_granularity(q, 0); + } +} + +static void decide_on_write_same_support(struct drbd_device *device, + struct request_queue *q, + struct request_queue *b, struct o_qlim *o) +{ + struct drbd_peer_device *peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device->connection; + bool can_do = b ? b->limits.max_write_same_sectors : true; + + if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) { + can_do = false; + drbd_info(peer_device, "peer does not support WRITE_SAME\n"); + } + + if (o) { + /* logical block size; queue_logical_block_size(NULL) is 512 */ + unsigned int peer_lbs = be32_to_cpu(o->logical_block_size); + unsigned int me_lbs_b = queue_logical_block_size(b); + unsigned int me_lbs = queue_logical_block_size(q); + + if (me_lbs_b != me_lbs) { + drbd_warn(device, + "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n", + me_lbs, me_lbs_b); + /* rather disable write same than trigger some BUG_ON later in the scsi layer. */ + can_do = false; + } + if (me_lbs_b != peer_lbs) { + drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n", + me_lbs, peer_lbs); + if (can_do) { + drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n"); + can_do = false; + } + me_lbs = max(me_lbs, me_lbs_b); + /* We cannot change the logical block size of an in-use queue. + * We can only hope that access happens to be properly aligned. + * If not, the peer will likely produce an IO error, and detach. */ + if (peer_lbs > me_lbs) { + if (device->state.role != R_PRIMARY) { + blk_queue_logical_block_size(q, peer_lbs); + drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs); + } else { + drbd_warn(peer_device, + "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n", + me_lbs, peer_lbs); + } + } + } + if (can_do && !o->write_same_capable) { + /* If we introduce an open-coded write-same loop on the receiving side, + * the peer would present itself as "capable". */ + drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n"); + can_do = false; + } + } + + blk_queue_max_write_same_sectors(q, can_do ? 
DRBD_MAX_BBIO_SECTORS : 0); +} + static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, - unsigned int max_bio_size) + unsigned int max_bio_size, struct o_qlim *o) { struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; struct request_queue *b = NULL; + struct disk_conf *dc; + bool discard_zeroes_if_aligned = true; if (bdev) { b = bdev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); - max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; + dc = rcu_dereference(device->ldev->disk_conf); + max_segments = dc->max_bio_bvecs; + discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned; rcu_read_unlock(); blk_set_stacking_limits(&q->limits); - blk_queue_max_write_same_sectors(q, 0); } - blk_queue_logical_block_size(q, 512); blk_queue_max_hw_sectors(q, max_hw_sectors); /* This is the workaround for "bio would need to, but cannot, be split" */ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_SIZE-1); + decide_on_discard_support(device, q, b, discard_zeroes_if_aligned); + decide_on_write_same_support(device, q, b, o); if (b) { - struct drbd_connection *connection = first_peer_device(device)->connection; - - blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS); - - if (blk_queue_discard(b) && - (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { - /* We don't care, stacking below should fix it for the local device. - * Whether or not it is a suitable granularity on the remote device - * is not our problem, really. If you care, you need to - * use devices with similar topology on all peers. */ - q->limits.discard_granularity = 512; - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); - } else { - blk_queue_max_discard_sectors(q, 0); - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); - q->limits.discard_granularity = 0; - } - blk_queue_stack_limits(q, b); if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { @@ -1208,15 +1335,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; } } - /* To avoid confusion, if this queue does not support discard, clear - * max_discard_sectors, which is what lsblk -D reports to the user. 
*/ - if (!blk_queue_discard(q)) { - blk_queue_max_discard_sectors(q, 0); - q->limits.discard_granularity = 0; - } + fixup_discard_if_not_supported(q); } -void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev) +void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o) { unsigned int now, new, local, peer; @@ -1259,7 +1381,7 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backin if (new != now) drbd_info(device, "max BIO size = %u\n", new); - drbd_setup_queue_param(device, bdev, new); + drbd_setup_queue_param(device, bdev, new, o); } /* Starts the worker thread */ @@ -1348,6 +1470,43 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b) a->disk_drain != b->disk_drain; } +static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf, + struct drbd_backing_dev *nbc) +{ + struct request_queue * const q = nbc->backing_bdev->bd_disk->queue; + + if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) + disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; + if (disk_conf->al_extents > drbd_al_extents_max(nbc)) + disk_conf->al_extents = drbd_al_extents_max(nbc); + + if (!blk_queue_discard(q) + || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) { + if (disk_conf->rs_discard_granularity) { + disk_conf->rs_discard_granularity = 0; /* disable feature */ + drbd_info(device, "rs_discard_granularity feature disabled\n"); + } + } + + if (disk_conf->rs_discard_granularity) { + int orig_value = disk_conf->rs_discard_granularity; + int remainder; + + if (q->limits.discard_granularity > disk_conf->rs_discard_granularity) + disk_conf->rs_discard_granularity = q->limits.discard_granularity; + + remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity; + disk_conf->rs_discard_granularity += remainder; + + if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9) + disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9; + + if (disk_conf->rs_discard_granularity != orig_value) + drbd_info(device, "rs_discard_granularity changed to %d\n", + disk_conf->rs_discard_granularity); + } +} + int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { struct drbd_config_context adm_ctx; @@ -1395,10 +1554,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) if (!expect(new_disk_conf->resync_rate >= 1)) new_disk_conf->resync_rate = 1; - if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) - new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; - if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev)) - new_disk_conf->al_extents = drbd_al_extents_max(device->ldev); + sanitize_disk_conf(device, new_disk_conf, device->ldev); if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX) new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX; @@ -1457,6 +1613,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) if (write_ordering_changed(old_disk_conf, new_disk_conf)) drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH); + if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned) + drbd_reconsider_queue_parameters(device, device->ldev, NULL); + drbd_md_sync(device); if (device->state.conn >= C_CONNECTED) { @@ -1693,10 +1852,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) if (retcode != NO_ERROR) goto fail; - if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) - 
new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; - if (new_disk_conf->al_extents > drbd_al_extents_max(nbc)) - new_disk_conf->al_extents = drbd_al_extents_max(nbc); + sanitize_disk_conf(device, new_disk_conf, nbc); if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) { drbd_err(device, "max capacity %llu smaller than disk size %llu\n", @@ -1838,7 +1994,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) device->read_cnt = 0; device->writ_cnt = 0; - drbd_reconsider_max_bio_size(device, device->ldev); + drbd_reconsider_queue_parameters(device, device->ldev, NULL); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, @@ -25,7 +25,7 @@ #include <linux/module.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/proc_fs.h> @@ -122,18 +122,18 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se x = res/50; y = 20-x; - seq_printf(seq, "\t["); + seq_puts(seq, "\t["); for (i = 1; i < x; i++) - seq_printf(seq, "="); - seq_printf(seq, ">"); + seq_putc(seq, '='); + seq_putc(seq, '>'); for (i = 0; i < y; i++) seq_printf(seq, "."); - seq_printf(seq, "] "); + seq_puts(seq, "] "); if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T) - seq_printf(seq, "verified:"); + seq_puts(seq, "verified:"); else - seq_printf(seq, "sync'ed:"); + seq_puts(seq, "sync'ed:"); seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); /* if more than a few GB, display in MB */ @@ -146,7 +146,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se (unsigned long) Bit2KB(rs_left), (unsigned long) Bit2KB(rs_total)); - seq_printf(seq, "\n\t"); + seq_puts(seq, "\n\t"); /* see drivers/md/md.c * We do not want to overflow, so the order of operands and @@ -175,9 +175,9 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se rt / 3600, (rt % 3600) / 60, rt % 60); dbdt = Bit2KB(db/dt); - seq_printf(seq, " speed: "); + seq_puts(seq, " speed: "); seq_printf_with_thousands_grouping(seq, dbdt); - seq_printf(seq, " ("); + seq_puts(seq, " ("); /* ------------------------- ~3s average ------------------------ */ if (proc_details >= 1) { /* this is what drbd_rs_should_slow_down() uses */ @@ -188,7 +188,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se db = device->rs_mark_left[i] - rs_left; dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); - seq_printf(seq, " -- "); + seq_puts(seq, " -- "); } /* --------------------- long term average ---------------------- */ @@ -200,11 +200,11 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se db = rs_total - rs_left; dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); - seq_printf(seq, ")"); + seq_putc(seq, ')'); if (state.conn == C_SYNC_TARGET || state.conn == C_VERIFY_S) { - seq_printf(seq, " want: "); + seq_puts(seq, " want: "); seq_printf_with_thousands_grouping(seq, device->c_sync_rate); } seq_printf(seq, " K/sec%s\n", stalled ? 
" (stalled)" : ""); @@ -231,7 +231,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se (unsigned long long)bm_bits * BM_SECT_PER_BIT); if (stop_sector != 0 && stop_sector != ULLONG_MAX) seq_printf(seq, " stop sector: %llu", stop_sector); - seq_printf(seq, "\n"); + seq_putc(seq, '\n'); } } @@ -276,7 +276,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v) rcu_read_lock(); idr_for_each_entry(&drbd_devices, device, i) { if (prev_i != i - 1) - seq_printf(seq, "\n"); + seq_putc(seq, '\n'); prev_i = i; state = device->state; @@ -60,6 +60,15 @@ enum drbd_packet { * which is why I chose TRIM here, to disambiguate. */ P_TRIM = 0x31, + /* Only use these two if both support FF_THIN_RESYNC */ + P_RS_THIN_REQ = 0x32, /* Request a block for resync or reply P_RS_DEALLOCATED */ + P_RS_DEALLOCATED = 0x33, /* Contains only zeros on sync source node */ + + /* REQ_WRITE_SAME. + * On a receiving side without REQ_WRITE_SAME, + * we may fall back to an opencoded loop instead. */ + P_WSAME = 0x34, + P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ P_MAX_OPT_CMD = 0x101, @@ -106,16 +115,20 @@ struct p_header100 { u32 pad; } __packed; -/* these defines must not be changed without changing the protocol version */ -#define DP_HARDBARRIER 1 /* depricated */ +/* These defines must not be changed without changing the protocol version. + * New defines may only be introduced together with protocol version bump or + * new protocol feature flags. + */ +#define DP_HARDBARRIER 1 /* no longer used */ #define DP_RW_SYNC 2 /* equals REQ_SYNC */ #define DP_MAY_SET_IN_SYNC 4 #define DP_UNPLUG 8 /* not used anymore */ #define DP_FUA 16 /* equals REQ_FUA */ -#define DP_FLUSH 32 /* equals REQ_FLUSH */ +#define DP_FLUSH 32 /* equals REQ_PREFLUSH */ #define DP_DISCARD 64 /* equals REQ_DISCARD */ #define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */ #define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */ +#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */ struct p_data { u64 sector; /* 64 bits sector number */ @@ -129,6 +142,11 @@ struct p_trim { u32 size; /* == bio->bi_size */ } __packed; +struct p_wsame { + struct p_data p_data; + u32 size; /* == bio->bi_size */ +} __packed; + /* * commands which share a struct: * p_block_ack: @@ -160,7 +178,23 @@ struct p_block_req { * ReportParams */ -#define FF_TRIM 1 +/* supports TRIM/DISCARD on the "wire" protocol */ +#define DRBD_FF_TRIM 1 + +/* Detect all-zeros during resync, and rather TRIM/UNMAP/DISCARD those blocks + * instead of fully allocate a supposedly thin volume on initial resync */ +#define DRBD_FF_THIN_RESYNC 2 + +/* supports REQ_WRITE_SAME on the "wire" protocol. + * Note: this flag is overloaded, + * its presence also + * - indicates support for 128 MiB "batch bios", + * max discard size of 128 MiB + * instead of 4M before that. + * - indicates that we exchange additional settings in p_sizes + * drbd_send_sizes()/receive_sizes() + */ +#define DRBD_FF_WSAME 4 struct p_connection_features { u32 protocol_min; @@ -235,6 +269,40 @@ struct p_rs_uuid { u64 uuid; } __packed; +/* optional queue_limits if (agreed_features & DRBD_FF_WSAME) + * see also struct queue_limits, as of late 2015 */ +struct o_qlim { + /* we don't need it yet, but we may as well communicate it now */ + u32 physical_block_size; + + /* so the original in struct queue_limits is unsigned short, + * but I'd have to put in padding anyways. 
*/ + u32 logical_block_size; + + /* One incoming bio becomes one DRBD request, + * which may be translated to several bio on the receiving side. + * We don't need to communicate chunk/boundary/segment ... limits. + */ + + /* various IO hints may be useful with "diskless client" setups */ + u32 alignment_offset; + u32 io_min; + u32 io_opt; + + /* We may need to communicate integrity stuff at some point, + * but let's not get ahead of ourselves. */ + + /* Backend discard capabilities. + * Receiving side uses "blkdev_issue_discard()", no need to communicate + * more specifics. If the backend cannot do discards, the DRBD peer + * may fall back to blkdev_issue_zeroout(). + */ + u8 discard_enabled; + u8 discard_zeroes_data; + u8 write_same_capable; + u8 _pad; +} __packed; + struct p_sizes { u64 d_size; /* size of disk */ u64 u_size; /* user requested size */ @@ -242,6 +310,9 @@ struct p_sizes { u32 max_bio_size; /* Maximal size of a BIO */ u16 queue_order_type; /* not yet implemented in DRBD*/ u16 dds_flags; /* use enum dds_flags here. */ + + /* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */ + struct o_qlim qlim[0]; } __packed; struct p_state { @@ -25,7 +25,7 @@ #include <linux/module.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <net/sock.h> #include <linux/drbd.h> @@ -48,7 +48,7 @@ #include "drbd_req.h" #include "drbd_vli.h" -#define PRO_FEATURES (FF_TRIM) +#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME) struct packet_info { enum drbd_packet cmd; @@ -361,14 +361,17 @@ You must not have the req_lock: drbd_wait_ee_list_empty() */ +/* normal: payload_size == request size (bi_size) + * w_same: payload_size == logical_block_size + * trim: payload_size == 0 */ struct drbd_peer_request * drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, - unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local) + unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; struct page *page = NULL; - unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; + unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT; if (drbd_insert_fault(device, DRBD_FAULT_AL_EE)) return NULL; @@ -380,7 +383,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return NULL; } - if (has_payload && data_size) { + if (nr_pages) { page = drbd_alloc_pages(peer_device, nr_pages, gfpflags_allow_blocking(gfp_mask)); if (!page) @@ -390,7 +393,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto memset(peer_req, 0, sizeof(*peer_req)); INIT_LIST_HEAD(&peer_req->w.list); drbd_clear_interval(&peer_req->i); - peer_req->i.size = data_size; + peer_req->i.size = request_size; peer_req->i.sector = sector; peer_req->submit_jif = jiffies; peer_req->peer_device = peer_device; @@ -1204,13 +1207,84 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in return err; } -static void drbd_flush(struct drbd_connection *connection) +/* This is blkdev_issue_flush, but asynchronous. + * We want to submit to all component volumes in parallel, + * then wait for all completions. 
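+ *
+ * The completion accounting used below, in a nutshell: ctx.pending
+ * starts at 1, a bias held by the submitter; each submit_one_flush()
+ * increments it and each one_flush_endio() decrements it, completing
+ * ctx.done when it reaches zero. The submitter drops its bias only
+ * after the submit loop and waits on ctx.done in case flushes are
+ * still in flight, so the completion can never fire early.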
+ */ +struct issue_flush_context { + atomic_t pending; + int error; + struct completion done; +}; +struct one_flush_context { + struct drbd_device *device; + struct issue_flush_context *ctx; +}; + +void one_flush_endio(struct bio *bio) { - int rv; - struct drbd_peer_device *peer_device; - int vnr; + struct one_flush_context *octx = bio->bi_private; + struct drbd_device *device = octx->device; + struct issue_flush_context *ctx = octx->ctx; + + if (bio->bi_error) { + ctx->error = bio->bi_error; + drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error); + } + kfree(octx); + bio_put(bio); + + clear_bit(FLUSH_PENDING, &device->flags); + put_ldev(device); + kref_put(&device->kref, drbd_destroy_device); + + if (atomic_dec_and_test(&ctx->pending)) + complete(&ctx->done); +} + +static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx) +{ + struct bio *bio = bio_alloc(GFP_NOIO, 0); + struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO); + if (!bio || !octx) { + drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n"); + /* FIXME: what else can I do now? disconnecting or detaching + * really does not help to improve the state of the world, either. + */ + kfree(octx); + if (bio) + bio_put(bio); + + ctx->error = -ENOMEM; + put_ldev(device); + kref_put(&device->kref, drbd_destroy_device); + return; + } + octx->device = device; + octx->ctx = ctx; + bio->bi_bdev = device->ldev->backing_bdev; + bio->bi_private = octx; + bio->bi_end_io = one_flush_endio; + bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH); + + device->flush_jif = jiffies; + set_bit(FLUSH_PENDING, &device->flags); + atomic_inc(&ctx->pending); + submit_bio(bio); +} + +static void drbd_flush(struct drbd_connection *connection) +{ if (connection->resource->write_ordering >= WO_BDEV_FLUSH) { + struct drbd_peer_device *peer_device; + struct issue_flush_context ctx; + int vnr; + + atomic_set(&ctx.pending, 1); + ctx.error = 0; + init_completion(&ctx.done); + rcu_read_lock(); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; @@ -1220,31 +1294,24 @@ static void drbd_flush(struct drbd_connection *connection) kref_get(&device->kref); rcu_read_unlock(); - /* Right now, we have only this one synchronous code path - * for flushes between request epochs. - * We may want to make those asynchronous, - * or at least parallelize the flushes to the volume devices. - */ - device->flush_jif = jiffies; - set_bit(FLUSH_PENDING, &device->flags); - rv = blkdev_issue_flush(device->ldev->backing_bdev, - GFP_NOIO, NULL); - clear_bit(FLUSH_PENDING, &device->flags); - if (rv) { - drbd_info(device, "local disk flush failed with status %d\n", rv); - /* would rather check on EOPNOTSUPP, but that is not reliable. - * don't try again for ANY return value != 0 - * if (rv == -EOPNOTSUPP) */ - drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO); - } - put_ldev(device); - kref_put(&device->kref, drbd_destroy_device); + submit_one_flush(device, &ctx); rcu_read_lock(); - if (rv) - break; } rcu_read_unlock(); + + /* Do we want to add a timeout, + * if disk-timeout is set? */ + if (!atomic_dec_and_test(&ctx.pending)) + wait_for_completion(&ctx.done); + + if (ctx.error) { + /* would rather check on EOPNOTSUPP, but that is not reliable. + * don't try again for ANY return value != 0 + * if (rv == -EOPNOTSUPP) */ + /* Any error is already reported by bio_endio callback. 
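
The rewrite above replaces one synchronous blkdev_issue_flush() per volume with fire-and-forget bios counted by ctx->pending: the count starts at 1 for the submitter itself, each submit_one_flush() adds one, and whoever drops it to zero completes ctx->done. A standalone userspace analogue of that counting pattern, using pthreads in place of bios (all names here are made up, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct issue_ctx {
    atomic_int pending;
    int error;
    pthread_mutex_t lock;
    pthread_cond_t done;
};

static void ctx_put(struct issue_ctx *ctx, int err)
{
    if (err)
        ctx->error = err;                /* last error wins, as in the patch */
    if (atomic_fetch_sub(&ctx->pending, 1) == 1) {
        pthread_mutex_lock(&ctx->lock);
        pthread_cond_signal(&ctx->done); /* we dropped the final reference */
        pthread_mutex_unlock(&ctx->lock);
    }
}

static void *one_flush(void *arg)        /* plays the role of one_flush_endio() */
{
    struct issue_ctx *ctx = arg;
    usleep(1000);                        /* pretend the device drains its cache */
    ctx_put(ctx, 0);
    return NULL;
}

int main(void)
{
    struct issue_ctx ctx = {
        .pending = 1,                    /* the submitter's own reference */
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
    };
    pthread_t t[4];

    for (int i = 0; i < 4; i++) {        /* like the idr_for_each_entry() loop */
        atomic_fetch_add(&ctx.pending, 1);
        pthread_create(&t[i], NULL, one_flush, &ctx);
    }

    ctx_put(&ctx, 0);                    /* drop our reference, maybe complete */
    pthread_mutex_lock(&ctx.lock);
    while (atomic_load(&ctx.pending) > 0)
        pthread_cond_wait(&ctx.done, &ctx.lock);
    pthread_mutex_unlock(&ctx.lock);

    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    printf("all flushes done, error=%d\n", ctx.error);
    return 0;
}

Starting the count at one and dropping that extra reference only after the submit loop is what keeps an early completion from firing while submissions are still in flight.
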
*/ + drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO); + } } } @@ -1379,6 +1446,120 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]); } +/* + * We *may* ignore the discard-zeroes-data setting, if so configured. + * + * Assumption is that "discard_zeroes_data=0" is only because the backend + * may ignore partial unaligned discards. + * + * LVM/DM thin as of at least + * LVM version: 2.02.115(2)-RHEL7 (2015-01-28) + * Library version: 1.02.93-RHEL7 (2015-01-28) + * Driver version: 4.29.0 + * still behaves this way. + * + * For unaligned (wrt. alignment and granularity) or too small discards, + * we zero-out the initial and/or trailing unaligned partial chunks, + * but discard all the aligned full chunks. + * + * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1". + */ +int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard) +{ + struct block_device *bdev = device->ldev->backing_bdev; + struct request_queue *q = bdev_get_queue(bdev); + sector_t tmp, nr; + unsigned int max_discard_sectors, granularity; + int alignment; + int err = 0; + + if (!discard) + goto zero_out; + + /* Zero-sector (unknown) and one-sector granularities are the same. */ + granularity = max(q->limits.discard_granularity >> 9, 1U); + alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; + + max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22)); + max_discard_sectors -= max_discard_sectors % granularity; + if (unlikely(!max_discard_sectors)) + goto zero_out; + + if (nr_sectors < granularity) + goto zero_out; + + tmp = start; + if (sector_div(tmp, granularity) != alignment) { + if (nr_sectors < 2*granularity) + goto zero_out; + /* start + gran - (start + gran - align) % gran */ + tmp = start + granularity - alignment; + tmp = start + granularity - sector_div(tmp, granularity); + + nr = tmp - start; + err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0); + nr_sectors -= nr; + start = tmp; + } + while (nr_sectors >= granularity) { + nr = min_t(sector_t, nr_sectors, max_discard_sectors); + err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0); + nr_sectors -= nr; + start += nr; + } + zero_out: + if (nr_sectors) { + err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0); + } + return err != 0; +} + +static bool can_do_reliable_discards(struct drbd_device *device) +{ + struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); + struct disk_conf *dc; + bool can_do; + + if (!blk_queue_discard(q)) + return false; + + if (q->limits.discard_zeroes_data) + return true; + + rcu_read_lock(); + dc = rcu_dereference(device->ldev->disk_conf); + can_do = dc->discard_zeroes_if_aligned; + rcu_read_unlock(); + return can_do; +} + +static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req) +{ + /* If the backend cannot discard, or does not guarantee + * read-back zeroes in discarded ranges, we fall back to + * zero-out. Unless configuration specifically requested + * otherwise.
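
The chunking arithmetic in drbd_issue_discard_or_zero_out() above is easiest to see with concrete numbers: zero-out up to the first properly aligned sector, discard whole granularity-sized chunks, zero-out whatever tail remains. A userspace model of the same arithmetic, not part of the patch (uint64_t stands in for sector_t, printf for the blkdev_issue_* helpers; the sample geometry is invented):

#include <stdio.h>
#include <stdint.h>

static void issue(const char *what, uint64_t start, uint64_t nr)
{
    if (nr)
        printf("%-8s start=%llu nr=%llu\n", what,
               (unsigned long long)start, (unsigned long long)nr);
}

static void discard_or_zero_out(uint64_t start, uint64_t nr_sectors,
                                uint64_t granularity, uint64_t alignment,
                                uint64_t max_discard_sectors)
{
    max_discard_sectors -= max_discard_sectors % granularity;
    if (!max_discard_sectors || nr_sectors < granularity)
        goto zero_out;

    if (start % granularity != alignment) {
        uint64_t tmp, nr;
        if (nr_sectors < 2 * granularity)
            goto zero_out;
        /* first sector at or after start with tmp % gran == alignment */
        tmp = start + granularity -
              (start + granularity - alignment) % granularity;
        nr = tmp - start;
        issue("zeroout", start, nr);       /* unaligned head */
        nr_sectors -= nr;
        start = tmp;
    }
    while (nr_sectors >= granularity) {
        uint64_t nr = nr_sectors < max_discard_sectors ?
                      nr_sectors : max_discard_sectors;
        issue("discard", start, nr);       /* aligned full chunks */
        nr_sectors -= nr;
        start += nr;
    }
zero_out:
    issue("zeroout", start, nr_sectors);   /* whatever tail remains */
}

int main(void)
{
    /* 4 MiB-granularity thin pool, 8-sector alignment offset (made up):
     * zeroes 1000..8199, then discards from the aligned sector 8200 on */
    discard_or_zero_out(1000, 40000, 8192, 8, 1U << 22);
    return 0;
}
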
*/ + if (!can_do_reliable_discards(device)) + peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; + + if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, + peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT))) + peer_req->flags |= EE_WAS_ERROR; + drbd_endio_write_sec_final(peer_req); +} + +static void drbd_issue_peer_wsame(struct drbd_device *device, + struct drbd_peer_request *peer_req) +{ + struct block_device *bdev = device->ldev->backing_bdev; + sector_t s = peer_req->i.sector; + sector_t nr = peer_req->i.size >> 9; + if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages)) + peer_req->flags |= EE_WAS_ERROR; + drbd_endio_write_sec_final(peer_req); +} + + /** * drbd_submit_peer_request() * @device: DRBD device. @@ -1398,7 +1579,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin /* TODO allocate from our own bio_set. */ int drbd_submit_peer_request(struct drbd_device *device, struct drbd_peer_request *peer_req, - const unsigned rw, const int fault_type) + const unsigned op, const unsigned op_flags, + const int fault_type) { struct bio *bios = NULL; struct bio *bio; @@ -1409,7 +1591,13 @@ int drbd_submit_peer_request(struct drbd_device *device, unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; int err = -ENOMEM; - if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) { + /* TRIM/DISCARD: for now, always use the helper function + * blkdev_issue_zeroout(..., discard=true). + * It's synchronous, but it does the right thing wrt. bio splitting. + * Correctness first, performance later. Next step is to code an + * asynchronous variant of the same. + */ + if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) { /* wait for all pending IO completions, before we start * zeroing things out. */ conn_wait_active_ee_empty(peer_req->peer_device->connection); @@ -1417,22 +1605,22 @@ int drbd_submit_peer_request(struct drbd_device *device, * so we can find it to present it in debugfs */ peer_req->submit_jif = jiffies; peer_req->flags |= EE_SUBMITTED; - spin_lock_irq(&device->resource->req_lock); - list_add_tail(&peer_req->w.list, &device->active_ee); - spin_unlock_irq(&device->resource->req_lock); - if (blkdev_issue_zeroout(device->ldev->backing_bdev, - sector, data_size >> 9, GFP_NOIO, false)) - peer_req->flags |= EE_WAS_ERROR; - drbd_endio_write_sec_final(peer_req); + + /* If this was a resync request from receive_rs_deallocated(), + * it is already on the sync_ee list */ + if (list_empty(&peer_req->w.list)) { + spin_lock_irq(&device->resource->req_lock); + list_add_tail(&peer_req->w.list, &device->active_ee); + spin_unlock_irq(&device->resource->req_lock); + } + + if (peer_req->flags & EE_IS_TRIM) + drbd_issue_peer_discard(device, peer_req); + else /* EE_WRITE_SAME */ + drbd_issue_peer_wsame(device, peer_req); return 0; } - /* Discards don't have any payload. - * But the scsi layer still expects a bio_vec it can use internally, - * see sd_setup_discard_cmnd() and blk_add_request_payload(). */ - if (peer_req->flags & EE_IS_TRIM) - nr_pages = 1; - /* In most cases, we will only need one bio. 
But in case the lower * level restrictions happen to be different at this offset on this * side than those of the sending peer, we may need to submit the @@ -1450,7 +1638,7 @@ next_bio: /* > peer_req->i.sector, unless this is the first bio */ bio->bi_iter.bi_sector = sector; bio->bi_bdev = device->ldev->backing_bdev; - bio->bi_rw = rw; + bio_set_op_attrs(bio, op, op_flags); bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; @@ -1458,11 +1646,6 @@ next_bio: bios = bio; ++n_bios; - if (rw & REQ_DISCARD) { - bio->bi_iter.bi_size = data_size; - goto submit; - } - page_chain_for_each(page) { unsigned len = min_t(unsigned, data_size, PAGE_SIZE); if (!bio_add_page(bio, page, len, 0)) { @@ -1484,7 +1667,6 @@ next_bio: --nr_pages; } D_ASSERT(device, data_size == 0); -submit: D_ASSERT(device, page == NULL); atomic_set(&peer_req->pending_bios, n_bios); @@ -1608,8 +1790,26 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf return 0; } +/* quick wrapper in case payload size != request_size (write same) */ +static void drbd_csum_ee_size(struct crypto_ahash *h, + struct drbd_peer_request *r, void *d, + unsigned int payload_size) +{ + unsigned int tmp = r->i.size; + r->i.size = payload_size; + drbd_csum_ee(h, r, d); + r->i.size = tmp; +} + /* used from receive_RSDataReply (recv_resync_read) - * and from receive_Data */ + * and from receive_Data. + * data_size: actual payload ("data in") + * for normal writes that is bi_size. + * for discards, that is zero. + * for write same, it is logical_block_size. + * both trim and write same have the bi_size ("data len to be affected") + * as extra argument in the packet header. + */ static struct drbd_peer_request * read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, struct packet_info *pi) __must_hold(local) @@ -1624,6 +1824,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, void *dig_vv = peer_device->connection->int_dig_vv; unsigned long *data; struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; + struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL; digest_size = 0; if (!trim && peer_device->connection->peer_integrity_tfm) { @@ -1638,38 +1839,60 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, data_size -= digest_size; } + /* assume request_size == data_size, but special case trim and wsame. */ + ds = data_size; if (trim) { - D_ASSERT(peer_device, data_size == 0); - data_size = be32_to_cpu(trim->size); + if (!expect(data_size == 0)) + return NULL; + ds = be32_to_cpu(trim->size); + } else if (wsame) { + if (data_size != queue_logical_block_size(device->rq_queue)) { + drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n", + data_size, queue_logical_block_size(device->rq_queue)); + return NULL; + } + if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) { + drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n", + data_size, bdev_logical_block_size(device->ldev->backing_bdev)); + return NULL; + } + ds = be32_to_cpu(wsame->size); } - if (!expect(IS_ALIGNED(data_size, 512))) + if (!expect(IS_ALIGNED(ds, 512))) return NULL; - /* prepare for larger trim requests. */ - if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE)) + if (trim || wsame) { + if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9))) + return NULL; + } else if (!expect(ds <= DRBD_MAX_BIO_SIZE)) return NULL; /* even though we trust our peer, * we sometimes have to double check.
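
The data_size/ds split that read_in_block() performs above boils down to a three-row table: how many bytes travel on the wire (payload) versus how many the request affects on disk. A compact sketch, separate from the patch, with illustrative sizes only:

#include <stdio.h>

enum pkt { NORMAL_WRITE, TRIM, WRITE_SAME };

static unsigned payload_size(enum pkt p, unsigned bi_size, unsigned lbs)
{
    switch (p) {
    case TRIM:       return 0;       /* nothing to transfer, just a range */
    case WRITE_SAME: return lbs;     /* one logical block, repeated on disk */
    default:         return bi_size; /* normal write: payload == request */
    }
}

int main(void)
{
    unsigned bi_size = 1u << 20, lbs = 512;
    const char *name[] = { "write", "trim", "write_same" };

    for (enum pkt p = NORMAL_WRITE; p <= WRITE_SAME; p++)
        printf("%-10s request=%u payload=%u\n",
               name[p], bi_size, payload_size(p, bi_size, lbs));
    return 0;
}
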
*/ - if (sector + (data_size>>9) > capacity) { + if (sector + (ds>>9) > capacity) { drbd_err(device, "request from peer beyond end of local disk: " "capacity: %llus < sector: %llus + size: %u\n", (unsigned long long)capacity, - (unsigned long long)sector, data_size); + (unsigned long long)sector, ds); return NULL; } /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ - peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO); + peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); if (!peer_req) return NULL; peer_req->flags |= EE_WRITE; - if (trim) + if (trim) { + peer_req->flags |= EE_IS_TRIM; return peer_req; + } + if (wsame) + peer_req->flags |= EE_WRITE_SAME; + /* receive payload size bytes into page chain */ ds = data_size; page = peer_req->pages; page_chain_for_each(page) { @@ -1689,7 +1912,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, } if (digest_size) { - drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv); + drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size); if (memcmp(dig_in, dig_vv, digest_size)) { drbd_err(device, "Digest integrity check FAILED: %llus +%u\n", (unsigned long long)sector, data_size); @@ -1830,7 +2053,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto spin_unlock_irq(&device->resource->req_lock); atomic_add(pi->size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, + DRBD_FAULT_RS_WR) == 0) return 0; /* don't care for the reason here */ @@ -2065,13 +2289,13 @@ static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) { struct drbd_peer_request *rs_req; - bool rv = 0; + bool rv = false; spin_lock_irq(&device->resource->req_lock); list_for_each_entry(rs_req, &device->sync_ee, w.list) { if (overlaps(peer_req->i.sector, peer_req->i.size, rs_req->i.sector, rs_req->i.size)) { - rv = 1; + rv = true; break; } } @@ -2152,12 +2376,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co /* see also bio_flags_to_wire() * DRBD_REQ_*, because we need to semantically map the flags to data packet * flags and back. We may replicate to other kernel versions. */ -static unsigned long wire_flags_to_bio(u32 dpf) +static unsigned long wire_flags_to_bio_flags(u32 dpf) { return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | (dpf & DP_FUA ? REQ_FUA : 0) | - (dpf & DP_FLUSH ? REQ_FLUSH : 0) | - (dpf & DP_DISCARD ? REQ_DISCARD : 0); + (dpf & DP_FLUSH ? 
REQ_PREFLUSH : 0); +} + +static unsigned long wire_flags_to_bio_op(u32 dpf) +{ + if (dpf & DP_DISCARD) + return REQ_OP_DISCARD; + else + return REQ_OP_WRITE; } static void fail_postponed_requests(struct drbd_device *device, sector_t sector, @@ -2303,7 +2534,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * struct drbd_peer_request *peer_req; struct p_data *p = pi->data; u32 peer_seq = be32_to_cpu(p->seq_num); - int rw = WRITE; + int op, op_flags; u32 dp_flags; int err, tp; @@ -2342,14 +2573,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * peer_req->flags |= EE_APPLICATION; dp_flags = be32_to_cpu(p->dp_flags); - rw |= wire_flags_to_bio(dp_flags); + op = wire_flags_to_bio_op(dp_flags); + op_flags = wire_flags_to_bio_flags(dp_flags); if (pi->cmd == P_TRIM) { - struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); - peer_req->flags |= EE_IS_TRIM; - if (!blk_queue_discard(q)) - peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; D_ASSERT(peer_device, peer_req->i.size > 0); - D_ASSERT(peer_device, rw & REQ_DISCARD); + D_ASSERT(peer_device, op == REQ_OP_DISCARD); D_ASSERT(peer_device, peer_req->pages == NULL); } else if (peer_req->pages == NULL) { D_ASSERT(device, peer_req->i.size == 0); @@ -2414,11 +2642,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * update_peer_seq(peer_device, peer_seq); spin_lock_irq(&device->resource->req_lock); } - /* if we use the zeroout fallback code, we process synchronously - * and we wait for all pending requests, respectively wait for + /* TRIM and WRITE_SAME are processed synchronously, + * we wait for all pending requests, respectively wait for * active_ee to become empty in drbd_submit_peer_request(); * better not add ourselves here. */ - if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0) + if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0) list_add_tail(&peer_req->w.list, &device->active_ee); spin_unlock_irq(&device->resource->req_lock); @@ -2433,7 +2661,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * peer_req->flags |= EE_CALL_AL_COMPLETE_IO; } - err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR); + err = drbd_submit_peer_request(device, peer_req, op, op_flags, + DRBD_FAULT_DT_WR); if (!err) return 0; @@ -2449,7 +2678,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * } out_interrupted: - drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP); + drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP); put_ldev(device); drbd_free_peer_req(device, peer_req); return err; @@ -2574,6 +2803,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet case P_DATA_REQUEST: drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p); break; + case P_RS_THIN_REQ: case P_RS_DATA_REQUEST: case P_CSUM_RS_REQUEST: case P_OV_REQUEST: @@ -2599,7 +2829,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. 
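
With the block layer now separating the operation from its modifier flags, the old single wire_flags_to_bio() necessarily splits in two, as above. A round-trip sketch of that split (not part of the patch; the numeric values are stand-ins, only the DP_* names come from the protocol header):

#include <assert.h>
#include <stdio.h>

#define DP_RW_SYNC  2
#define DP_FUA     16
#define DP_FLUSH   32
#define DP_DISCARD 64

enum op { OP_WRITE, OP_DISCARD };
#define F_SYNC     (1u << 0)
#define F_FUA      (1u << 1)
#define F_PREFLUSH (1u << 2)

static enum op wire_to_op(unsigned dpf)
{
    /* the op is exclusive: either a discard or a regular write */
    return (dpf & DP_DISCARD) ? OP_DISCARD : OP_WRITE;
}

static unsigned wire_to_flags(unsigned dpf)
{
    /* the modifiers remain a bitmask, mapped one flag at a time */
    return (dpf & DP_RW_SYNC ? F_SYNC : 0) |
           (dpf & DP_FUA ? F_FUA : 0) |
           (dpf & DP_FLUSH ? F_PREFLUSH : 0);
}

int main(void)
{
    unsigned dpf = DP_FUA | DP_FLUSH;

    assert(wire_to_op(dpf) == OP_WRITE);
    assert(wire_to_flags(dpf) == (F_FUA | F_PREFLUSH));
    assert(wire_to_op(DP_DISCARD) == OP_DISCARD);
    printf("op=%d flags=0x%x\n", wire_to_op(dpf), wire_to_flags(dpf));
    return 0;
}
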
*/ peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, - true /* has real payload */, GFP_NOIO); + size, GFP_NOIO); if (!peer_req) { put_ldev(device); return -ENOMEM; } @@ -2613,6 +2843,12 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet peer_req->flags |= EE_APPLICATION; goto submit; + case P_RS_THIN_REQ: + /* If at some point in the future we have a smart way to + find out if this data block is completely deallocated, + then we would do something smarter here than reading + the block... */ + peer_req->flags |= EE_RS_THIN_REQ; case P_RS_DATA_REQUEST: peer_req->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; @@ -2723,7 +2959,8 @@ submit_for_resync: submit: update_receiver_timing_details(connection, drbd_submit_peer_request); inc_unacked(device); - if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + fault_type) == 0) return 0; /* don't care for the reason here */ @@ -2957,7 +3194,8 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, -1091 requires proto 91 -1096 requires proto 96 */ -static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local) + +static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local) { struct drbd_peer_device *const peer_device = first_peer_device(device); struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; @@ -3037,8 +3275,39 @@ static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __m * next bit (weight 2) is set when peer was primary */ *rule_nr = 40; + /* Neither has the "crashed primary" flag set, + * only a replication link hiccup. */ + if (rct == 0) + return 0; + + /* Current UUID equal and no bitmap uuid; does not necessarily + * mean this was a "simultaneous hard crash", maybe IO was + * frozen, so no UUID-bump happened. + * This is a protocol change, overload DRBD_FF_WSAME as flag + * for "new-enough" peer DRBD version. */ + if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) { + *rule_nr = 41; + if (!(connection->agreed_features & DRBD_FF_WSAME)) { + drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n"); + return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8)); + } + if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) { + /* At least one has the "crashed primary" bit set, + * both are primary now, but neither has rotated its UUIDs? + * "Can not happen." */ + drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n"); + return -100; + } + if (device->state.role == R_PRIMARY) + return 1; + return -1; + } + + /* Both are secondary. + * Really looks like recovery from simultaneous hard crash. + * Check which had been primary before, and arbitrate.
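
The rule-41 branch above packs its "peer too old" verdict into the integer handshake result: -(0x10000 | protocol | (feature_flags << 8)), which drbd_sync_handshake() later unpacks in its hg < -0x10000 branch. A sketch of the encode/decode pair (PRO_VERSION_MAX is assumed to be 101 here; the patch context does not show its value):

#include <assert.h>
#include <stdio.h>

#define PRO_VERSION_MAX 101     /* assumed, not shown in the patch */
#define DRBD_FF_WSAME 4

static int encode_requirement(int proto, int fflags)
{
    /* proto fits in the low byte, feature flags in the next byte;
     * the 0x10000 bit keeps the value clear of the -1000..-1096 range
     * already used for plain "need protocol N" results */
    return -(0x10000 | proto | (fflags << 8));
}

int main(void)
{
    int hg = encode_requirement(PRO_VERSION_MAX, DRBD_FF_WSAME);

    assert(hg < -0x10000);
    hg = -hg;                   /* the decode from drbd_sync_handshake() */
    printf("need protocol %d and feature flags 0x%x\n",
           hg & 0xff, (hg >> 8) & 0xff);
    return 0;
}
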
*/ switch (rct) { - case 0: /* !self_pri && !peer_pri */ return 0; + case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */ case 1: /* self_pri && !peer_pri */ return 1; case 2: /* !self_pri && peer_pri */ return -1; case 3: /* self_pri && peer_pri */ @@ -3165,7 +3434,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); - hg = drbd_uuid_compare(device, &rule_nr); + hg = drbd_uuid_compare(device, peer_role, &rule_nr); spin_unlock_irq(&device->ldev->md.uuid_lock); drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr); @@ -3174,6 +3443,15 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, drbd_alert(device, "Unrelated data, aborting!\n"); return C_MASK; } + if (hg < -0x10000) { + int proto, fflags; + hg = -hg; + proto = hg & 0xff; + fflags = (hg >> 8) & 0xff; + drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n", + proto, fflags); + return C_MASK; + } if (hg < -1000) { drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); return C_MASK; @@ -3403,7 +3681,8 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in */ peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC); - if (!peer_integrity_tfm) { + if (IS_ERR(peer_integrity_tfm)) { + peer_integrity_tfm = NULL; drbd_err(connection, "peer data-integrity-alg %s not supported\n", integrity_alg); goto disconnect; @@ -3754,6 +4033,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info struct drbd_peer_device *peer_device; struct drbd_device *device; struct p_sizes *p = pi->data; + struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL; enum determine_dev_size dd = DS_UNCHANGED; sector_t p_size, p_usize, p_csize, my_usize; int ldsc = 0; /* local disk size changed */ @@ -3773,6 +4053,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info device->p_size = p_size; if (get_ldev(device)) { + sector_t new_size, cur_size; rcu_read_lock(); my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; rcu_read_unlock(); @@ -3789,11 +4070,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info /* Never shrink a device with usable data during connect. But allow online shrinking if we are connected. */ - if (drbd_new_dev_size(device, device->ldev, p_usize, 0) < - drbd_get_capacity(device->this_bdev) && + new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0); + cur_size = drbd_get_capacity(device->this_bdev); + if (new_size < cur_size && device->state.disk >= D_OUTDATED && device->state.conn < C_CONNECTED) { - drbd_err(device, "The peer's disk size is too small!\n"); + drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n", + (unsigned long long)new_size, (unsigned long long)cur_size); conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); put_ldev(device); return -EIO; @@ -3827,14 +4110,14 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info } device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); - /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size(). + /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size(). 
In case we cleared the QUEUE_FLAG_DISCARD from our queue in - drbd_reconsider_max_bio_size(), we can be sure that after + drbd_reconsider_queue_parameters(), we can be sure that after drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */ ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(device)) { - drbd_reconsider_max_bio_size(device, device->ldev); + drbd_reconsider_queue_parameters(device, device->ldev, o); dd = drbd_determine_dev_size(device, ddsf, NULL); put_ldev(device); if (dd == DS_ERROR) @@ -3854,7 +4137,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info * However, if he sends a zero current size, * take his (user-capped or) backing disk size anyways. */ - drbd_reconsider_max_bio_size(device, NULL); + drbd_reconsider_queue_parameters(device, NULL, o); drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size); } @@ -4587,9 +4870,75 @@ static int receive_out_of_sync(struct drbd_connection *connection, struct packet return 0; } +static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi) +{ + struct drbd_peer_device *peer_device; + struct p_block_desc *p = pi->data; + struct drbd_device *device; + sector_t sector; + int size, err = 0; + + peer_device = conn_peer_device(connection, pi->vnr); + if (!peer_device) + return -EIO; + device = peer_device->device; + + sector = be64_to_cpu(p->sector); + size = be32_to_cpu(p->blksize); + + dec_rs_pending(device); + + if (get_ldev(device)) { + struct drbd_peer_request *peer_req; + const int op = REQ_OP_DISCARD; + + peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, + size, 0, GFP_NOIO); + if (!peer_req) { + put_ldev(device); + return -ENOMEM; + } + + peer_req->w.cb = e_end_resync_block; + peer_req->submit_jif = jiffies; + peer_req->flags |= EE_IS_TRIM; + + spin_lock_irq(&device->resource->req_lock); + list_add_tail(&peer_req->w.list, &device->sync_ee); + spin_unlock_irq(&device->resource->req_lock); + + atomic_add(pi->size >> 9, &device->rs_sect_ev); + err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR); + + if (err) { + spin_lock_irq(&device->resource->req_lock); + list_del(&peer_req->w.list); + spin_unlock_irq(&device->resource->req_lock); + + drbd_free_peer_req(device, peer_req); + put_ldev(device); + err = 0; + goto fail; + } + + inc_unacked(device); + + /* No put_ldev() here. 
Gets called in drbd_endio_write_sec_final(), + as well as drbd_rs_complete_io() */ + } else { + fail: + drbd_rs_complete_io(device, sector); + drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); + } + + atomic_add(size >> 9, &device->rs_sect_in); + + return err; +} + struct data_cmd { int expect_payload; - size_t pkt_size; + unsigned int pkt_size; int (*fn)(struct drbd_connection *, struct packet_info *); }; @@ -4614,11 +4963,14 @@ static struct data_cmd drbd_cmd_handler[] = { [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, + [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest }, [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, + [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated }, + [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data }, }; static void drbdd(struct drbd_connection *connection) @@ -4628,7 +4980,7 @@ static void drbdd(struct drbd_connection *connection) int err; while (get_t_state(&connection->receiver) == RUNNING) { - struct data_cmd *cmd; + struct data_cmd const *cmd; drbd_thread_current_set_cpu(&connection->receiver); update_receiver_timing_details(connection, drbd_recv_header); @@ -4643,11 +4995,18 @@ static void drbdd(struct drbd_connection *connection) } shs = cmd->pkt_size; + if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME) + shs += sizeof(struct o_qlim); if (pi.size > shs && !cmd->expect_payload) { drbd_err(connection, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size); goto err_out; } + if (pi.size < shs) { + drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n", + cmdname(pi.cmd), (int)shs, pi.size); + goto err_out; + } if (shs) { update_receiver_timing_details(connection, drbd_recv_all_warn); @@ -4783,9 +5142,11 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device) drbd_md_sync(device); - /* serialize with bitmap writeout triggered by the state change, - * if any. */ - wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); + if (get_ldev(device)) { + drbd_bitmap_io(device, &drbd_bm_write_copy_pages, + "write from disconnected", BM_LOCKED_CHANGE_ALLOWED); + put_ldev(device); + } /* tcp_close and release of sendpage pages can be deferred. I don't * want to use SO_LINGER, because apparently it can be deferred for @@ -4892,8 +5253,12 @@ static int drbd_do_features(struct drbd_connection *connection) drbd_info(connection, "Handshake successful: " "Agreed network protocol version %d\n", connection->agreed_pro_version); - drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n", - connection->agreed_features & FF_TRIM ? " " : " not "); + drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n", + connection->agreed_features, + connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "", + connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "", + connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : + connection->agreed_features ? 
"" : " none"); return 1; @@ -47,8 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r &device->vdisk->part0, req->start_jif); } -static struct drbd_request *drbd_req_new(struct drbd_device *device, - struct bio *bio_src) +static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src) { struct drbd_request *req; @@ -58,10 +57,12 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, memset(req, 0, sizeof(*req)); drbd_req_make_private_bio(req, bio_src); - req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; - req->device = device; - req->master_bio = bio_src; - req->epoch = 0; + req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) + | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0) + | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0); + req->device = device; + req->master_bio = bio_src; + req->epoch = 0; drbd_clear_interval(&req->i); req->i.sector = bio_src->bi_iter.bi_sector; @@ -218,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) { const unsigned s = req->rq_state; struct drbd_device *device = req->device; - int rw; int error, ok; /* we must not complete the master bio, while it is @@ -242,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) return; } - rw = bio_rw(req->master_bio); - /* * figure out whether to report success or failure. * @@ -267,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) * epoch number. If they match, increase the current_tle_nr, * and reset the transfer log epoch write_cnt. */ - if (rw == WRITE && + if (op_is_write(bio_op(req->master_bio)) && req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) start_new_tl_epoch(first_peer_device(device)->connection); @@ -284,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) * because no path was available, in which case * it was not even added to the transfer_log. * - * READA may fail, and will not be retried. + * read-ahead may fail, and will not be retried. * * WRITE should have used all available paths already. */ - if (!ok && rw == READ && !list_empty(&req->tl_requests)) + if (!ok && + bio_op(req->master_bio) == REQ_OP_READ && + !(req->master_bio->bi_rw & REQ_RAHEAD) && + !list_empty(&req->tl_requests)) req->rq_state |= RQ_POSTPONED; if (!(req->rq_state & RQ_POSTPONED)) { @@ -644,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, __drbd_chk_io_error(device, DRBD_READ_ERROR); /* fall through. */ case READ_AHEAD_COMPLETED_WITH_ERROR: - /* it is legal to fail READA, no __drbd_chk_io_error in that case. */ + /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break; @@ -656,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; case QUEUE_FOR_NET_READ: - /* READ or READA, and + /* READ, and * no local disk, * or target area marked as invalid, * or just got an io-error. 
*/ @@ -977,16 +978,20 @@ static void complete_conflicting_writes(struct drbd_request *req) sector_t sector = req->i.sector; int size = req->i.size; - i = drbd_find_overlap(&device->write_requests, sector, size); - if (!i) - return; - for (;;) { - prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); - i = drbd_find_overlap(&device->write_requests, sector, size); - if (!i) + drbd_for_each_overlap(i, &device->write_requests, sector, size) { + /* Ignore, if already completed to upper layers. */ + if (i->completed) + continue; + /* Handle the first found overlap. After the schedule + * we have to restart the tree walk. */ + break; + } + if (!i) /* if any */ break; + /* Indicate to wake up device->misc_wait on progress. */ + prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); i->waiting = true; spin_unlock_irq(&device->resource->req_lock); schedule(); @@ -995,7 +1000,7 @@ static void complete_conflicting_writes(struct drbd_request *req) finish_wait(&device->misc_wait, &wait); } -/* called within req_lock and rcu_read_lock() */ +/* called within req_lock */ static void maybe_pull_ahead(struct drbd_device *device) { struct drbd_connection *connection = first_peer_device(device)->connection; @@ -1132,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req) * replicating, in which case there is no point. */ if (unlikely(req->i.size == 0)) { /* The only size==0 bios we expect are empty flushes. */ - D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); + D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH); if (remote) _req_mod(req, QUEUE_AS_DRBD_BARRIER); return remote; @@ -1152,12 +1157,29 @@ static int drbd_process_write_request(struct drbd_request *req) return remote; } +static void drbd_process_discard_req(struct drbd_request *req) +{ + int err = drbd_issue_discard_or_zero_out(req->device, + req->i.sector, req->i.size >> 9, true); + + if (err) + req->private_bio->bi_error = -EIO; + bio_endio(req->private_bio); +} + static void drbd_submit_req_private_bio(struct drbd_request *req) { struct drbd_device *device = req->device; struct bio *bio = req->private_bio; - const int rw = bio_rw(bio); + unsigned int type; + + if (bio_op(bio) != REQ_OP_READ) + type = DRBD_FAULT_DT_WR; + else if (bio->bi_rw & REQ_RAHEAD) + type = DRBD_FAULT_DT_RA; + else + type = DRBD_FAULT_DT_RD; bio->bi_bdev = device->ldev->backing_bdev; @@ -1167,11 +1189,10 @@ drbd_submit_req_private_bio(struct drbd_request *req) * stable storage, and this is a WRITE, we may not even submit * this bio. */ if (get_ldev(device)) { - if (drbd_insert_fault(device, - rw == WRITE ? DRBD_FAULT_DT_WR - : rw == READ ? 
DRBD_FAULT_DT_RD - : DRBD_FAULT_DT_RA)) + if (drbd_insert_fault(device, type)) bio_io_error(bio); + else if (bio_op(bio) == REQ_OP_DISCARD) + drbd_process_discard_req(req); else generic_make_request(bio); put_ldev(device); @@ -1223,24 +1244,45 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long /* Update disk stats */ _drbd_start_io_acct(device, req); + /* process discards always from our submitter thread */ + if (bio_op(bio) == REQ_OP_DISCARD) + goto queue_for_submitter_thread; + if (rw == WRITE && req->private_bio && req->i.size && !test_bit(AL_SUSPENDED, &device->flags)) { - if (!drbd_al_begin_io_fastpath(device, &req->i)) { - atomic_inc(&device->ap_actlog_cnt); - drbd_queue_write(device, req); - return NULL; - } + if (!drbd_al_begin_io_fastpath(device, &req->i)) + goto queue_for_submitter_thread; req->rq_state |= RQ_IN_ACT_LOG; req->in_actlog_jif = jiffies; } - return req; + + queue_for_submitter_thread: + atomic_inc(&device->ap_actlog_cnt); + drbd_queue_write(device, req); + return NULL; +} + +/* Require at least one path to current data. + * We don't want to allow writes on C_STANDALONE D_INCONSISTENT: + * We would not allow reading what was written, + * we would not have bumped the data generation uuids, + * we would cause data divergence for all the wrong reasons. + * + * If we don't see at least one D_UP_TO_DATE, we will fail this request, + * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO, + * and queues for retry later. + */ +static bool may_do_writes(struct drbd_device *device) +{ + const union drbd_dev_state s = device->state; + return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE; } static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) { struct drbd_resource *resource = device->resource; - const int rw = bio_rw(req->master_bio); + const int rw = bio_data_dir(req->master_bio); struct bio_and_error m = { NULL, }; bool no_remote = false; bool submit_private_bio = false; @@ -1270,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request goto out; } - /* We fail READ/READA early, if we can not serve it. + /* We fail READ early, if we can not serve it. * We must do this before req is registered on any lists. * Otherwise, drbd_req_complete() will queue failed READ for retry. */ if (rw != WRITE) { @@ -1291,6 +1333,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request } if (rw == WRITE) { + if (req->private_bio && !may_do_writes(device)) { + bio_put(req->private_bio); + req->private_bio = NULL; + put_ldev(device); + goto nodata; + } if (!drbd_process_write_request(req)) no_remote = true; } else { @@ -206,6 +206,8 @@ enum drbd_req_state_bits { /* Set when this is a write, clear for a read */ __RQ_WRITE, + __RQ_WSAME, + __RQ_UNMAP, /* Should call drbd_al_complete_io() for this request...
*/ __RQ_IN_ACT_LOG, @@ -241,10 +243,11 @@ enum drbd_req_state_bits { #define RQ_NET_OK (1UL << __RQ_NET_OK) #define RQ_NET_SIS (1UL << __RQ_NET_SIS) -/* 0x1f8 */ #define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) #define RQ_WRITE (1UL << __RQ_WRITE) +#define RQ_WSAME (1UL << __RQ_WSAME) +#define RQ_UNMAP (1UL << __RQ_UNMAP) #define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG) #define RQ_POSTPONED (1UL << __RQ_POSTPONED) #define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP) @@ -814,7 +814,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) } if (rv <= 0) - /* already found a reason to abort */; + goto out; /* already found a reason to abort */ else if (ns.role == R_SECONDARY && device->open_cnt) rv = SS_DEVICE_IN_USE; @@ -862,6 +862,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN) rv = SS_CONNECTED_OUTDATES; +out: rcu_read_unlock(); return rv; @@ -906,6 +907,15 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS))) rv = SS_IN_TRANSIENT_STATE; + /* Do not promote during resync handshake triggered by "force primary". + * This is a hack. It should really be rejected by the peer during the + * cluster wide state change request. */ + if (os.role != R_PRIMARY && ns.role == R_PRIMARY + && ns.pdsk == D_UP_TO_DATE + && ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS + && (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn)) + rv = SS_IN_TRANSIENT_STATE; + if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED) rv = SS_NEED_CONNECTION; @@ -1628,6 +1638,26 @@ static void broadcast_state_change(struct drbd_state_change *state_change) #undef REMEMBER_STATE_CHANGE } +/* takes old and new peer disk state */ +static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns) +{ + if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED) + && (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED)) + return true; + + /* Scenario, starting with normal operation + * Connected Primary/Secondary UpToDate/UpToDate + * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen) + * ... + * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!) + */ + if (os == D_UNKNOWN + && (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED)) + return true; + + return false; +} + /** * after_state_ch() - Perform after state change actions that may sleep * @device: DRBD device. @@ -1675,7 +1705,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, what = RESEND; if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) && - conn_lowest_disk(connection) > D_NEGOTIATING) + conn_lowest_disk(connection) == D_UP_TO_DATE) what = RESTART_FROZEN_DISK_IO; if (resource->susp_nod && what != NOTHING) { @@ -1699,6 +1729,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, idr_for_each_entry(&connection->peer_devices, peer_device, vnr) clear_bit(NEW_CUR_UUID, &peer_device->device->flags); rcu_read_unlock(); + + /* We should actively create a new uuid, _before_ + * we resume/resend, if the peer is diskless + * (recovery from a multiple error scenario). + * Currently, this happens with a slight delay + * below when checking lost_contact_to_peer_data() ...
+ */ _tl_restart(connection, RESEND); _conn_request_state(connection, (union drbd_state) { { .susp_fen = 1 } }, @@ -1742,12 +1779,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, BM_LOCKED_TEST_ALLOWED); /* Lost contact to peer's copy of the data */ - if ((os.pdsk >= D_INCONSISTENT && - os.pdsk != D_UNKNOWN && - os.pdsk != D_OUTDATED) - && (ns.pdsk < D_INCONSISTENT || - ns.pdsk == D_UNKNOWN || - ns.pdsk == D_OUTDATED)) { + if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) { if (get_ldev(device)) { if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { @@ -1934,12 +1966,17 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, /* This triggers bitmap writeout of potentially still unwritten pages * if the resync finished cleanly, or aborted because of peer disk - * failure, or because of connection loss. + * failure, or on transition from resync back to AHEAD/BEHIND. + * + * Connection loss is handled in drbd_disconnected() by the receiver. + * * For resync aborted because of local disk failure, we cannot do * any bitmap writeout anymore. + * * No harm done if some bits change during this phase. */ - if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) { + if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) && + (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) { drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL, "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED); put_ldev(device); @@ -2160,9 +2197,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union ns.disk = os.disk; rv = _drbd_set_state(device, ns, flags, NULL); - if (rv < SS_SUCCESS) - BUG(); - + BUG_ON(rv < SS_SUCCESS); ns.i = device->state.i; ns_max.role = max_role(ns.role, ns_max.role); ns_max.peer = max_role(ns.peer, ns_max.peer); @@ -140,7 +140,7 @@ extern void drbd_resume_al(struct drbd_device *device); extern bool conn_all_vols_unconf(struct drbd_connection *connection); /** - * drbd_request_state() - Reqest a state change + * drbd_request_state() - Request a state change * @device: DRBD device. * @mask: mask of state bits to change. * @val: value of new state bits. 
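
The lost_contact_to_peer_data() predicate used above has two clauses that are easier to review as a truth table: losing a peer disk that held at least Inconsistent data, and a peer that resurfaces from DUnknown without its disk. A direct userspace transcription (the enum ordering is assumed to follow the D_* state names listed in drbd_strings.c below):

#include <stdbool.h>
#include <stdio.h>

enum disk_state {   /* assumed ordering, mirroring drbd_disk_s_names */
    D_DISKLESS, D_ATTACHING, D_FAILED, D_NEGOTIATING,
    D_INCONSISTENT, D_OUTDATED, D_UNKNOWN, D_CONSISTENT, D_UP_TO_DATE,
};

static bool lost_contact_to_peer_data(enum disk_state os, enum disk_state ns)
{
    /* clause 1: had usable (>= Inconsistent, known, not Outdated) data,
     * and now we no longer do */
    if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
    &&  (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
        return true;
    /* clause 2: frozen peer came back, but without its disk */
    if (os == D_UNKNOWN
    &&  (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
        return true;
    return false;
}

int main(void)
{
    struct { enum disk_state os, ns; } t[] = {
        { D_UP_TO_DATE, D_UNKNOWN },    /* connection loss: true */
        { D_UP_TO_DATE, D_FAILED },     /* peer disk died: true */
        { D_UNKNOWN, D_DISKLESS },      /* resumed without disk: true */
        { D_UP_TO_DATE, D_UP_TO_DATE }, /* nothing lost: false */
        { D_OUTDATED, D_UNKNOWN },      /* was already outdated: false */
    };
    for (unsigned i = 0; i < sizeof(t)/sizeof(t[0]); i++)
        printf("os=%d ns=%d -> %d\n", t[i].os, t[i].ns,
               lost_contact_to_peer_data(t[i].os, t[i].ns));
    return 0;
}
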
@@ -26,7 +26,7 @@ #include <linux/drbd.h> #include "drbd_strings.h" -static const char *drbd_conn_s_names[] = { +static const char * const drbd_conn_s_names[] = { [C_STANDALONE] = "StandAlone", [C_DISCONNECTING] = "Disconnecting", [C_UNCONNECTED] = "Unconnected", @@ -53,13 +53,13 @@ static const char *drbd_conn_s_names[] = { [C_BEHIND] = "Behind", }; -static const char *drbd_role_s_names[] = { +static const char * const drbd_role_s_names[] = { [R_PRIMARY] = "Primary", [R_SECONDARY] = "Secondary", [R_UNKNOWN] = "Unknown" }; -static const char *drbd_disk_s_names[] = { +static const char * const drbd_disk_s_names[] = { [D_DISKLESS] = "Diskless", [D_ATTACHING] = "Attaching", [D_FAILED] = "Failed", @@ -71,7 +71,7 @@ static const char *drbd_disk_s_names[] = { [D_UP_TO_DATE] = "UpToDate", }; -static const char *drbd_state_sw_errors[] = { +static const char * const drbd_state_sw_errors[] = { [-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config", [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data", [-SS_NO_LOCAL_DISK] = "Can not resync without local disk", @@ -173,8 +173,8 @@ void drbd_peer_request_endio(struct bio *bio) { struct drbd_peer_request *peer_req = bio->bi_private; struct drbd_device *device = peer_req->peer_device->device; - int is_write = bio_data_dir(bio) == WRITE; - int is_discard = !!(bio->bi_rw & REQ_DISCARD); + bool is_write = bio_data_dir(bio) == WRITE; + bool is_discard = !!(bio_op(bio) == REQ_OP_DISCARD); if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", @@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio) /* to avoid recursion in __req_mod */ if (unlikely(bio->bi_error)) { - if (bio->bi_rw & REQ_DISCARD) - what = (bio->bi_error == -EOPNOTSUPP) - ? DISCARD_COMPLETED_NOTSUPP - : DISCARD_COMPLETED_WITH_ERROR; - else - what = (bio_data_dir(bio) == WRITE) - ? WRITE_COMPLETED_WITH_ERROR - : (bio_rw(bio) == READ) - ? READ_COMPLETED_WITH_ERROR - : READ_AHEAD_COMPLETED_WITH_ERROR; - } else + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + if (bio->bi_error == -EOPNOTSUPP) + what = DISCARD_COMPLETED_NOTSUPP; + else + what = DISCARD_COMPLETED_WITH_ERROR; + break; + case REQ_OP_READ: + if (bio->bi_rw & REQ_RAHEAD) + what = READ_AHEAD_COMPLETED_WITH_ERROR; + else + what = READ_COMPLETED_WITH_ERROR; + break; + default: + what = WRITE_COMPLETED_WITH_ERROR; + break; + } + } else { what = COMPLETED_OK; + } bio_put(req->private_bio); req->private_bio = ERR_PTR(bio->bi_error); @@ -320,6 +328,10 @@ void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest) sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); ahash_request_set_crypt(req, &sg, NULL, sg.length); crypto_ahash_update(req); + /* REQ_OP_WRITE_SAME has only one segment, + * checksum the payload only once. */ + if (bio_op(bio) == REQ_OP_WRITE_SAME) + break; } ahash_request_set_crypt(req, NULL, digest, 0); crypto_ahash_final(req); @@ -387,7 +399,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, /* GFP_TRY, because if there is no memory available right now, this may * be rescheduled for later. It is "only" background resync, after all. 
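
The drbd_request_endio() rewrite above turns a nest of ternaries into a switch on bio_op(). Reduced to a pure classification function for illustration (error codes and names are local stand-ins; -95 mimics EOPNOTSUPP):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum op { OP_READ, OP_WRITE, OP_DISCARD };
enum what {
    COMPLETED_OK,
    WRITE_COMPLETED_WITH_ERROR,
    READ_COMPLETED_WITH_ERROR,
    READ_AHEAD_COMPLETED_WITH_ERROR,
    DISCARD_COMPLETED_NOTSUPP,
    DISCARD_COMPLETED_WITH_ERROR,
};

static enum what classify(int error, enum op op, bool rahead)
{
    if (!error)
        return COMPLETED_OK;
    switch (op) {
    case OP_DISCARD:    /* unsupported discards get their own event */
        return error == -95 ? DISCARD_COMPLETED_NOTSUPP
                            : DISCARD_COMPLETED_WITH_ERROR;
    case OP_READ:       /* read-ahead failure is legal, not retried */
        return rahead ? READ_AHEAD_COMPLETED_WITH_ERROR
                      : READ_COMPLETED_WITH_ERROR;
    default:
        return WRITE_COMPLETED_WITH_ERROR;
    }
}

int main(void)
{
    assert(classify(0, OP_WRITE, false) == COMPLETED_OK);
    assert(classify(-5, OP_READ, true) == READ_AHEAD_COMPLETED_WITH_ERROR);
    assert(classify(-95, OP_DISCARD, false) == DISCARD_COMPLETED_NOTSUPP);
    printf("ok\n");
    return 0;
}
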
*/ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, - size, true /* has real payload */, GFP_TRY); + size, size, GFP_TRY); if (!peer_req) goto defer; @@ -397,7 +409,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, spin_unlock_irq(&device->resource->req_lock); atomic_add(size >> 9, &device->rs_sect_ev); - if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) + if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, + DRBD_FAULT_RS_RD) == 0) return 0; /* If it failed because of ENOMEM, retry should help. If it failed @@ -582,6 +595,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel) int number, rollback_i, size; int align, requeue = 0; int i = 0; + int discard_granularity = 0; if (unlikely(cancel)) return 0; @@ -601,6 +615,12 @@ static int make_resync_request(struct drbd_device *const device, int cancel) return 0; } + if (connection->agreed_features & DRBD_FF_THIN_RESYNC) { + rcu_read_lock(); + discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity; + rcu_read_unlock(); + } + max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; number = drbd_rs_number_requests(device); if (number <= 0) @@ -665,6 +685,9 @@ next_sector: if (sector & ((1<<(align+3))-1)) break; + if (discard_granularity && size == discard_granularity) + break; + /* do not cross extent boundaries */ if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0) break; @@ -711,7 +734,8 @@ next_sector: int err; inc_rs_pending(device); - err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST, + err = drbd_send_drequest(peer_device, + size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST, sector, size, ID_SYNCER); if (err) { drbd_err(device, "drbd_send_drequest() failed, aborting...\n"); @@ -828,6 +852,7 @@ static void ping_peer(struct drbd_device *device) int drbd_resync_finished(struct drbd_device *device) { + struct drbd_connection *connection = first_peer_device(device)->connection; unsigned long db, dt, dbdt; unsigned long n_oos; union drbd_state os, ns; @@ -849,8 +874,7 @@ int drbd_resync_finished(struct drbd_device *device) if (dw) { dw->w.cb = w_resync_finished; dw->device = device; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, - &dw->w); + drbd_queue_work(&connection->sender_work, &dw->w); return 1; } drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n"); @@ -963,6 +987,30 @@ int drbd_resync_finished(struct drbd_device *device) _drbd_set_state(device, ns, CS_VERBOSE, NULL); out_unlock: spin_unlock_irq(&device->resource->req_lock); + + /* If we have been sync source, and have an effective fencing-policy, + * once *all* volumes are back in sync, call "unfence". 
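
make_resync_request() above sizes resync requests as before, but stops growing a request exactly at rs_discard_granularity so it can go out as P_RS_THIN_REQ, giving the sync source the chance to answer with P_RS_DEALLOCATED instead of shipping zeros. The selection rule, as a sketch with invented sizes:

#include <stdio.h>

static const char *pick_packet(unsigned size, unsigned discard_granularity)
{
    /* only an exact granularity match may be answered by a discard */
    return (discard_granularity && size == discard_granularity)
        ? "P_RS_THIN_REQ" : "P_RS_DATA_REQUEST";
}

int main(void)
{
    unsigned gran = 64 * 1024;  /* rs_discard_granularity, if agreed */

    printf("%u -> %s\n", 4096u, pick_packet(4096, gran));
    printf("%u -> %s\n", gran, pick_packet(gran, gran));
    printf("%u -> %s\n", gran, pick_packet(gran, 0)); /* feature off */
    return 0;
}
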
*/ + if (os.conn == C_SYNC_SOURCE) { + enum drbd_disk_state disk_state = D_MASK; + enum drbd_disk_state pdsk_state = D_MASK; + enum drbd_fencing_p fp = FP_DONT_CARE; + + rcu_read_lock(); + fp = rcu_dereference(device->ldev->disk_conf)->fencing; + if (fp != FP_DONT_CARE) { + struct drbd_peer_device *peer_device; + int vnr; + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { + struct drbd_device *device = peer_device->device; + disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk); + pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk); + } + } + rcu_read_unlock(); + if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE) + conn_khelper(connection, "unfence-peer"); + } + put_ldev(device); out: device->rs_total = 0; @@ -999,7 +1047,6 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_ /** * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST - * @device: DRBD device. * @w: work object. * @cancel: The connection will be closed anyways */ @@ -1035,6 +1082,30 @@ int w_e_end_data_req(struct drbd_work *w, int cancel) return err; } +static bool all_zero(struct drbd_peer_request *peer_req) +{ + struct page *page = peer_req->pages; + unsigned int len = peer_req->i.size; + + page_chain_for_each(page) { + unsigned int l = min_t(unsigned int, len, PAGE_SIZE); + unsigned int i, words = l / sizeof(long); + unsigned long *d; + + d = kmap_atomic(page); + for (i = 0; i < words; i++) { + if (d[i]) { + kunmap_atomic(d); + return false; + } + } + kunmap_atomic(d); + len -= l; + } + + return true; +} + /** * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST * @w: work object. @@ -1063,7 +1134,10 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel) } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { if (likely(device->state.pdsk >= D_INCONSISTENT)) { inc_rs_pending(device); - err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); + if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req)) + err = drbd_send_rs_deallocated(peer_device, peer_req); + else + err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req); } else { if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "Not sending RSDataReply, " @@ -1633,7 +1707,7 @@ static bool use_checksum_based_resync(struct drbd_connection *connection, struct rcu_read_unlock(); return connection->agreed_pro_version >= 89 && /* supported? */ connection->csums_tfm && /* configured? */ - (csums_after_crash_only == 0 /* use for each resync? */ + (csums_after_crash_only == false /* use for each resync? */ || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */ } @@ -1768,7 +1842,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) device->bm_resync_fo = 0; device->use_csums = use_checksum_based_resync(connection, device); } else { - device->use_csums = 0; + device->use_csums = false; } /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid @@ -3822,8 +3822,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; + bio_set_op_attrs(&bio, REQ_OP_READ, 0); - submit_bio(READ, &bio); + submit_bio(&bio); process_fd_request(); init_completion(&cbdata.complete); @@ -4349,8 +4350,7 @@ static int __init do_floppy_init(void) /* to be cleaned up... 
*/ disks[drive]->private_data = (void *)(long)drive; disks[drive]->flags |= GENHD_FL_REMOVABLE; - disks[drive]->driverfs_dev = &floppy_device[drive].dev; - add_disk(disks[drive]); + device_add_disk(&floppy_device[drive].dev, disks[drive]); } return 0; @@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq) static inline void handle_partial_read(struct loop_cmd *cmd, long bytes) { - if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE)) + if (bytes < 0 || op_is_write(req_op(cmd->rq))) return; if (unlikely(bytes < blk_rq_bytes(cmd->rq))) { @@ -541,10 +541,10 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq) pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; - if (rq->cmd_flags & REQ_WRITE) { - if (rq->cmd_flags & REQ_FLUSH) + if (op_is_write(req_op(rq))) { + if (req_op(rq) == REQ_OP_FLUSH) ret = lo_req_flush(lo, rq); - else if (rq->cmd_flags & REQ_DISCARD) + else if (req_op(rq) == REQ_OP_DISCARD) ret = lo_discard(lo, rq, pos); else if (lo->transfer) ret = lo_write_transfer(lo, rq, pos); @@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, if (lo->lo_state != Lo_bound) return -EIO; - if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH | - REQ_DISCARD))) + if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH && + req_op(cmd->rq) != REQ_OP_DISCARD)) cmd->use_aio = true; else cmd->use_aio = false; @@ -1672,7 +1672,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { - const bool write = cmd->rq->cmd_flags & REQ_WRITE; + const bool write = op_is_write(req_op(cmd->rq)); struct loop_device *lo = cmd->rq->q->queuedata; int ret = 0; @@ -1765,6 +1765,7 @@ static int loop_add(struct loop_device **l, int i) */ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + err = -ENOMEM; disk = lo->lo_disk = alloc_disk(1 << part_shift); if (!disk) goto out_free_queue; @@ -687,15 +687,13 @@ static unsigned int mg_issue_req(struct request *req, unsigned int sect_num, unsigned int sect_cnt) { - switch (rq_data_dir(req)) { - case READ: + if (rq_data_dir(req) == READ) { if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) != MG_ERR_NONE) { mg_bad_rw_intr(host); return host->error; } - break; - case WRITE: + } else { /* TODO : handler */ outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) @@ -714,7 +712,6 @@ static unsigned int mg_issue_req(struct request *req, mod_timer(&host->timer, jiffies + 3 * HZ); outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); - break; } return MG_ERR_NONE; } @@ -1018,7 +1015,7 @@ probe_err_7: probe_err_6: blk_cleanup_queue(host->breq); probe_err_5: - unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME); + unregister_blkdev(host->major, MG_DISK_NAME); probe_err_4: if (!prv_data->use_polling) free_irq(host->irq, host); @@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) return -ENODATA; } - if (rq->cmd_flags & REQ_DISCARD) { + if (req_op(rq) == REQ_OP_DISCARD) { int err; err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); @@ -3956,7 +3956,6 @@ static int mtip_block_initialize(struct driver_data *dd) if (rv) goto disk_index_error; - dd->disk->driverfs_dev = &dd->pdev->dev; dd->disk->major = dd->major; dd->disk->first_minor = index * MTIP_MAX_MINORS; dd->disk->minors = MTIP_MAX_MINORS; @@ -4008,7 +4007,7 @@ skip_create_disk: /* * if rebuild pending, start the service thread, and delay the block
queue creation and add_disk() + * queue creation and device_add_disk() */ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) goto start_service_thread; @@ -4042,7 +4041,7 @@ skip_create_disk: set_capacity(dd->disk, capacity); /* Enable the block device and add it to /dev */ - add_disk(dd->disk); + device_add_disk(&dd->pdev->dev, dd->disk); dd->bdev = bdget_disk(dd->disk, 0); /* @@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (req->cmd_type == REQ_TYPE_DRV_PRIV) type = NBD_CMD_DISC; - else if (req->cmd_flags & REQ_DISCARD) + else if (req_op(req) == REQ_OP_DISCARD) type = NBD_CMD_TRIM; - else if (req->cmd_flags & REQ_FLUSH) + else if (req_op(req) == REQ_OP_FLUSH) type = NBD_CMD_FLUSH; else if (rq_data_dir(req) == WRITE) type = NBD_CMD_WRITE; @@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); - debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); + debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); return 0; } @@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) struct request *rq; struct bio *bio = rqd->bio; - rq = blk_mq_alloc_request(q, bio_rw(bio), 0); + rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); if (IS_ERR(rq)) return -ENOMEM; @@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q) * driver-specific, etc. */ - do_flush = rq->cmd_flags & REQ_FLUSH; + do_flush = (req_op(rq) == REQ_OP_FLUSH); do_write = (rq_data_dir(rq) == WRITE); if (!do_flush) { /* osd_flush does not use a bio */ @@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) BUG(); atomic_inc(&pkt->io_wait); - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); pkt_queue_bio(pd, bio); frames_read++; } @@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) /* Start the write request */ atomic_set(&pkt->io_wait, 1); - pkt->w_bio->bi_rw = WRITE; + bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0); pkt_queue_bio(pd, pkt->w_bio); } @@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); while ((req = blk_fetch_request(q))) { - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { if (ps3disk_submit_flush_request(dev, req)) break; } else if (req->cmd_type == REQ_TYPE_FS) { @@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data) return IRQ_HANDLED; } - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { read = 0; op = "flush"; } else { @@ -487,7 +487,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) gendisk->fops = &ps3disk_fops; gendisk->queue = queue; gendisk->private_data = dev; - gendisk->driverfs_dev = &dev->sbd.core; snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME, devidx+'a'); priv->blocking_factor = dev->blk_size >> 9; @@ -499,7 +498,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) gendisk->disk_name, priv->model, priv->raw_capacity >> 11, get_capacity(gendisk) >> 11); - add_disk(gendisk); + device_add_disk(&dev->sbd.core, gendisk); return 0; fail_cleanup_queue: @@ -773,14 +773,13 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev) gendisk->fops = &ps3vram_fops; gendisk->queue = queue; 
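/*
 * Editor's note (added): two mechanical conversions repeat throughout the
 * hunks above and below. Request-type tests move from cmd_flags bit checks
 * to the req_op()/op_is_write() accessors, and the driverfs_dev-then-
 * add_disk() pair collapses into device_add_disk(). A minimal sketch of
 * both patterns, assuming the usual <linux/blkdev.h> declarations; the
 * example_* helpers are illustrative only and not part of any patch here:
 */
static inline void example_register_disk(struct device *parent,
					 struct gendisk *disk)
{
	/* old: disk->driverfs_dev = parent; add_disk(disk); */
	device_add_disk(parent, disk);	/* sets the parent and registers */
}

static inline bool example_is_plain_write(struct request *rq)
{
	/* old: (rq->cmd_flags & REQ_WRITE) plus REQ_FLUSH/REQ_DISCARD tests */
	return op_is_write(req_op(rq)) &&
	       req_op(rq) != REQ_OP_FLUSH &&
	       req_op(rq) != REQ_OP_DISCARD;
}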
gendisk->private_data = dev; - gendisk->driverfs_dev = &dev->core; strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name)); set_capacity(gendisk, priv->size >> 9); dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n", gendisk->disk_name, get_capacity(gendisk) >> 11); - add_disk(gendisk); + device_add_disk(&dev->core, gendisk); return 0; fail_cleanup_queue: @@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work) goto err; } - if (rq->cmd_flags & REQ_DISCARD) + if (req_op(rq) == REQ_OP_DISCARD) op_type = OBJ_OP_DISCARD; - else if (rq->cmd_flags & REQ_WRITE) + else if (req_op(rq) == REQ_OP_WRITE) op_type = OBJ_OP_WRITE; else op_type = OBJ_OP_READ; @@ -230,8 +230,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card) set_capacity(card->gendisk, card->size8 >> 9); else set_capacity(card->gendisk, 0); - add_disk(card->gendisk); - + device_add_disk(CARD_TO_DEV(card), card->gendisk); card->bdev_attached = 1; } @@ -308,7 +307,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name), "rsxx%d", card->disk_id); - card->gendisk->driverfs_dev = &card->dev->dev; card->gendisk->major = card->major; card->gendisk->first_minor = 0; card->gendisk->fops = &rsxx_fops; @@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, dma_cnt[i] = 0; } - if (bio->bi_rw & REQ_DISCARD) { + if (bio_op(bio) == REQ_OP_DISCARD) { bv_len = bio->bi_iter.bi_size; while (bv_len > 0) { @@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q) data_dir = rq_data_dir(req); io_flags = req->cmd_flags; - if (io_flags & REQ_FLUSH) + if (req_op(req) == REQ_OP_FLUSH) flush++; if (io_flags & REQ_FUA) @@ -4690,10 +4690,10 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) return -EIO; } -static int skd_bdev_attach(struct skd_device *skdev) +static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) { pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); - add_disk(skdev->disk); + device_add_disk(parent, skdev->disk); return 0; } @@ -4812,8 +4812,6 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, skdev); - skdev->disk->driverfs_dev = &pdev->dev; - for (i = 0; i < SKD_MAX_BARS; i++) { skdev->mem_phys[i] = pci_resource_start(pdev, i); skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); @@ -4851,7 +4849,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (SKD_START_WAIT_SECONDS * HZ)); if (skdev->gendisk_on > 0) { /* device came on-line after reset */ - skd_bdev_attach(skdev); + skd_bdev_attach(&pdev->dev, skdev); rc = 0; } else { /* we timed out, something is wrong with the device, @@ -804,7 +804,6 @@ static int probe_disk(struct vdc_port *port) g->fops = &vdc_fops; g->queue = q; g->private_data = port; - g->driverfs_dev = &port->vio.vdev->dev; set_capacity(g, port->vdisk_size); @@ -835,7 +834,7 @@ static int probe_disk(struct vdc_port *port) port->vdisk_size, (port->vdisk_size >> (20 - 9)), port->vio.ver.major, port->vio.ver.minor); - add_disk(g); + device_add_disk(&port->vio.vdev->dev, g); return 0; } @@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card) int offset; struct bio *bio; struct bio_vec vec; - int rw; bio = card->currentbio; if (!bio && card->bio) { @@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card) if (!bio) return 0; - rw = bio_rw(bio); if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) return 0; @@ -369,7 +367,7 @@ static int 
add_bio(struct cardinfo *card) vec.bv_page, vec.bv_offset, vec.bv_len, - (rw == READ) ? + bio_op(bio) == REQ_OP_READ ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); p = &card->mm_pages[card->Ready]; @@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card) DMASCR_CHAIN_EN | DMASCR_SEM_EN | pci_cmds); - if (rw == WRITE) + if (bio_op(bio) == REQ_OP_WRITE) desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); desc->sem_control_bits = desc->control_bits; @@ -462,7 +460,7 @@ static void process_page(unsigned long data) le32_to_cpu(desc->local_addr)>>9, le32_to_cpu(desc->transfer_size)); dump_dmastat(card, control); - } else if ((bio->bi_rw & REQ_WRITE) && + } else if (op_is_write(bio_op(bio)) && le32_to_cpu(desc->local_addr) >> 9 == card->init_size) { card->init_size += le32_to_cpu(desc->transfer_size) >> 9; @@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); vbr->req = req; - if (req->cmd_flags & REQ_FLUSH) { + if (req_op(req) == REQ_OP_FLUSH) { vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); @@ -236,25 +236,22 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, static int virtblk_get_id(struct gendisk *disk, char *id_str) { struct virtio_blk *vblk = disk->private_data; + struct request_queue *q = vblk->disk->queue; struct request *req; - struct bio *bio; int err; - bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, - GFP_KERNEL); - if (IS_ERR(bio)) - return PTR_ERR(bio); - - req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); - if (IS_ERR(req)) { - bio_put(bio); + req = blk_get_request(q, READ, GFP_KERNEL); + if (IS_ERR(req)) return PTR_ERR(req); - } - req->cmd_type = REQ_TYPE_DRV_PRIV; + + err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); + if (err) + goto out; + err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); +out: blk_put_request(req); - return err; } @@ -656,7 +653,6 @@ static int virtblk_probe(struct virtio_device *vdev) vblk->disk->first_minor = index_to_minor(index); vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; - vblk->disk->driverfs_dev = &vdev->dev; vblk->disk->flags |= GENHD_FL_EXT_DEVT; vblk->index = index; @@ -733,7 +729,7 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_device_ready(vdev); - add_disk(vblk->disk); + device_add_disk(&vdev->dev, vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) goto out_del_disk; @@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; - if ((operation != READ) && vbd->readonly) + if ((operation != REQ_OP_READ) && vbd->readonly) goto out; if (likely(req->nr_sects)) { @@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring, preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; - err = xen_vbd_translate(&preq, blkif, WRITE); + err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE); if (err) { pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, @@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, struct bio **biolist = pending_req->biolist; int i, nbio = 0; int operation; + int operation_flags = 0; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; @@ -1247,17 
+1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, switch (req_operation) { case BLKIF_OP_READ: ring->st_rd_req++; - operation = READ; + operation = REQ_OP_READ; break; case BLKIF_OP_WRITE: ring->st_wr_req++; - operation = WRITE_ODIRECT; + operation = REQ_OP_WRITE; + operation_flags = WRITE_ODIRECT; break; case BLKIF_OP_WRITE_BARRIER: drain = true; case BLKIF_OP_FLUSH_DISKCACHE: ring->st_f_req++; - operation = WRITE_FLUSH; + operation = REQ_OP_WRITE; + operation_flags = WRITE_FLUSH; break; default: operation = 0; /* make gcc happy */ @@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, nseg = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.nr_segments : req->u.rw.nr_segments; - if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || + if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) || unlikely((req->operation != BLKIF_OP_INDIRECT) && (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && @@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", - operation == READ ? "read" : "write", + operation == REQ_OP_READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, ring->blkif->vbd.pdevice); @@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_iter.bi_sector = preq.sector_number; + bio_set_op_attrs(bio, operation, operation_flags); } preq.sector_number += seg[i].nsec; @@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, /* This will be hit if the operation was a flush or discard. */ if (!bio) { - BUG_ON(operation != WRITE_FLUSH); + BUG_ON(operation_flags != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) @@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; + bio_set_op_attrs(bio, operation, operation_flags); } atomic_set(&pending_req->pendcnt, nbio); blk_start_plug(&plug); for (i = 0; i < nbio; i++) - submit_bio(operation, biolist[i]); + submit_bio(biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); - if (operation == READ) + if (operation == REQ_OP_READ) ring->st_rd_sect += preq.nr_sects; - else if (operation & WRITE) + else if (operation == REQ_OP_WRITE) ring->st_wr_sect += preq.nr_sects; return 0; @@ -379,7 +379,7 @@ static struct attribute *xen_vbdstat_attrs[] = { NULL }; -static struct attribute_group xen_vbdstat_group = { +static const struct attribute_group xen_vbdstat_group = { .name = "statistics", .attrs = xen_vbdstat_attrs, }; @@ -480,7 +480,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags)) vbd->flush_support = true; - if (q && blk_queue_secdiscard(q)) + if (q && blk_queue_secure_erase(q)) vbd->discard_secure = true; pr_debug("Successful creation of handle=%04x (dom=%u)\n", @@ -715,8 +715,11 @@ static void backend_changed(struct xenbus_watch *watch, /* Front end dir is a number, which is used as the handle. 
*/ err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); - if (err) + if (err) { + kfree(be->mode); + be->mode = NULL; return; + } be->major = major; be->minor = minor; @@ -1022,9 +1025,9 @@ static int connect_ring(struct backend_info *be) pr_debug("%s %s\n", __func__, dev->otherend); be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; - err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", - "%63s", protocol, NULL); - if (err) + err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol", + "%63s", protocol); + if (err <= 0) strcpy(protocol, "unspecified, assuming default"); else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; @@ -1036,10 +1039,9 @@ static int connect_ring(struct backend_info *be) xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); return -ENOSYS; } - err = xenbus_gather(XBT_NIL, dev->otherend, - "feature-persistent", "%u", - &pers_grants, NULL); - if (err) + err = xenbus_scanf(XBT_NIL, dev->otherend, + "feature-persistent", "%u", &pers_grants); + if (err <= 0) pers_grants = 0; be->blkif->vbd.feature_gnt_persistent = pers_grants; @@ -196,6 +196,7 @@ struct blkfront_info unsigned int nr_ring_pages; struct request_queue *rq; unsigned int feature_flush; + unsigned int feature_fua; unsigned int feature_discard:1; unsigned int feature_secdiscard:1; unsigned int discard_granularity; @@ -207,6 +208,9 @@ struct blkfront_info struct blk_mq_tag_set tag_set; struct blkfront_ring_info *rinfo; unsigned int nr_rings; + /* Save uncomplete reqs and bios for migration. */ + struct list_head requests; + struct bio_list bio_list; }; static unsigned int nr_minors; @@ -544,7 +548,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf ring_req->u.discard.nr_sectors = blk_rq_sectors(req); ring_req->u.discard.id = id; ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req); - if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) + if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; else ring_req->u.discard.flag = 0; @@ -743,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ - BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); + BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; @@ -755,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; - if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { + if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { /* * Ideally we can do an unordered flush-to-disk. * In case the backend onlysupports barriers, use that. @@ -763,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri * implement it the same way. (It's also a FLUSH+FUA, * since it is guaranteed ordered WRT previous writes.) 
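*
* Editor's note (added): with feature_flush and feature_fua now tracked
* as separate booleans, the ladder below encodes:
*   feature_flush && feature_fua -> BLKIF_OP_WRITE_BARRIER
*   feature_flush only           -> BLKIF_OP_FLUSH_DISKCACHE
*   neither                      -> 0 (ordinary write)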
*/ - switch (info->feature_flush & - ((REQ_FLUSH|REQ_FUA))) { - case REQ_FLUSH|REQ_FUA: + if (info->feature_flush && info->feature_fua) ring_req->operation = BLKIF_OP_WRITE_BARRIER; - break; - case REQ_FLUSH: + else if (info->feature_flush) ring_req->operation = BLKIF_OP_FLUSH_DISKCACHE; - break; - default: + else ring_req->operation = 0; - } } ring_req->u.rw.nr_segments = num_grant; if (unlikely(require_extra_req)) { @@ -844,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED)) return 1; - if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) + if (unlikely(req_op(req) == REQ_OP_DISCARD || + req_op(req) == REQ_OP_SECURE_ERASE)) return blkif_queue_discard_req(req, rinfo); else return blkif_queue_rw_req(req, rinfo); @@ -864,18 +864,22 @@ static inline bool blkif_request_flush_invalid(struct request *req, struct blkfront_info *info) { return ((req->cmd_type != REQ_TYPE_FS) || - ((req->cmd_flags & REQ_FLUSH) && - !(info->feature_flush & REQ_FLUSH)) || + ((req_op(req) == REQ_OP_FLUSH) && + !info->feature_flush) || ((req->cmd_flags & REQ_FUA) && - !(info->feature_flush & REQ_FUA))); + !info->feature_fua)); } static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *qd) { unsigned long flags; - struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; + int qid = hctx->queue_num; + struct blkfront_info *info = hctx->queue->queuedata; + struct blkfront_ring_info *rinfo = NULL; + BUG_ON(info->nr_rings <= qid); + rinfo = &info->rinfo[qid]; blk_mq_start_request(qd->rq); spin_lock_irqsave(&rinfo->ring_lock, flags); if (RING_FULL(&rinfo->ring)) @@ -901,20 +905,9 @@ out_busy: return BLK_MQ_RQ_QUEUE_BUSY; } -static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int index) -{ - struct blkfront_info *info = (struct blkfront_info *)data; - - BUG_ON(info->nr_rings <= index); - hctx->driver_data = &info->rinfo[index]; - return 0; -} - static struct blk_mq_ops blkfront_mq_ops = { .queue_rq = blkif_queue_rq, .map_queue = blk_mq_map_queue, - .init_hctx = blk_mq_init_hctx, }; static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, @@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, return PTR_ERR(rq); } + rq->queuedata = info; queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); if (info->feature_discard) { @@ -958,7 +952,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) - queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); + queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. 
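*
* Editor's note (added): the SECDISCARD -> SECERASE switches in this file
* track the block-core rename of REQ_SECURE to REQ_OP_SECURE_ERASE and
* QUEUE_FLAG_SECDISCARD to QUEUE_FLAG_SECERASE; the semantics of a
* data-destroying discard are unchanged.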
*/ @@ -984,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, return 0; } -static const char *flush_info(unsigned int feature_flush) +static const char *flush_info(struct blkfront_info *info) { - switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) { - case REQ_FLUSH|REQ_FUA: + if (info->feature_flush && info->feature_fua) return "barrier: enabled;"; - case REQ_FLUSH: + else if (info->feature_flush) return "flush diskcache: enabled;"; - default: + else return "barrier or flush: disabled;"; - } } static void xlvbd_flush(struct blkfront_info *info) { - blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH, - info->feature_flush & REQ_FUA); + blk_queue_write_cache(info->rq, info->feature_flush ? true : false, + info->feature_fua ? true : false); pr_info("blkfront: %s: %s %s %s %s %s\n", - info->gd->disk_name, flush_info(info->feature_flush), + info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); @@ -1142,7 +1134,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; - gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, @@ -1600,7 +1591,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); - queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); + queue_flag_clear(QUEUE_FLAG_SECERASE, rq); } blk_mq_complete_request(req, error); break; @@ -1620,6 +1611,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) if (unlikely(error)) { if (error == -EOPNOTSUPP) error = 0; + info->feature_fua = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -2008,69 +2000,22 @@ static int blkif_recover(struct blkfront_info *info) { unsigned int i, r_index; struct request *req, *n; - struct blk_shadow *copy; int rc; struct bio *bio, *cloned_bio; - struct bio_list bio_list, merge_bio; unsigned int segs, offset; int pending, size; struct split_bio *split_bio; - struct list_head requests; blkfront_gather_backend_features(info); segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs); - bio_list_init(&bio_list); - INIT_LIST_HEAD(&requests); for (r_index = 0; r_index < info->nr_rings; r_index++) { - struct blkfront_ring_info *rinfo; - - rinfo = &info->rinfo[r_index]; - /* Stage 1: Make a safe copy of the shadow state. */ - copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow), - GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); - if (!copy) - return -ENOMEM; - - /* Stage 2: Set up free list. */ - memset(&rinfo->shadow, 0, sizeof(rinfo->shadow)); - for (i = 0; i < BLK_RING_SIZE(info); i++) - rinfo->shadow[i].req.u.rw.id = i+1; - rinfo->shadow_free = rinfo->ring.req_prod_pvt; - rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; + struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; rc = blkfront_setup_indirect(rinfo); - if (rc) { - kfree(copy); + if (rc) return rc; - } - - for (i = 0; i < BLK_RING_SIZE(info); i++) { - /* Not in use? */ - if (!copy[i].request) - continue; - - /* - * Get the bios in the request so we can re-queue them. 
- */ - if (copy[i].request->cmd_flags & - (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) { - /* - * Flush operations don't contain bios, so - * we need to requeue the whole request - */ - list_add(&copy[i].request->queuelist, &requests); - continue; - } - merge_bio.head = copy[i].request->bio; - merge_bio.tail = copy[i].request->biotail; - bio_list_merge(&bio_list, &merge_bio); - copy[i].request->bio = NULL; - blk_end_request_all(copy[i].request, 0); - } - - kfree(copy); } xenbus_switch_state(info->xbdev, XenbusStateConnected); @@ -2085,7 +2030,7 @@ static int blkif_recover(struct blkfront_info *info) kick_pending_request_queues(rinfo); } - list_for_each_entry_safe(req, n, &requests, queuelist) { + list_for_each_entry_safe(req, n, &info->requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); @@ -2093,7 +2038,7 @@ blk_mq_kick_requeue_list(info->rq); - while ((bio = bio_list_pop(&bio_list)) != NULL) { + while ((bio = bio_list_pop(&info->bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* @@ -2114,7 +2059,7 @@ bio_trim(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; - submit_bio(cloned_bio->bi_rw, cloned_bio); + submit_bio(cloned_bio); } /* * Now we have to wait for all those smaller bios to @@ -2123,7 +2068,7 @@ continue; } /* We don't need to split this bio */ - submit_bio(bio->bi_rw, bio); + submit_bio(bio); } return 0; @@ -2139,9 +2084,47 @@ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err = 0; + unsigned int i, j; dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); + bio_list_init(&info->bio_list); + INIT_LIST_HEAD(&info->requests); + for (i = 0; i < info->nr_rings; i++) { + struct blkfront_ring_info *rinfo = &info->rinfo[i]; + struct bio_list merge_bio; + struct blk_shadow *shadow = rinfo->shadow; + + for (j = 0; j < BLK_RING_SIZE(info); j++) { + /* Not in use? */ + if (!shadow[j].request) + continue; + + /* + * Get the bios in the request so we can re-queue them. + */ + if (req_op(shadow[j].request) == REQ_OP_FLUSH || + req_op(shadow[j].request) == REQ_OP_DISCARD || + req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || + shadow[j].request->cmd_flags & REQ_FUA) { + /* + * Flush operations don't contain bios, so + * we need to requeue the whole request + * + * XXX: but this doesn't make any sense for a + * write with the FUA flag set.. 
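+ *
+ * Editor's note (added): this scan now runs in blkfront_resume(), i.e.
+ * before blkif_free() tears the rings down, so in-flight work survives
+ * the migration: bio-less requests are parked on info->requests,
+ * everything else is stripped to bios on info->bio_list, and
+ * blkif_recover() replays both once the new rings are connected.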
+ */ + list_add(&shadow[j].request->queuelist, &info->requests); + continue; + } + merge_bio.head = shadow[j].request->bio; + merge_bio.tail = shadow[j].request->biotail; + bio_list_merge(&info->bio_list, &merge_bio); + shadow[j].request->bio = NULL; + blk_mq_end_request(shadow[j].request, 0); + } + } + blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = negotiate_mq(info); @@ -2149,6 +2132,8 @@ static int blkfront_resume(struct xenbus_device *dev) return err; err = talk_to_blkback(dev, info); + if (!err) + blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); /* * We have to wait for the backend to switch to @@ -2212,10 +2197,9 @@ static void blkfront_setup_discard(struct blkfront_info *info) info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-secure", "%d", &discard_secure, - NULL); - if (!err) + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "discard-secure", "%u", &discard_secure); + if (err > 0) info->feature_secdiscard = !!discard_secure; } @@ -2313,10 +2297,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) unsigned int indirect_segments; info->feature_flush = 0; + info->feature_fua = 0; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%d", &barrier, - NULL); + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means @@ -2325,38 +2309,40 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) * * If there are barriers, then we use flush. */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; + if (err > 0 && barrier) { + info->feature_flush = 1; + info->feature_fua = 1; + } + /* * And if there is "feature-flush-cache" use that above * barriers. 
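*
* Editor's note (added): unlike xenbus_gather(), which returns 0 on
* success, xenbus_scanf() follows the sscanf() convention and returns
* the number of values parsed (or a negative errno), hence the
* "err <= 0" tests below that treat an absent or unparsable key as
* "feature off", e.g.:
*
*   err = xenbus_scanf(XBT_NIL, dir, "feature-discard", "%d", &discard);
*   if (err <= 0)
*           discard = 0;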
*/ - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-flush-cache", "%d", &flush, - NULL); + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush); - if (!err && flush) - info->feature_flush = REQ_FLUSH; + if (err > 0 && flush) { + info->feature_flush = 1; + info->feature_fua = 0; + } - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-discard", "%d", &discard, - NULL); + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-discard", "%d", &discard); - if (!err && discard) + if (err > 0 && discard) blkfront_setup_discard(info); - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-persistent", "%u", &persistent, - NULL); - if (err) + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-persistent", "%d", &persistent); + if (err <= 0) info->feature_persistent = 0; else info->feature_persistent = persistent; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-max-indirect-segments", "%u", &indirect_segments, - NULL); - if (err) + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-max-indirect-segments", "%u", + &indirect_segments); + if (err <= 0) info->max_indirect_segments = 0; else info->max_indirect_segments = min(indirect_segments, @@ -2456,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info) for (i = 0; i < info->nr_rings; i++) kick_pending_request_queues(&info->rinfo[i]); - add_disk(info->gd); + device_add_disk(&info->xbdev->dev, info->gd); info->is_ready = 1; } @@ -2485,10 +2471,23 @@ static void blkback_changed(struct xenbus_device *dev, break; case XenbusStateConnected: - if (dev->state != XenbusStateInitialised) { + /* + * talk_to_blkback sets state to XenbusStateInitialised + * and blkfront_connect sets it to XenbusStateConnected + * (if connection went OK). + * + * If the backend (or toolstack) decides to poke at backend + * state (and re-trigger the watch by setting the state repeatedly + * to XenbusStateConnected (4)) we need to deal with this. + * This is allowed as this is used to communicate to the guest + * that the size of disk has changed! + */ + if ((dev->state != XenbusStateInitialised) && + (dev->state != XenbusStateConnected)) { if (talk_to_blkback(dev, info)) break; } + blkfront_connect(info); break; @@ -1,8 +1,7 @@ config ZRAM tristate "Compressed RAM block device support" - depends on BLOCK && SYSFS && ZSMALLOC - select LZO_COMPRESS - select LZO_DECOMPRESS + depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO + select CRYPTO_LZO default n help Creates virtual block devices called /dev/zramX (X = 0, 1, ...). @@ -14,13 +13,3 @@ config ZRAM disks and maybe many more. See zram.txt for more information. - -config ZRAM_LZ4_COMPRESS - bool "Enable LZ4 algorithm support" - depends on ZRAM - select LZ4_COMPRESS - select LZ4_DECOMPRESS - default n - help - This option enables LZ4 compression algorithm support. Compression - algorithm can be changed using `comp_algorithm' device attribute.
\ No newline at end of file @@ -1,5 +1,3 @@ -zram-y := zcomp_lzo.o zcomp.o zram_drv.o - -zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o +zram-y := zcomp.o zram_drv.o obj-$(CONFIG_ZRAM) += zram.o @@ -14,108 +14,150 @@ #include <linux/wait.h> #include <linux/sched.h> #include <linux/cpu.h> +#include <linux/crypto.h> #include "zcomp.h" -#include "zcomp_lzo.h" -#ifdef CONFIG_ZRAM_LZ4_COMPRESS -#include "zcomp_lz4.h" -#endif -static struct zcomp_backend *backends[] = { - &zcomp_lzo, -#ifdef CONFIG_ZRAM_LZ4_COMPRESS - &zcomp_lz4, +static const char * const backends[] = { + "lzo", +#if IS_ENABLED(CONFIG_CRYPTO_LZ4) + "lz4", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE) + "deflate", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC) + "lz4hc", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_842) + "842", #endif NULL }; -static struct zcomp_backend *find_backend(const char *compress) -{ - int i = 0; - while (backends[i]) { - if (sysfs_streq(compress, backends[i]->name)) - break; - i++; - } - return backends[i]; -} - -static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) +static void zcomp_strm_free(struct zcomp_strm *zstrm) { - if (zstrm->private) - comp->backend->destroy(zstrm->private); + if (!IS_ERR_OR_NULL(zstrm->tfm)) + crypto_free_comp(zstrm->tfm); free_pages((unsigned long)zstrm->buffer, 1); kfree(zstrm); } /* - * allocate new zcomp_strm structure with ->private initialized by + * allocate new zcomp_strm structure with ->tfm initialized by * backend, return NULL on error */ -static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) +static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp) { - struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), flags); + struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL); if (!zstrm) return NULL; - zstrm->private = comp->backend->create(flags); + zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0); /* * allocate 2 pages. 1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ - zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1); - if (!zstrm->private || !zstrm->buffer) { - zcomp_strm_free(comp, zstrm); + zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); + if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) { + zcomp_strm_free(zstrm); zstrm = NULL; } return zstrm; } +bool zcomp_available_algorithm(const char *comp) +{ + int i = 0; + + while (backends[i]) { + if (sysfs_streq(comp, backends[i])) + return true; + i++; + } + + /* + * Crypto does not ignore a trailing new line symbol, + * so make sure you don't supply a string containing + * one. + * This also means that we permit zcomp initialisation + * with any compressing algorithm known to crypto api. + */ + return crypto_has_comp(comp, 0, 0) == 1; +} + /* show available compressors */ ssize_t zcomp_available_show(const char *comp, char *buf) { + bool known_algorithm = false; ssize_t sz = 0; int i = 0; - while (backends[i]) { - if (!strcmp(comp, backends[i]->name)) + for (; backends[i]; i++) { + if (!strcmp(comp, backends[i])) { + known_algorithm = true; sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", backends[i]->name); - else + "[%s] ", backends[i]); + } else { sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "%s ", backends[i]->name); - i++; + "%s ", backends[i]); + } } + + /* + * Out-of-tree module known to crypto api or a missing + * entry in `backends'. 
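+ *
+ * Editor's note (added): in practice any compressor the crypto API can
+ * instantiate becomes selectable at runtime, not only the ones listed
+ * in `backends', e.g. (assuming a zram0 device exists):
+ *
+ *   echo deflate > /sys/block/zram0/comp_algorithm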
+ */ + if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1) + sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, + "[%s] ", comp); + sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); return sz; } -bool zcomp_available_algorithm(const char *comp) -{ - return find_backend(comp) != NULL; -} - -struct zcomp_strm *zcomp_strm_find(struct zcomp *comp) +struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { return *get_cpu_ptr(comp->stream); } -void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm) +void zcomp_stream_put(struct zcomp *comp) { put_cpu_ptr(comp->stream); } -int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, - const unsigned char *src, size_t *dst_len) +int zcomp_compress(struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len) { - return comp->backend->compress(src, zstrm->buffer, dst_len, - zstrm->private); + /* + * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized + * because sometimes we can endup having a bigger compressed data + * due to various reasons: for example compression algorithms tend + * to add some padding to the compressed buffer. Speaking of padding, + * comp algorithm `842' pads the compressed length to multiple of 8 + * and returns -ENOSP when the dst memory is not big enough, which + * is not something that ZRAM wants to see. We can handle the + * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we + * receive -ERRNO from the compressing backend we can't help it + * anymore. To make `842' happy we need to tell the exact size of + * the dst buffer, zram_drv will take care of the fact that + * compressed buffer is too big. + */ + *dst_len = PAGE_SIZE * 2; + + return crypto_comp_compress(zstrm->tfm, + src, PAGE_SIZE, + zstrm->buffer, dst_len); } -int zcomp_decompress(struct zcomp *comp, const unsigned char *src, - size_t src_len, unsigned char *dst) +int zcomp_decompress(struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst) { - return comp->backend->decompress(src, src_len, dst); + unsigned int dst_len = PAGE_SIZE; + + return crypto_comp_decompress(zstrm->tfm, + src, src_len, + dst, &dst_len); } static int __zcomp_cpu_notifier(struct zcomp *comp, @@ -127,7 +169,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_PREPARE: if (WARN_ON(*per_cpu_ptr(comp->stream, cpu))) break; - zstrm = zcomp_strm_alloc(comp, GFP_KERNEL); + zstrm = zcomp_strm_alloc(comp); if (IS_ERR_OR_NULL(zstrm)) { pr_err("Can't allocate a compression stream\n"); return NOTIFY_BAD; @@ -138,7 +180,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_CANCELED: zstrm = *per_cpu_ptr(comp->stream, cpu); if (!IS_ERR_OR_NULL(zstrm)) - zcomp_strm_free(comp, zstrm); + zcomp_strm_free(zstrm); *per_cpu_ptr(comp->stream, cpu) = NULL; break; default: @@ -209,18 +251,16 @@ void zcomp_destroy(struct zcomp *comp) struct zcomp *zcomp_create(const char *compress) { struct zcomp *comp; - struct zcomp_backend *backend; int error; - backend = find_backend(compress); - if (!backend) + if (!zcomp_available_algorithm(compress)) return ERR_PTR(-EINVAL); comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL); if (!comp) return ERR_PTR(-ENOMEM); - comp->backend = backend; + comp->name = compress; error = zcomp_init(comp); if (error) { kfree(comp); @@ -13,33 +13,15 @@ struct zcomp_strm { /* compression/decompression buffer */ void *buffer; - /* - * The private data of the compression stream, only compression - * stream backend can touch this (e.g. 
compression algorithm - * working memory) - */ - void *private; -}; - -/* static compression backend */ -struct zcomp_backend { - int (*compress)(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private); - - int (*decompress)(const unsigned char *src, size_t src_len, - unsigned char *dst); - - void *(*create)(gfp_t flags); - void (*destroy)(void *private); - - const char *name; + struct crypto_comp *tfm; }; /* dynamic per-device compression frontend */ struct zcomp { struct zcomp_strm * __percpu *stream; - struct zcomp_backend *backend; struct notifier_block notifier; + + const char *name; }; ssize_t zcomp_available_show(const char *comp, char *buf); @@ -48,14 +30,14 @@ bool zcomp_available_algorithm(const char *comp); struct zcomp *zcomp_create(const char *comp); void zcomp_destroy(struct zcomp *comp); -struct zcomp_strm *zcomp_strm_find(struct zcomp *comp); -void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm); +struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); +void zcomp_stream_put(struct zcomp *comp); -int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, - const unsigned char *src, size_t *dst_len); +int zcomp_compress(struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len); -int zcomp_decompress(struct zcomp *comp, const unsigned char *src, - size_t src_len, unsigned char *dst); +int zcomp_decompress(struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst); bool zcomp_set_max_streams(struct zcomp *comp, int num_strm); #endif /* _ZCOMP_H_ */ diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c deleted file mode 100644 index 0110086accba..000000000000 --- a/ drivers/block/zram/zcomp_lz4.c+++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/lz4.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> - -#include "zcomp_lz4.h" - -static void *zcomp_lz4_create(gfp_t flags) -{ - void *ret; - - ret = kmalloc(LZ4_MEM_COMPRESS, flags); - if (!ret) - ret = __vmalloc(LZ4_MEM_COMPRESS, - flags | __GFP_HIGHMEM, - PAGE_KERNEL); - return ret; -} - -static void zcomp_lz4_destroy(void *private) -{ - kvfree(private); -} - -static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private) -{ - /* return : Success if return 0 */ - return lz4_compress(src, PAGE_SIZE, dst, dst_len, private); -} - -static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len, - unsigned char *dst) -{ - size_t dst_len = PAGE_SIZE; - /* return : Success if return 0 */ - return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len); -} - -struct zcomp_backend zcomp_lz4 = { - .compress = zcomp_lz4_compress, - .decompress = zcomp_lz4_decompress, - .create = zcomp_lz4_create, - .destroy = zcomp_lz4_destroy, - .name = "lz4", -}; diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h deleted file mode 100644 index 60613fb29dd8..000000000000 --- a/ drivers/block/zram/zcomp_lz4.h+++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _ZCOMP_LZ4_H_ -#define _ZCOMP_LZ4_H_ - -#include "zcomp.h" - -extern struct zcomp_backend zcomp_lz4; - -#endif /* _ZCOMP_LZ4_H_ */ diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c deleted file mode 100644 index ed7a1f0549ec..000000000000 --- a/ drivers/block/zram/zcomp_lzo.c+++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/lzo.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> - -#include "zcomp_lzo.h" - -static void *lzo_create(gfp_t flags) -{ - void *ret; - - ret = kmalloc(LZO1X_MEM_COMPRESS, flags); - if (!ret) - ret = __vmalloc(LZO1X_MEM_COMPRESS, - flags | __GFP_HIGHMEM, - PAGE_KERNEL); - return ret; -} - -static void lzo_destroy(void *private) -{ - kvfree(private); -} - -static int lzo_compress(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private) -{ - int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private); - return ret == LZO_E_OK ? 0 : ret; -} - -static int lzo_decompress(const unsigned char *src, size_t src_len, - unsigned char *dst) -{ - size_t dst_len = PAGE_SIZE; - int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len); - return ret == LZO_E_OK ? 0 : ret; -} - -struct zcomp_backend zcomp_lzo = { - .compress = lzo_compress, - .decompress = lzo_decompress, - .create = lzo_create, - .destroy = lzo_destroy, - .name = "lzo", -}; diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h deleted file mode 100644 index 128c5807fa14..000000000000 --- a/ drivers/block/zram/zcomp_lzo.h+++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _ZCOMP_LZO_H_ -#define _ZCOMP_LZO_H_ - -#include "zcomp.h" - -extern struct zcomp_backend zcomp_lzo; - -#endif /* _ZCOMP_LZO_H_ */ @@ -342,9 +342,16 @@ static ssize_t comp_algorithm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); + char compressor[CRYPTO_MAX_ALG_NAME]; size_t sz; - if (!zcomp_available_algorithm(buf)) + strlcpy(compressor, buf, sizeof(compressor)); + /* ignore trailing newline */ + sz = strlen(compressor); + if (sz > 0 && compressor[sz - 1] == '\n') + compressor[sz - 1] = 0x00; + + if (!zcomp_available_algorithm(compressor)) return -EINVAL; down_write(&zram->init_lock); @@ -353,13 +360,8 @@ static ssize_t comp_algorithm_store(struct device *dev, pr_info("Can't change algorithm for initialized device\n"); return -EBUSY; } - strlcpy(zram->compressor, buf, sizeof(zram->compressor)); - - /* ignore trailing newline */ - sz = strlen(zram->compressor); - if (sz > 0 && zram->compressor[sz - 1] == '\n') - zram->compressor[sz - 1] = 0x00; + strlcpy(zram->compressor, compressor, sizeof(compressor)); up_write(&zram->init_lock); return len; } @@ -563,7 +565,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) unsigned char *cmem; struct zram_meta *meta = zram->meta; unsigned long handle; - size_t size; + unsigned int size; bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); handle = meta->table[index].handle; @@ -576,10 +578,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) } cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); - if (size == PAGE_SIZE) + if (size == PAGE_SIZE) { copy_page(mem, cmem); - else - ret = zcomp_decompress(zram->comp, cmem, size, mem); + } else { + struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); + + ret = zcomp_decompress(zstrm, cmem, size, mem); + zcomp_stream_put(zram->comp); + } zs_unmap_object(meta->mem_pool, handle); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); @@ -646,7 +652,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, int offset) { int ret = 0; - size_t clen; + unsigned int clen; unsigned long handle = 0; struct page *page; unsigned char *user_mem, *cmem, *src, *uncmem = NULL; @@ -695,8 +701,8 @@ compress_again: goto out; } - zstrm = zcomp_strm_find(zram->comp); - ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); + zstrm = zcomp_stream_get(zram->comp); + ret = zcomp_compress(zstrm, uncmem, &clen); if (!is_partial_io(bvec)) { kunmap_atomic(user_mem); user_mem = NULL; @@ -732,19 +738,21 @@ compress_again: handle = zs_malloc(meta->mem_pool, clen, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN | - __GFP_HIGHMEM); + __GFP_HIGHMEM | + __GFP_MOVABLE); if (!handle) { - zcomp_strm_release(zram->comp, zstrm); + zcomp_stream_put(zram->comp); zstrm = NULL; atomic64_inc(&zram->stats.writestall); handle = zs_malloc(meta->mem_pool, clen, - GFP_NOIO | __GFP_HIGHMEM); + GFP_NOIO | __GFP_HIGHMEM | + __GFP_MOVABLE); if (handle) goto compress_again; - pr_err("Error allocating memory for compressed page: %u, size=%zu\n", + pr_err("Error allocating memory for compressed page: %u, size=%u\n", index, clen); ret = -ENOMEM; goto out; @@ -769,7 +777,7 @@ compress_again: memcpy(cmem, src, clen); } - zcomp_strm_release(zram->comp, zstrm); + zcomp_stream_put(zram->comp); zstrm = NULL; zs_unmap_object(meta->mem_pool, handle); @@ -789,7 +797,7 @@ compress_again: atomic64_inc(&zram->stats.pages_stored); out: if (zstrm) - zcomp_strm_release(zram->comp, zstrm); + 
zcomp_stream_put(zram->comp); if (is_partial_io(bvec)) kfree(uncmem); return ret; @@ -874,7 +882,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; - if (unlikely(bio->bi_rw & REQ_DISCARD)) { + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) { zram_bio_discard(zram, index, offset, bio); bio_endio(bio); return; @@ -15,8 +15,9 @@ #ifndef _ZRAM_DRV_H_ #define _ZRAM_DRV_H_ -#include <linux/spinlock.h> +#include <linux/rwsem.h> #include <linux/zsmalloc.h> +#include <linux/crypto.h> #include "zcomp.h" @@ -113,7 +114,7 @@ struct zram { * we can store in a disk. */ u64 disksize; /* bytes */ - char compressor[10]; + char compressor[CRYPTO_MAX_ALG_NAME]; /* * zram is claimed so open request will be failed */ @@ -123,6 +123,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, { USB_DEVICE(0x13d3, 0x3487) }, + { USB_DEVICE(0x13d3, 0x3490) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -190,6 +191,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, @@ -274,6 +274,8 @@ static int bpa10x_setup(struct hci_dev *hdev) BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); + hci_set_fw_info(hdev, "%s", skb->data + 1); + kfree_skb(skb); return 0; } @@ -138,7 +138,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) if (event->length > 3 && event->data[3]) priv->btmrvl_dev.dev_type = HCI_AMP; else - priv->btmrvl_dev.dev_type = HCI_BREDR; + priv->btmrvl_dev.dev_type = HCI_PRIMARY; BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type); } else if (priv->btmrvl_dev.sendcmdflag && @@ -1071,7 +1071,6 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; - int buf_block_len; int blksz; int i = 0; u8 *buf = NULL; @@ -1083,9 +1082,13 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, return -EINVAL; } + blksz = DIV_ROUND_UP(nb, SDIO_BLOCK_SIZE) * SDIO_BLOCK_SIZE; + buf = payload; - if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1)) { - tmpbufsz = ALIGN_SZ(nb, BTSDIO_DMA_ALIGN); + if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1) || + nb < blksz) { + tmpbufsz = ALIGN_SZ(blksz, BTSDIO_DMA_ALIGN) + + BTSDIO_DMA_ALIGN; tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; @@ -1093,15 +1096,12 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, memcpy(buf, payload, nb); } - blksz = SDIO_BLOCK_SIZE; - buf_block_len = DIV_ROUND_UP(nb, blksz); - sdio_claim_host(card->func); do { /* Transfer data to card */ ret = sdio_writesb(card->func, card->ioport, buf, - buf_block_len * blksz); + blksz); if (ret < 0) { i++; BT_ERR("i=%d writesb failed: %d", i, ret); @@ -1625,6 +1625,7 @@ static int btmrvl_sdio_suspend(struct device *dev) if (priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { BT_ERR("HS not actived, suspend failed!"); + priv->adapter->is_suspending = false; return -EBUSY; } } @@ -311,7 +311,7 @@ static int btsdio_probe(struct sdio_func *func, if (id->class == SDIO_CLASS_BT_AMP) hdev->dev_type 
= HCI_AMP; else - hdev->dev_type = HCI_BREDR; + hdev->dev_type = HCI_PRIMARY; data->hdev = hdev; @@ -237,6 +237,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, @@ -249,6 +250,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -314,6 +316,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, + { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL }, /* Other Intel Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), @@ -2103,10 +2106,14 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* With this Intel bootloader only the hardware variant and device * revision information are used to select the right firmware. * - * Currently this bootloader support is limited to hardware variant - * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b). + * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi. + * + * Currently the supported hardware variants are: + * 11 (0x0b) for iBT3.0 (LnP/SfP) + * 12 (0x0c) for iBT3.5 (WsP) */ - snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi", + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", + le16_to_cpu(ver.hw_variant), le16_to_cpu(params->dev_revid)); err = request_firmware(&fw, fwname, &hdev->dev); @@ -2122,7 +2129,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Save the DDC file name for later use to apply once the firmware * downloading is done. */ - snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc", + snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", + le16_to_cpu(ver.hw_variant), le16_to_cpu(params->dev_revid)); kfree_skb(skb); @@ -2825,7 +2833,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_AMP) hdev->dev_type = HCI_AMP; else - hdev->dev_type = HCI_BREDR; + hdev->dev_type = HCI_PRIMARY; data->hdev = hdev; @@ -51,7 +51,7 @@ */ struct ti_st { struct hci_dev *hdev; - char reg_status; + int reg_status; long (*st_write) (struct sk_buff *); struct completion wait_reg_completion; }; @@ -83,7 +83,7 @@ static inline void ti_st_tx_complete(struct ti_st *hst, int pkt_type) * status.ti_st_open() function will wait for signal from this * API when st_register() function returns ST_PENDING. 
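*
* Editor's note (added): reg_status and the callback argument are widened
* from char to int because the signedness of plain char is
* implementation-defined and char is unsigned on several architectures
* (ARM among them), which would turn the negative error codes stored
* here into large positive values and defeat the error checks.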
*/ -static void st_reg_completion_cb(void *priv_data, char data) +static void st_reg_completion_cb(void *priv_data, int data) { struct ti_st *lhst = priv_data; @@ -537,9 +537,7 @@ static int intel_setup(struct hci_uart *hu) { static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01, 0x00, 0x08, 0x04, 0x00 }; - static const u8 lpm_param[] = { 0x03, 0x07, 0x01, 0x0b }; struct intel_data *intel = hu->priv; - struct intel_device *idev = NULL; struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; struct intel_version ver; @@ -884,35 +882,23 @@ done: bt_dev_info(hdev, "Device booted in %llu usecs", duration); - /* Enable LPM if matching pdev with wakeup enabled */ + /* Enable LPM if matching pdev with wakeup enabled, set TX active + * until further LPM TX notification. + */ mutex_lock(&intel_device_list_lock); list_for_each(p, &intel_device_list) { struct intel_device *dev = list_entry(p, struct intel_device, list); if (hu->tty->dev->parent == dev->pdev->dev.parent) { - if (device_may_wakeup(&dev->pdev->dev)) - idev = dev; + if (device_may_wakeup(&dev->pdev->dev)) { + set_bit(STATE_LPM_ENABLED, &intel->flags); + set_bit(STATE_TX_ACTIVE, &intel->flags); + } break; } } mutex_unlock(&intel_device_list_lock); - if (!idev) - goto no_lpm; - - bt_dev_info(hdev, "Enabling LPM"); - - skb = __hci_cmd_sync(hdev, 0xfc8b, sizeof(lpm_param), lpm_param, - HCI_CMD_TIMEOUT); - if (IS_ERR(skb)) { - bt_dev_err(hdev, "Failed to enable LPM"); - goto no_lpm; - } - kfree_skb(skb); - - set_bit(STATE_LPM_ENABLED, &intel->flags); - -no_lpm: /* Ignore errors, device can work without DDC parameters */ btintel_load_ddc_config(hdev, fwname); @@ -609,7 +609,7 @@ static int hci_uart_register_dev(struct hci_uart *hu) if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) hdev->dev_type = HCI_AMP; else - hdev->dev_type = HCI_BREDR; + hdev->dev_type = HCI_PRIMARY; if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; @@ -97,10 +97,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) if (data->hdev) return -EBADFD; - /* bits 0-1 are dev_type (BR/EDR or AMP) */ + /* bits 0-1 are dev_type (Primary or AMP) */ dev_type = opcode & 0x03; - if (dev_type != HCI_BREDR && dev_type != HCI_AMP) + if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP) return -EINVAL; /* bits 2-5 are reserved (must be zero) */ @@ -316,7 +316,7 @@ static void vhci_open_timeout(struct work_struct *work) struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); - vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR); + vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY); } static int vhci_open(struct inode *inode, struct file *file) @@ -2032,7 +2032,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi, init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_SUBCHANNEL; - cgc.cmd[1] = 2; /* MSF addressing */ + cgc.cmd[1] = subchnl->cdsc_format;/* MSF or LBA addressing */ cgc.cmd[2] = 0x40; /* request subQ data */ cgc.cmd[3] = mcn ? 
2 : 1; cgc.cmd[8] = 16; @@ -2041,17 +2041,27 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi, return ret; subchnl->cdsc_audiostatus = cgc.buffer[1]; - subchnl->cdsc_format = CDROM_MSF; subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf; subchnl->cdsc_trk = cgc.buffer[6]; subchnl->cdsc_ind = cgc.buffer[7]; - subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13]; - subchnl->cdsc_reladdr.msf.second = cgc.buffer[14]; - subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15]; - subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9]; - subchnl->cdsc_absaddr.msf.second = cgc.buffer[10]; - subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11]; + if (subchnl->cdsc_format == CDROM_LBA) { + subchnl->cdsc_absaddr.lba = ((cgc.buffer[8] << 24) | + (cgc.buffer[9] << 16) | + (cgc.buffer[10] << 8) | + (cgc.buffer[11])); + subchnl->cdsc_reladdr.lba = ((cgc.buffer[12] << 24) | + (cgc.buffer[13] << 16) | + (cgc.buffer[14] << 8) | + (cgc.buffer[15])); + } else { + subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13]; + subchnl->cdsc_reladdr.msf.second = cgc.buffer[14]; + subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15]; + subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9]; + subchnl->cdsc_absaddr.msf.second = cgc.buffer[10]; + subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11]; + } return 0; } @@ -3022,7 +3032,7 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, if (!((requested == CDROM_MSF) || (requested == CDROM_LBA))) return -EINVAL; - q.cdsc_format = CDROM_MSF; + ret = cdrom_read_subchannel(cdi, &q, 0); if (ret) return ret; @@ -325,7 +325,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd, if(get_user(bin, &binary->bin) < 0) return -EFAULT; - if (len == 0) { + if (len <= 0) { return -EINVAL; /* nothing to upload?!? */ } if (len > DSP56K_MAX_BINARY_LENGTH) { @@ -90,7 +90,7 @@ config HW_RANDOM_BCM63XX config HW_RANDOM_BCM2835 tristate "Broadcom BCM2835 Random Number Generator support" - depends on ARCH_BCM2835 + depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number @@ -396,6 +396,20 @@ config HW_RANDOM_PIC32 If unsure, say Y. +config HW_RANDOM_MESON + tristate "Amlogic Meson Random Number Generator support" + depends on HW_RANDOM + depends on ARCH_MESON || COMPILE_TEST + default y + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Amlogic Meson SoCs. + + To compile this driver as a module, choose M here. The + module will be called meson-rng. + + If unsure, say Y.
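Note on the cdrom_read_subchannel() change above: when LBA format is requested, the subchannel reply carries the absolute and relative addresses as 32-bit big-endian values starting at buffer offsets 8 and 12. A minimal sketch of the byte assembly the patch open-codes (the helper name is mine, not the kernel's; the explicit u32 cast just avoids shifting into the sign bit of a promoted int):

    static inline u32 subq_be32(const u8 *p)
    {
            return ((u32)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    }

    /* equivalent to the patch:
     *   subchnl->cdsc_absaddr.lba = subq_be32(&cgc.buffer[8]);
     *   subchnl->cdsc_reladdr.lba = subq_be32(&cgc.buffer[12]);
     */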
+ endif # HW_RANDOM config UML_RANDOM @@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o +obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o @@ -19,6 +19,7 @@ #define RNG_CTRL 0x0 #define RNG_STATUS 0x4 #define RNG_DATA 0x8 +#define RNG_INT_MASK 0x10 /* enable rng */ #define RNG_RBGEN 0x1 @@ -26,10 +27,24 @@ /* the initial numbers generated are "less random" so will be discarded */ #define RNG_WARMUP_COUNT 0x40000 +#define RNG_INT_OFF 0x1 + +static void __init nsp_rng_init(void __iomem *base) +{ + u32 val; + + /* mask the interrupt */ + val = readl(base + RNG_INT_MASK); + val |= RNG_INT_OFF; + writel(val, base + RNG_INT_MASK); +} + static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { void __iomem *rng_base = (void __iomem *)rng->priv; + u32 max_words = max / sizeof(u32); + u32 num_words, count; while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) { if (!wait) @@ -37,8 +52,14 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, cpu_relax(); } - *(u32 *)buf = __raw_readl(rng_base + RNG_DATA); - return sizeof(u32); + num_words = readl(rng_base + RNG_STATUS) >> 24; + if (num_words > max_words) + num_words = max_words; + + for (count = 0; count < num_words; count++) + ((u32 *)buf)[count] = readl(rng_base + RNG_DATA); + + return num_words * sizeof(u32); } static struct hwrng bcm2835_rng_ops = { @@ -46,10 +67,19 @@ static struct hwrng bcm2835_rng_ops = { .read = bcm2835_rng_read, }; +static const struct of_device_id bcm2835_rng_of_match[] = { + { .compatible = "brcm,bcm2835-rng"}, + { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init}, + { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init}, + {}, +}; + static int bcm2835_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + void (*rng_setup)(void __iomem *base); + const struct of_device_id *rng_id; void __iomem *rng_base; int err; @@ -61,6 +91,15 @@ static int bcm2835_rng_probe(struct platform_device *pdev) } bcm2835_rng_ops.priv = (unsigned long)rng_base; + rng_id = of_match_node(bcm2835_rng_of_match, np); + if (!rng_id) + return -EINVAL; + + /* Check for rng init function, execute it */ + rng_setup = rng_id->data; + if (rng_setup) + rng_setup(rng_base); + /* set warm-up count & enable */ __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); @@ -90,10 +129,6 @@ static int bcm2835_rng_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id bcm2835_rng_of_match[] = { - { .compatible = "brcm,bcm2835-rng", }, - {}, -}; MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); static struct platform_driver bcm2835_rng_driver = { @@ -45,12 +45,12 @@ struct exynos_rng { static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset) { - return __raw_readl(rng->mem + offset); + return readl_relaxed(rng->mem + offset); } static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset) { - __raw_writel(val, rng->mem + offset); + writel_relaxed(val, rng->mem + offset); } static int exynos_rng_configure(struct exynos_rng *exynos_rng) diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c new file mode 100644 index 000000000000..0cfd81bcaeac --- /dev/null +++ b/drivers/char/hw_random/meson-rng.c @@ -0,0 +1,131 @@ +/* + * This file is provided under a dual BSD/GPLv2 license.
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2014 Amlogic, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * BSD LICENSE + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2014 Amlogic, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include <linux/err.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/hw_random.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/of.h> + +#define RNG_DATA 0x00 + +struct meson_rng_data { + void __iomem *base; + struct platform_device *pdev; + struct hwrng rng; +}; + +static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct meson_rng_data *data = + container_of(rng, struct meson_rng_data, rng); + + if (max < sizeof(u32)) + return 0; + + *(u32 *)buf = readl_relaxed(data->base + RNG_DATA); + + return sizeof(u32); +} + +static int meson_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_rng_data *data; + struct resource *res; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pdev = pdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->base = devm_ioremap_resource(dev, res); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + data->rng.name = pdev->name; + data->rng.read = meson_rng_read; + + platform_set_drvdata(pdev, data); + + return devm_hwrng_register(dev, &data->rng); +} + +static const struct of_device_id meson_rng_of_match[] = { + { .compatible = "amlogic,meson-rng", }, + {}, +}; + +static struct platform_driver meson_rng_driver = { + .probe = meson_rng_probe, + .driver = { + .name = "meson-rng", + .of_match_table = meson_rng_of_match, + }, +}; + +module_platform_driver(meson_rng_driver); + +MODULE_ALIAS("platform:meson-rng"); +MODULE_DESCRIPTION("Meson H/W Random Number Generator driver"); +MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("Dual BSD/GPL"); @@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); + goto err_ioremap; + } ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : get_omap_rng_device_details(priv); @@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev) static int __maybe_unused omap_rng_resume(struct device *dev) { struct omap_rng_dev *priv = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret) { + dev_err(dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(dev); + return ret; + } - pm_runtime_get_sync(dev); priv->pdata->init(priv); return 0; @@ -69,8 +69,12 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) } /* If error detected or data not ready... */ - if (sr != RNG_SR_DRDY) + if (sr != RNG_SR_DRDY) { + if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS), + "bad RNG status - %x\n", sr)) + writel_relaxed(0, priv->base + RNG_SR); break; + } *(u32 *)data = readl_relaxed(priv->base + RNG_DR); @@ -79,10 +83,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } - if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS), - "bad RNG status - %x\n", sr)) - writel_relaxed(0, priv->base + RNG_SR); - pm_runtime_mark_last_busy((struct device *) priv->rng.priv); pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); @@ -50,18 +50,6 @@ config IPMI_SI Currently, only KCS and SMIC are supported. If you are using IPMI, you should probably say "y" here. 
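Note on the bcm2835-rng read path earlier in this series: the top byte of RNG_STATUS reports how many 32-bit words are waiting in the hardware FIFO, and the read callback now drains up to that many instead of returning exactly one word per call. A condensed sketch of the idea, with locking and the hwrng plumbing stripped (not a drop-in replacement for the patch):

    static int rng_drain_fifo(void __iomem *base, u32 *buf, size_t max_words)
    {
            /* FIFO fill level lives in bits 31:24 of the status register */
            u32 avail = readl(base + RNG_STATUS) >> 24;
            u32 i, n = min_t(u32, avail, max_words);

            for (i = 0; i < n; i++)
                    buf[i] = readl(base + RNG_DATA);

            return n * sizeof(u32);   /* bytes produced, per the hwrng .read contract */
    }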
-config IPMI_SI_PROBE_DEFAULTS - bool 'Probe for all possible IPMI system interfaces by default' - default n - depends on IPMI_SI - help - Modern systems will usually expose IPMI interfaces via a discoverable - firmware mechanism such as ACPI or DMI. Older systems do not, and so - the driver is forced to probe hardware manually. This may cause boot - delays. Say "n" here to disable this manual probing. IPMI will then - only be available on older systems if the "ipmi_si_intf.trydefaults=1" - boot argument is passed. - config IPMI_SSIF tristate 'IPMI SMBus handler (SSIF)' select I2C @@ -474,12 +474,12 @@ static DEFINE_MUTEX(smi_watchers_mutex); static const char * const addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", - "device-tree", "default" + "device-tree" }; const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) { - if (src > SI_DEFAULT) + if (src >= SI_LAST) src = 0; /* Invalid */ return addr_src_to_str[src]; } @@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) while (!list_empty(&intf->waiting_rcv_msgs)) { smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); + list_del(&smi_msg->link); if (!run_to_completion) spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); @@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) if (rv > 0) { /* * To preserve message order, quit if we - * can't handle a message. + * can't handle a message. Add the message + * back at the head, this is safe because this + * tasklet is the only thing that pulls the + * messages. */ + list_add(&smi_msg->link, &intf->waiting_rcv_msgs); break; } else { - list_del(&smi_msg->link); if (rv == 0) /* Message handled */ ipmi_free_smi_msg(smi_msg); @@ -1322,7 +1322,6 @@ static bool si_tryplatform = true; #ifdef CONFIG_PCI static bool si_trypci = true; #endif -static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS); static char *si_type[SI_MAX_PARMS]; #define MAX_SI_TYPE_STR 30 static char si_type_str[MAX_SI_TYPE_STR]; @@ -1371,10 +1370,6 @@ module_param_named(trypci, si_trypci, bool, 0); MODULE_PARM_DESC(trypci, "Setting this to zero will disable the" " default scan of the interfaces identified via pci"); #endif -module_param_named(trydefaults, si_trydefaults, bool, 0); -MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" - " default scan of the KCS and SMIC interface at the standard" - " address"); module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); MODULE_PARM_DESC(type, "Defines the type of each interface, each" " interface separated by commas. 
The types are 'kcs'," @@ -3461,62 +3456,6 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info) del_timer_sync(&smi_info->si_timer); } -static const struct ipmi_default_vals -{ - const int type; - const int port; -} ipmi_defaults[] = -{ - { .type = SI_KCS, .port = 0xca2 }, - { .type = SI_SMIC, .port = 0xca9 }, - { .type = SI_BT, .port = 0xe4 }, - { .port = 0 } -}; - -static void default_find_bmc(void) -{ - struct smi_info *info; - int i; - - for (i = 0; ; i++) { - if (!ipmi_defaults[i].port) - break; -#ifdef CONFIG_PPC - if (check_legacy_ioport(ipmi_defaults[i].port)) - continue; -#endif - info = smi_info_alloc(); - if (!info) - return; - - info->addr_source = SI_DEFAULT; - - info->si_type = ipmi_defaults[i].type; - info->io_setup = port_setup; - info->io.addr_data = ipmi_defaults[i].port; - info->io.addr_type = IPMI_IO_ADDR_SPACE; - - info->io.addr = NULL; - info->io.regspacing = DEFAULT_REGSPACING; - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = 0; - - if (add_smi(info) == 0) { - if ((try_smi_init(info)) == 0) { - /* Found one... */ - printk(KERN_INFO PFX "Found default %s" - " state machine at %s address 0x%lx\n", - si_to_str[info->si_type], - addr_space_to_str[info->io.addr_type], - info->io.addr_data); - } else - cleanup_one_si(info); - } else { - kfree(info); - } - } -} - static int is_new_interface(struct smi_info *info) { struct smi_info *e; @@ -3844,8 +3783,6 @@ static int init_ipmi_si(void) #ifdef CONFIG_PARISC register_parisc_driver(&ipmi_parisc_driver); parisc_registered = true; - /* poking PC IO addresses will crash machine, don't do it */ - si_trydefaults = 0; #endif /* We prefer devices with interrupts, but in the case of a machine @@ -3885,16 +3822,6 @@ static int init_ipmi_si(void) if (type) return 0; - if (si_trydefaults) { - mutex_lock(&smi_infos_lock); - if (list_empty(&smi_infos)) { - /* No BMC was found, try defaults. 
*/ - mutex_unlock(&smi_infos_lock); - default_find_bmc(); - } else - mutex_unlock(&smi_infos_lock); - } - mutex_lock(&smi_infos_lock); if (unload_when_empty && list_empty(&smi_infos)) { mutex_unlock(&smi_infos_lock); @@ -568,12 +568,16 @@ static void retry_timeout(unsigned long data) } -static void ssif_alert(struct i2c_client *client, unsigned int data) +static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, + unsigned int data) { struct ssif_info *ssif_info = i2c_get_clientdata(client); unsigned long oflags, *flags; bool do_get = false; + if (type != I2C_PROTOCOL_SMBUS_ALERT) + return; + ssif_inc_stat(ssif_info, alerts); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); @@ -22,6 +22,7 @@ #include <linux/device.h> #include <linux/highmem.h> #include <linux/backing-dev.h> +#include <linux/shmem_fs.h> #include <linux/splice.h> #include <linux/pfn.h> #include <linux/export.h> @@ -66,12 +67,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) u64 cursor = from; while (cursor < to) { - if (!devmem_is_allowed(pfn)) { - printk(KERN_INFO - "Program %s tried to access /dev/mem between %Lx->%Lx.\n", - current->comm, from, to); + if (!devmem_is_allowed(pfn)) return 0; - } cursor += PAGE_SIZE; pfn++; } @@ -661,6 +658,28 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma) return 0; } +static unsigned long get_unmapped_area_zero(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ +#ifdef CONFIG_MMU + if (flags & MAP_SHARED) { + /* + * mmap_zero() will call shmem_zero_setup() to create a file, + * so use shmem's get_unmapped_area in case it can be huge; + * and pass NULL for file as in mmap.c's get_unmapped_area(), + * so as not to confuse shmem with our handle on "/dev/zero". + */ + return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); + } + + /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +#else + return -ENOSYS; +#endif +} + static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -768,6 +787,7 @@ static const struct file_operations zero_fops = { .read_iter = read_iter_zero, .write_iter = write_iter_zero, .mmap = mmap_zero, + .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, #endif @@ -261,6 +261,7 @@ #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> +#include <crypto/chacha20.h> #include <asm/processor.h> #include <asm/uaccess.h> @@ -413,6 +414,34 @@ static struct fasync_struct *fasync; static DEFINE_SPINLOCK(random_ready_list_lock); static LIST_HEAD(random_ready_list); +struct crng_state { + __u32 state[16]; + unsigned long init_time; + spinlock_t lock; +}; + +struct crng_state primary_crng = { + .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock), +}; + +/* + * crng_init = 0 --> Uninitialized + * 1 --> Initialized + * 2 --> Initialized from input_pool + * + * crng_init is protected by primary_crng->lock, and only increases + * its value (from 0->1->2). 
+ */ +static int crng_init = 0; +#define crng_ready() (likely(crng_init > 0)) +static int crng_init_cnt = 0; +#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) +static void _extract_crng(struct crng_state *crng, + __u8 out[CHACHA20_BLOCK_SIZE]); +static void _crng_backtrack_protect(struct crng_state *crng, + __u8 tmp[CHACHA20_BLOCK_SIZE], int used); +static void process_random_ready_list(void); + /********************************************************************** * * OS independent entropy store. Here are the functions which handle @@ -442,10 +471,15 @@ struct entropy_store { __u8 last_data[EXTRACT_SIZE]; }; +static ssize_t extract_entropy(struct entropy_store *r, void *buf, + size_t nbytes, int min, int rsvd); +static ssize_t _extract_entropy(struct entropy_store *r, void *buf, + size_t nbytes, int fips); + +static void crng_reseed(struct crng_state *crng, struct entropy_store *r); static void push_to_pool(struct work_struct *work); static __u32 input_pool_data[INPUT_POOL_WORDS]; static __u32 blocking_pool_data[OUTPUT_POOL_WORDS]; -static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS]; static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], @@ -466,16 +500,6 @@ static struct entropy_store blocking_pool = { push_to_pool), }; -static struct entropy_store nonblocking_pool = { - .poolinfo = &poolinfo_table[1], - .name = "nonblocking", - .pull = &input_pool, - .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), - .pool = nonblocking_pool_data, - .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work, - push_to_pool), -}; - static __u32 const twist_table[8] = { 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; @@ -678,12 +702,6 @@ retry: if (!r->initialized && r->entropy_total > 128) { r->initialized = 1; r->entropy_total = 0; - if (r == &nonblocking_pool) { - prandom_reseed_late(); - process_random_ready_list(); - wake_up_all(&urandom_init_wait); - pr_notice("random: %s pool is initialized\n", r->name); - } } trace_credit_entropy_bits(r->name, nbits, @@ -693,49 +711,266 @@ retry: if (r == &input_pool) { int entropy_bits = entropy_count >> ENTROPY_SHIFT; + if (crng_init < 2 && entropy_bits >= 128) { + crng_reseed(&primary_crng, r); + entropy_bits = r->entropy_count >> ENTROPY_SHIFT; + } + /* should we wake readers? */ if (entropy_bits >= random_read_wakeup_bits) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } /* If the input pool is getting full, send some - * entropy to the two output pools, flipping back and - * forth between them, until the output pools are 75% - * full. + * entropy to the blocking pool until it is 75% full. 
*/ if (entropy_bits > random_write_wakeup_bits && r->initialized && r->entropy_total >= 2*random_read_wakeup_bits) { - static struct entropy_store *last = &blocking_pool; struct entropy_store *other = &blocking_pool; - if (last == &blocking_pool) - other = &nonblocking_pool; if (other->entropy_count <= - 3 * other->poolinfo->poolfracbits / 4) - last = other; - if (last->entropy_count <= - 3 * last->poolinfo->poolfracbits / 4) { - schedule_work(&last->push_work); + 3 * other->poolinfo->poolfracbits / 4) { + schedule_work(&other->push_work); r->entropy_total = 0; } } } } -static void credit_entropy_bits_safe(struct entropy_store *r, int nbits) +static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) { const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); + if (nbits < 0) + return -EINVAL; + /* Cap the value to avoid overflows */ nbits = min(nbits, nbits_max); - nbits = max(nbits, -nbits_max); credit_entropy_bits(r, nbits); + return 0; } /********************************************************************* * + * CRNG using CHACHA20 + * + *********************************************************************/ + +#define CRNG_RESEED_INTERVAL (300*HZ) + +static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); + +#ifdef CONFIG_NUMA +/* + * Hack to deal with crazy userspace progams when they are all trying + * to access /dev/urandom in parallel. The programs are almost + * certainly doing something terribly wrong, but we'll work around + * their brain damage. + */ +static struct crng_state **crng_node_pool __read_mostly; +#endif + +static void crng_initialize(struct crng_state *crng) +{ + int i; + unsigned long rv; + + memcpy(&crng->state[0], "expand 32-byte k", 16); + if (crng == &primary_crng) + _extract_entropy(&input_pool, &crng->state[4], + sizeof(__u32) * 12, 0); + else + get_random_bytes(&crng->state[4], sizeof(__u32) * 12); + for (i = 4; i < 16; i++) { + if (!arch_get_random_seed_long(&rv) && + !arch_get_random_long(&rv)) + rv = random_get_entropy(); + crng->state[i] ^= rv; + } + crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; +} + +static int crng_fast_load(const char *cp, size_t len) +{ + unsigned long flags; + char *p; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_ready()) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + p = (unsigned char *) &primary_crng.state[4]; + while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { + p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; + cp++; crng_init_cnt++; len--; + } + if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { + crng_init = 1; + wake_up_interruptible(&crng_init_wait); + pr_notice("random: fast init done\n"); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + +static void crng_reseed(struct crng_state *crng, struct entropy_store *r) +{ + unsigned long flags; + int i, num; + union { + __u8 block[CHACHA20_BLOCK_SIZE]; + __u32 key[8]; + } buf; + + if (r) { + num = extract_entropy(r, &buf, 32, 16, 0); + if (num == 0) + return; + } else { + _extract_crng(&primary_crng, buf.block); + _crng_backtrack_protect(&primary_crng, buf.block, + CHACHA20_KEY_SIZE); + } + spin_lock_irqsave(&primary_crng.lock, flags); + for (i = 0; i < 8; i++) { + unsigned long rv; + if (!arch_get_random_seed_long(&rv) && + !arch_get_random_long(&rv)) + rv = random_get_entropy(); + crng->state[i+4] ^= buf.key[i] ^ rv; + } + memzero_explicit(&buf, sizeof(buf)); + crng->init_time = jiffies; + if (crng == &primary_crng && crng_init < 2) { + crng_init = 2; + process_random_ready_list(); 
+ wake_up_interruptible(&crng_init_wait); + pr_notice("random: crng init done\n"); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); +} + +static inline void maybe_reseed_primary_crng(void) +{ + if (crng_init > 2 && + time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL)) + crng_reseed(&primary_crng, &input_pool); +} + +static inline void crng_wait_ready(void) +{ + wait_event_interruptible(crng_init_wait, crng_ready()); +} + +static void _extract_crng(struct crng_state *crng, + __u8 out[CHACHA20_BLOCK_SIZE]) +{ + unsigned long v, flags; + + if (crng_init > 1 && + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); + spin_lock_irqsave(&crng->lock, flags); + if (arch_get_random_long(&v)) + crng->state[14] ^= v; + chacha20_block(&crng->state[0], out); + if (crng->state[12] == 0) + crng->state[13]++; + spin_unlock_irqrestore(&crng->lock, flags); +} + +static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE]) +{ + struct crng_state *crng = NULL; + +#ifdef CONFIG_NUMA + if (crng_node_pool) + crng = crng_node_pool[numa_node_id()]; + if (crng == NULL) +#endif + crng = &primary_crng; + _extract_crng(crng, out); +} + +/* + * Use the leftover bytes from the CRNG block output (if there is + * enough) to mutate the CRNG key to provide backtracking protection. + */ +static void _crng_backtrack_protect(struct crng_state *crng, + __u8 tmp[CHACHA20_BLOCK_SIZE], int used) +{ + unsigned long flags; + __u32 *s, *d; + int i; + + used = round_up(used, sizeof(__u32)); + if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) { + extract_crng(tmp); + used = 0; + } + spin_lock_irqsave(&crng->lock, flags); + s = (__u32 *) &tmp[used]; + d = &crng->state[4]; + for (i=0; i < 8; i++) + *d++ ^= *s++; + spin_unlock_irqrestore(&crng->lock, flags); +} + +static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used) +{ + struct crng_state *crng = NULL; + +#ifdef CONFIG_NUMA + if (crng_node_pool) + crng = crng_node_pool[numa_node_id()]; + if (crng == NULL) +#endif + crng = &primary_crng; + _crng_backtrack_protect(crng, tmp, used); +} + +static ssize_t extract_crng_user(void __user *buf, size_t nbytes) +{ + ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; + __u8 tmp[CHACHA20_BLOCK_SIZE]; + int large_request = (nbytes > 256); + + while (nbytes) { + if (large_request && need_resched()) { + if (signal_pending(current)) { + if (ret == 0) + ret = -ERESTARTSYS; + break; + } + schedule(); + } + + extract_crng(tmp); + i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE); + if (copy_to_user(buf, tmp, i)) { + ret = -EFAULT; + break; + } + + nbytes -= i; + buf += i; + ret += i; + } + crng_backtrack_protect(tmp, i); + + /* Wipe data just written to memory */ + memzero_explicit(tmp, sizeof(tmp)); + + return ret; +} + + +/********************************************************************* + * * Entropy input management * *********************************************************************/ @@ -750,12 +985,12 @@ struct timer_rand_state { #define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; /* - * Add device- or boot-specific data to the input and nonblocking - * pools to help initialize them to unique values. + * Add device- or boot-specific data to the input pool to help + * initialize it. * - * None of this adds any entropy, it is meant to avoid the - * problem of the nonblocking pool having similar initial state - * across largely identical devices. 
+ * None of this adds any entropy; it is meant to avoid the problem of + * the entropy pool having similar initial state across largely + * identical devices. */ void add_device_randomness(const void *buf, unsigned int size) { @@ -767,11 +1002,6 @@ void add_device_randomness(const void *buf, unsigned int size) _mix_pool_bytes(&input_pool, buf, size); _mix_pool_bytes(&input_pool, &time, sizeof(time)); spin_unlock_irqrestore(&input_pool.lock, flags); - - spin_lock_irqsave(&nonblocking_pool.lock, flags); - _mix_pool_bytes(&nonblocking_pool, buf, size); - _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time)); - spin_unlock_irqrestore(&nonblocking_pool.lock, flags); } EXPORT_SYMBOL(add_device_randomness); @@ -802,7 +1032,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; - r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + r = &input_pool; mix_pool_bytes(r, &sample, sizeof(sample)); /* @@ -918,11 +1148,21 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + if (!crng_ready()) { + if ((fast_pool->count >= 64) && + crng_fast_load((char *) fast_pool->pool, + sizeof(fast_pool->pool))) { + fast_pool->count = 0; + fast_pool->last = now; + } + return; + } + if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ)) return; - r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + r = &input_pool; if (!spin_trylock(&r->lock)) return; @@ -946,6 +1186,7 @@ void add_interrupt_randomness(int irq, int irq_flags) /* award one bit for the contents of the fast pool */ credit_entropy_bits(r, credit + 1); } +EXPORT_SYMBOL_GPL(add_interrupt_randomness); #ifdef CONFIG_BLOCK void add_disk_randomness(struct gendisk *disk) @@ -965,9 +1206,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness); * *********************************************************************/ -static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int rsvd); - /* * This utility inline function is responsible for transferring entropy * from the primary pool to the secondary extraction pool. We make @@ -1142,6 +1380,36 @@ static void extract_buf(struct entropy_store *r, __u8 *out) memzero_explicit(&hash, sizeof(hash)); } +static ssize_t _extract_entropy(struct entropy_store *r, void *buf, + size_t nbytes, int fips) +{ + ssize_t ret = 0, i; + __u8 tmp[EXTRACT_SIZE]; + unsigned long flags; + + while (nbytes) { + extract_buf(r, tmp); + + if (fips) { + spin_lock_irqsave(&r->lock, flags); + if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) + panic("Hardware RNG duplicated output!\n"); + memcpy(r->last_data, tmp, EXTRACT_SIZE); + spin_unlock_irqrestore(&r->lock, flags); + } + i = min_t(int, nbytes, EXTRACT_SIZE); + memcpy(buf, tmp, i); + nbytes -= i; + buf += i; + ret += i; + } + + /* Wipe data just returned from memory */ + memzero_explicit(tmp, sizeof(tmp)); + + return ret; +} + /* * This function extracts randomness from the "entropy pool", and * returns it in a buffer. 
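Note on the early-boot path above: until crng_ready(), add_interrupt_randomness() bypasses the input pool entirely and hands raw fast-pool bytes to crng_fast_load(), which XOR-folds them into the 32-byte ChaCha20 key, wrapping modulo the key size, until CRNG_INIT_CNT_THRESH (64) bytes have been mixed; only then does crng_init advance to 1. Stripped of the trylock and the already-initialized check, the accumulation is roughly:

    #define KEY_SIZE        32              /* CHACHA20_KEY_SIZE */
    #define INIT_THRESH     (2 * KEY_SIZE)  /* CRNG_INIT_CNT_THRESH */

    static unsigned char key[KEY_SIZE];     /* crng->state[4..11] in the patch */
    static size_t init_cnt;

    static void fast_load_sketch(const unsigned char *p, size_t len)
    {
            while (len-- && init_cnt < INIT_THRESH)
                    key[init_cnt++ % KEY_SIZE] ^= *p++;
    }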
@@ -1154,7 +1422,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out) static ssize_t extract_entropy(struct entropy_store *r, void *buf, size_t nbytes, int min, int reserved) { - ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; unsigned long flags; @@ -1178,27 +1445,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, min, reserved); - while (nbytes) { - extract_buf(r, tmp); - - if (fips_enabled) { - spin_lock_irqsave(&r->lock, flags); - if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) - panic("Hardware RNG duplicated output!\n"); - memcpy(r->last_data, tmp, EXTRACT_SIZE); - spin_unlock_irqrestore(&r->lock, flags); - } - i = min_t(int, nbytes, EXTRACT_SIZE); - memcpy(buf, tmp, i); - nbytes -= i; - buf += i; - ret += i; - } - - /* Wipe data just returned from memory */ - memzero_explicit(tmp, sizeof(tmp)); - - return ret; + return _extract_entropy(r, buf, nbytes, fips_enabled); } /* @@ -1253,15 +1500,28 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, */ void get_random_bytes(void *buf, int nbytes) { + __u8 tmp[CHACHA20_BLOCK_SIZE]; + #if DEBUG_RANDOM_BOOT > 0 - if (unlikely(nonblocking_pool.initialized == 0)) + if (!crng_ready()) printk(KERN_NOTICE "random: %pF get_random_bytes called " - "with %d bits of entropy available\n", - (void *) _RET_IP_, - nonblocking_pool.entropy_total); + "with crng_init = %d\n", (void *) _RET_IP_, crng_init); #endif trace_get_random_bytes(nbytes, _RET_IP_); - extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); + + while (nbytes >= CHACHA20_BLOCK_SIZE) { + extract_crng(buf); + buf += CHACHA20_BLOCK_SIZE; + nbytes -= CHACHA20_BLOCK_SIZE; + } + + if (nbytes > 0) { + extract_crng(tmp); + memcpy(buf, tmp, nbytes); + crng_backtrack_protect(tmp, nbytes); + } else + crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE); + memzero_explicit(tmp, sizeof(tmp)); } EXPORT_SYMBOL(get_random_bytes); @@ -1279,7 +1539,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy) unsigned long flags; int err = -EALREADY; - if (likely(nonblocking_pool.initialized)) + if (crng_ready()) return err; owner = rdy->owner; @@ -1287,7 +1547,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy) return -ENOENT; spin_lock_irqsave(&random_ready_list_lock, flags); - if (nonblocking_pool.initialized) + if (crng_ready()) goto out; owner = NULL; @@ -1351,7 +1611,7 @@ void get_random_bytes_arch(void *buf, int nbytes) } if (nbytes) - extract_entropy(&nonblocking_pool, p, nbytes, 0, 0); + get_random_bytes(p, nbytes); } EXPORT_SYMBOL(get_random_bytes_arch); @@ -1394,9 +1654,30 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { +#ifdef CONFIG_NUMA + int i; + int num_nodes = num_possible_nodes(); + struct crng_state *crng; + struct crng_state **pool; +#endif + init_std_data(&input_pool); init_std_data(&blocking_pool); - init_std_data(&nonblocking_pool); + crng_initialize(&primary_crng); + +#ifdef CONFIG_NUMA + pool = kmalloc(num_nodes * sizeof(void *), + GFP_KERNEL|__GFP_NOFAIL|__GFP_ZERO); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + crng_node_pool = pool; +#endif return 0; } early_initcall(rand_initialize); @@ -1458,18 +1739,22 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) static ssize_t urandom_read(struct file *file, char __user *buf, 
size_t nbytes, loff_t *ppos) { + unsigned long flags; + static int maxwarn = 10; int ret; - if (unlikely(nonblocking_pool.initialized == 0)) - printk_once(KERN_NOTICE "random: %s urandom read " - "with %d bits of entropy available\n", - current->comm, nonblocking_pool.entropy_total); - + if (!crng_ready() && maxwarn > 0) { + maxwarn--; + printk(KERN_NOTICE "random: %s: uninitialized urandom read " + "(%zd bytes read)\n", + current->comm, nbytes); + spin_lock_irqsave(&primary_crng.lock, flags); + crng_init_cnt = 0; + spin_unlock_irqrestore(&primary_crng.lock, flags); + } nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); - ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); - - trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), - ENTROPY_BITS(&input_pool)); + ret = extract_crng_user(buf, nbytes); + trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool)); return ret; } @@ -1515,10 +1800,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer, { size_t ret; - ret = write_pool(&blocking_pool, buffer, count); - if (ret) - return ret; - ret = write_pool(&nonblocking_pool, buffer, count); + ret = write_pool(&input_pool, buffer, count); if (ret) return ret; @@ -1543,8 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (get_user(ent_count, p)) return -EFAULT; - credit_entropy_bits_safe(&input_pool, ent_count); - return 0; + return credit_entropy_bits_safe(&input_pool, ent_count); case RNDADDENTROPY: if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1558,8 +1839,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) size); if (retval < 0) return retval; - credit_entropy_bits_safe(&input_pool, ent_count); - return 0; + return credit_entropy_bits_safe(&input_pool, ent_count); case RNDZAPENTCNT: case RNDCLEARPOOL: /* @@ -1569,7 +1849,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; input_pool.entropy_count = 0; - nonblocking_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; default: @@ -1611,11 +1890,10 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, if (flags & GRND_RANDOM) return _random_read(flags & GRND_NONBLOCK, buf, count); - if (unlikely(nonblocking_pool.initialized == 0)) { + if (!crng_ready()) { if (flags & GRND_NONBLOCK) return -EAGAIN; - wait_event_interruptible(urandom_init_wait, - nonblocking_pool.initialized); + crng_wait_ready(); if (signal_pending(current)) return -ERESTARTSYS; } @@ -1773,13 +2051,15 @@ int random_int_secret_init(void) return 0; } +static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash) + __aligned(sizeof(unsigned long)); + /* * Get a random word for internal kernel use only. Similar to urandom but * with the goal of minimal entropy pool depletion. As a result, the random * value is not cryptographically secure but for several uses the cost of * depleting entropy is too high */ -static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); unsigned int get_random_int(void) { __u32 *hash; @@ -1849,6 +2129,11 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; + if (!crng_ready()) { + crng_fast_load(buffer, count); + return; + } + /* Suspend writing if we're above the trickle threshold. * We'll be woken up again once below random_write_wakeup_thresh, * or when the calling thread is about to terminate. 
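Note on the interface impact of the CRNG conversion above: with the nonblocking pool gone, getrandom(2) without GRND_RANDOM waits only for the one-time CRNG initialization and never blocks on the entropy estimate afterwards. A userspace illustration, assuming a libc that exposes the wrapper (glibc 2.25 and later; buffer size arbitrary):

    #include <stdio.h>
    #include <sys/random.h>

    int main(void)
    {
            unsigned char buf[32];
            ssize_t n;

            /* Without GRND_RANDOM this blocks at most once, until the
             * kernel reports "random: crng init done". */
            n = getrandom(buf, sizeof(buf), 0);
            if (n < 0) {
                    perror("getrandom");
                    return 1;
            }
            printf("got %zd random bytes\n", n);
            return 0;
    }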
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE config COMMON_CLK_NXP def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX) select REGMAP_MMIO if ARCH_LPC32XX + select MFD_SYSCON if ARCH_LPC18XX ---help--- Support for clock providers on NXP platforms. @@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index) struct clk_programmable *prog = to_clk_programmable(hw); const struct clk_programmable_layout *layout = prog->layout; unsigned int mask = layout->css_mask; - unsigned int pckr = 0; + unsigned int pckr = index; if (layout->have_slck_mck) mask |= AT91_PMC_CSSMCK_MCK; @@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) return -ENOMEM; regmap = syscon_node_to_regmap(of_get_parent(np)); - if (!regmap) { + if (IS_ERR(regmap)) { dev_err(&pdev->dev, "failed to have parent regmap\n"); - return -EINVAL; + return PTR_ERR(regmap); } for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) { @@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev) /* register fixed rate clocks */ clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL, - CLK_IS_ROOT, 24000000); + 0, 24000000); clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL, - CLK_IS_ROOT, 8000000); + 0, 8000000); clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL, - CLK_IS_ROOT, 8000000); + 0, 8000000); clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL, - CLK_IS_ROOT, 32000); + 0, 32000); clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL, - CLK_IS_ROOT, 24000000); + 0, 24000000); /* fixed rate (optional) clock */ if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) { pr_info("pic32-clk: dt requests SOSC.\n"); @@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, } cclk = clk_register(NULL, &cpuclk->hw); - if (IS_ERR(clk)) { + if (IS_ERR(cclk)) { pr_err("%s: could not register cpuclk %s\n", __func__, name); - ret = PTR_ERR(clk); + ret = PTR_ERR(cclk); goto free_rate_table; } @@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw, #define ROCKCHIP_MMC_DEGREE_MASK 0x3 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET) -#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1 -#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1 #define PSECS_PER_SEC 1000000000000LL @@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name, return ERR_PTR(-ENOMEM); init.name = name; + init.flags = 0; init.num_parents = num_parents; init.parent_names = parent_names; init.ops = &rockchip_mmc_clk_ops; @@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name, mmc_clock->reg = reg; mmc_clock->shift = shift; - /* - * Assert init_state to soft reset the CLKGEN - * for mmc tuning phase and degree - */ - if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT) - writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET, - ROCKCHIP_MMC_INIT_STATE_RESET, - mmc_clock->shift), mmc_clock->reg); - clk = clk_register(NULL, &mmc_clock->hw); if (IS_ERR(clk)) kfree(mmc_clock); @@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { RK3399_CLKGATE_CON(13), 1, GFLAGS), /* perihp */ - GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, + GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(5), 0, GFLAGS), - GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, + GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(5), 1, 
GFLAGS), COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, @@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = { static const char *const rk3399_cru_critical_clocks[] __initconst = { "aclk_cci_pre", + "aclk_gic", + "aclk_gic_noc", "pclk_perilp0", "pclk_perilp0", "hclk_perilp0", @@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np) ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); return; } @@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np) ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); if (IS_ERR(ctx)) { pr_err("%s: rockchip pmu clk init failed\n", __func__); + iounmap(reg_base); return; } @@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data { u8 width_div; u8 width_mux; + + u32 flags; }; struct reset_data { @@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node, data->has_div ? &div->hw : NULL, data->has_div ? &clk_divider_ops : NULL, &gate->hw, &clk_gate_ops, - 0); + data->flags); if (IS_ERR(clk)) { pr_err("%s: Couldn't register the clock\n", clk_name); goto free_div; @@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon .offset_rst = 29, .offset_mux = 24, .width_mux = 2, + .flags = CLK_SET_RATE_PARENT, }; static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node) @@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw) static u8 tcon_ch1_get_parent(struct clk_hw *hw) { struct tcon_ch1_clk *tclk = hw_to_tclk(hw); - int num_parents = clk_hw_get_num_parents(hw); u32 reg; reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT; reg &= reg >> TCON_CH1_SCLK2_MUX_MASK; - if (reg >= num_parents) - return -EINVAL; - return reg; } @@ -27,6 +27,20 @@ config CLKBLD_I8253 config CLKSRC_MMIO bool +config BCM2835_TIMER + bool "BCM2835 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables the support for the BCM2835 timer driver. + +config BCM_KONA_TIMER + bool "BCM mobile timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables the support for the BCM Kona mobile timer driver. + config DIGICOLOR_TIMER bool "Digicolor timer driver" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS @@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU help Use the always on PRCMU Timer as clocksource +config CLPS711X_TIMER + bool "Cirrus logic timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Cirrus Logic PS711 timer. + +config ATLAS7_TIMER + bool "Atlas7 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Atlas7 timer. + +config MOXART_TIMER + bool "Moxart timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Moxart timer. + +config MXS_TIMER + bool "Mxs timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + select STMP_DEVICE + help + Enables support for the Mxs timer. + +config PRIMA2_TIMER + bool "Prima2 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Prima2 timer. 
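Note on the pic32 change earlier in this group: CLK_IS_ROOT had become a no-op and was removed from the tree, because registering a clock with a NULL parent is what makes it a root; callers therefore pass a plain flags word now. A sketch of the resulting shape (names and rate taken from the patch):

    /* A fixed-rate root clock needs no special flag: the NULL parent
     * alone marks it as a root in the common clock framework. */
    clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk",
                                            NULL /* no parent */,
                                            0 /* flags */, 24000000);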
+ +config U300_TIMER + bool "U300 timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + depends on ARM + select CLKSRC_MMIO + help + Enables support for the U300 timer. + +config NSPIRE_TIMER + bool "NSpire timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Nspire timer. + +config KEYSTONE_TIMER + bool "Keystone timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + depends on ARM || ARM64 + select CLKSRC_MMIO + help + Enables support for the Keystone timer. + +config INTEGRATOR_AP_TIMER + bool "Integrator-ap timer driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_MMIO + help + Enables support for the Integrator-ap timer. + config CLKSRC_DBX500_PRCMU_SCHED_CLOCK bool "Clocksource PRCMU Timer sched_clock" depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK) @@ -208,14 +288,16 @@ config ARM_ARCH_TIMER select CLKSRC_ACPI if ACPI config ARM_ARCH_TIMER_EVTSTREAM - bool "Support for ARM architected timer event stream generation" + bool "Enable ARM architected timer event stream generation by default" default y if ARM_ARCH_TIMER depends on ARM_ARCH_TIMER help - This option enables support for event stream generation based on - the ARM architected timer. It is used for waking up CPUs executing - the wfe instruction at a frequency represented as a power-of-2 - divisor of the clock rate. + This option enables support by default for event stream generation + based on the ARM architected timer. It is used for waking up CPUs + executing the wfe instruction at a frequency represented as a + power-of-2 divisor of the clock rate. The behaviour can also be + overridden on the command line using the + clocksource.arm_arch_timer.evtstream parameter. The main use of the event stream is wfe-based timeouts of userspace locking implementations. It might also be useful for imposing timeout on wfe to safeguard against any programming errors in case an expected @@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM hardware anomalies of missing events. config ARM_GLOBAL_TIMER - bool + bool "Support for the ARM global timer" if COMPILE_TEST select CLKSRC_OF if OF + depends on ARM help This options enables support for the ARM global timer unit @@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK Use ARM global timer clock source as sched_clock config ARMV7M_SYSTICK - bool + bool "Support for the ARMv7M system time" if COMPILE_TEST select CLKSRC_OF if OF select CLKSRC_MMIO help @@ -254,9 +337,12 @@ config ATMEL_PIT def_bool SOC_AT91SAM9 || SOC_SAMA5 config ATMEL_ST - bool + bool "Atmel ST timer support" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS select CLKSRC_OF select MFD_SYSCON + help + Support for the Atmel ST timer. config CLKSRC_METAG_GENERIC def_bool y if METAG @@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT Support for Multi Core Timer controller on Exynos SoCs. config CLKSRC_SAMSUNG_PWM - bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST + bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM help @@ -293,6 +379,14 @@ config VF_PIT_TIMER help Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. +config OXNAS_RPS_TIMER + bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_OF + select CLKSRC_MMIO + help + This enables support for the Oxford Semiconductor OXNAS RPS timers. 
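Note on the sun4i-a10 display-clock change above: routing CLK_SET_RATE_PARENT through the new data->flags field means a rate request on the TCON channel-0 clock may now propagate upward and re-rate the parent PLL rather than being satisfied by the local divider/mux alone. From a consumer's point of view (sketch only; the connection-id string and rate are hypothetical, and linux/clk.h is assumed):

    static int tcon_set_pixel_clock(struct device *dev, unsigned long rate)
    {
            struct clk *clk = devm_clk_get(dev, "tcon-ch0");  /* name hypothetical */

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* With CLK_SET_RATE_PARENT the framework may retune the
             * parent PLL to hit this rate, not just pick a divider. */
            return clk_set_rate(clk, rate);
    }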
+ config SYS_SUPPORTS_SH_CMT bool @@ -361,8 +455,8 @@ config CLKSRC_QCOM Qualcomm SoCs. config CLKSRC_VERSATILE - bool "ARM Versatile (Express) reference platforms clock source" - depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET + bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST + depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET select CLKSRC_OF default y if MFD_VEXPRESS_SYSREG help @@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o obj-$(CONFIG_ORION_TIMER) += time-orion.o -obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o -obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o -obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o -obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o -obj-$(CONFIG_ARCH_MXS) += mxs_timer.o +obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o +obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o +obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o +obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o +obj-$(CONFIG_MXS_TIMER) += mxs_timer.o obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o -obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o -obj-$(CONFIG_ARCH_U300) += timer-u300.o +obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o +obj-$(CONFIG_U300_TIMER) += timer-u300.o obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o -obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o -obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o +obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o +obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o @@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o +obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o @@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o -obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o -obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o +obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o +obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o @@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI; static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; +static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); + +static int __init early_evtstrm_cfg(char *buf) +{ + return strtobool(buf, &evtstrm_enable); +} +early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg); + /* * Architected system timer support. 
*/ @@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk) enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); arch_counter_set_user_access(); - if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM)) + if (evtstrm_enable) arch_timer_configure_evtstream(); return 0; @@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches) return needs_probing; } -static void __init arch_timer_common_init(void) +static int __init arch_timer_common_init(void) { unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; /* Wait until both nodes are probed if we have two timers */ if ((arch_timers_present & mask) != mask) { if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match)) - return; + return 0; if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match)) - return; + return 0; } arch_timer_banner(arch_timers_present); arch_counter_register(arch_timers_present); - arch_timer_arch_init(); + return arch_timer_arch_init(); } -static void __init arch_timer_init(void) +static int __init arch_timer_init(void) { + int ret; /* * If HYP mode is available, we know that the physical timer * has been configured to be accessible from PL1. Use it, so @@ -739,23 +748,30 @@ static void __init arch_timer_init(void) if (!has_ppi) { pr_warn("arch_timer: No interrupt available, giving up\n"); - return; + return -EINVAL; } } - arch_timer_register(); - arch_timer_common_init(); + ret = arch_timer_register(); + if (ret) + return ret; + + ret = arch_timer_common_init(); + if (ret) + return ret; arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI]; + + return 0; } -static void __init arch_timer_of_init(struct device_node *np) +static int __init arch_timer_of_init(struct device_node *np) { int i; if (arch_timers_present & ARCH_CP15_TIMER) { pr_warn("arch_timer: multiple nodes in dt, skipping\n"); - return; + return 0; } arch_timers_present |= ARCH_CP15_TIMER; @@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np) of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) arch_timer_uses_ppi = PHYS_SECURE_PPI; - arch_timer_init(); + return arch_timer_init(); } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); -static void __init arch_timer_mem_init(struct device_node *np) +static int __init arch_timer_mem_init(struct device_node *np) { struct device_node *frame, *best_frame = NULL; void __iomem *cntctlbase, *base; - unsigned int irq; + unsigned int irq, ret = -EINVAL; u32 cnttidr; arch_timers_present |= ARCH_MEM_TIMER; cntctlbase = of_iomap(np, 0); if (!cntctlbase) { pr_err("arch_timer: Can't find CNTCTLBase\n"); - return; + return -ENXIO; } cnttidr = readl_relaxed(cntctlbase + CNTTIDR); @@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np) best_frame = of_node_get(frame); } + ret= -ENXIO; base = arch_counter_base = of_iomap(best_frame, 0); if (!base) { pr_err("arch_timer: Can't map frame's registers\n"); @@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np) else irq = irq_of_parse_and_map(best_frame, 0); + ret = -EINVAL; if (!irq) { pr_err("arch_timer: Frame missing %s irq", arch_timer_mem_use_virtual ? 
"virt" : "phys"); @@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np) } arch_timer_detect_rate(base, np); - arch_timer_mem_register(base, irq); - arch_timer_common_init(); + ret = arch_timer_mem_register(base, irq); + if (ret) + goto out; + + return arch_timer_common_init(); out: iounmap(cntctlbase); of_node_put(best_frame); + return ret; } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", arch_timer_mem_init); @@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void) register_current_timer_delay(>_delay_timer); } -static void __init gt_clocksource_init(void) +static int __init gt_clocksource_init(void) { writel(0, gt_base + GT_CONTROL); writel(0, gt_base + GT_COUNTER0); @@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void) #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); #endif - clocksource_register_hz(>_clocksource, gt_clk_rate); + return clocksource_register_hz(>_clocksource, gt_clk_rate); } static int gt_cpu_notify(struct notifier_block *self, unsigned long action, @@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = { .notifier_call = gt_cpu_notify, }; -static void __init global_timer_of_register(struct device_node *np) +static int __init global_timer_of_register(struct device_node *np) { struct clk *gt_clk; int err = 0; @@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np) if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 && (read_cpuid_id() & 0xf0000f) < 0x200000) { pr_warn("global-timer: non support for this cpu version.\n"); - return; + return -ENOSYS; } gt_ppi = irq_of_parse_and_map(np, 0); if (!gt_ppi) { pr_warn("global-timer: unable to parse irq\n"); - return; + return -EINVAL; } gt_base = of_iomap(np, 0); if (!gt_base) { pr_warn("global-timer: invalid base address\n"); - return; + return -ENXIO; } gt_clk = of_clk_get(np, 0); @@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np) } /* Immediately configure the timer on the boot CPU */ - gt_clocksource_init(); - gt_clockevents_init(this_cpu_ptr(gt_evt)); + err = gt_clocksource_init(); + if (err) + goto out_irq; + + err = gt_clockevents_init(this_cpu_ptr(gt_evt)); + if (err) + goto out_irq; + gt_delay_timer_init(); - return; + return 0; out_irq: free_percpu_irq(gt_ppi, gt_evt); @@ -347,6 +353,8 @@ out_clk: out_unmap: iounmap(gt_base); WARN(err, "ARM Global timer register failed (%d)\n", err); + + return err; } /* Only tested on r2p2 and r3p0 */ @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/clocksource.h> #include <linux/clockchips.h> +#include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/clk.h> @@ -21,7 +22,7 @@ #define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF -static void __init system_timer_of_register(struct device_node *np) +static int __init system_timer_of_register(struct device_node *np) { struct clk *clk = NULL; void __iomem *base; @@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np) base = of_iomap(np, 0); if (!base) { pr_warn("system-timer: invalid base address\n"); - return; + return -ENXIO; } ret = of_property_read_u32(np, "clock-frequency", &rate); if (ret) { clk = of_clk_get(np, 0); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); goto out_unmap; + } ret = clk_prepare_enable(clk); if (ret) goto out_clk_put; rate = clk_get_rate(clk); - if (!rate) + if (!rate) { + ret = -EINVAL; goto out_clk_disable; + } } 
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR); @@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np) pr_info("ARM System timer initialized as clocksource\n"); - return; + return 0; out_clk_disable: clk_disable_unprepare(clk); @@ -73,6 +78,8 @@ out_clk_put: out_unmap: iounmap(base); pr_warn("ARM System timer register failed (%d)\n", ret); + + return ret; } CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick", @@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id) * Timer initialization * --------------------------------------------------------------------------- */ -static void __init asm9260_timer_init(struct device_node *np) +static int __init asm9260_timer_init(struct device_node *np) { int irq; struct clk *clk; @@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np) unsigned long rate; priv.base = of_io_request_and_map(np, 0, np->name); - if (IS_ERR(priv.base)) - panic("%s: unable to map resource", np->name); + if (IS_ERR(priv.base)) { + pr_err("%s: unable to map resource", np->name); + return PTR_ERR(priv.base); + } clk = of_clk_get(np, 0); ret = clk_prepare_enable(clk); - if (ret) - panic("Failed to enable clk!\n"); + if (ret) { + pr_err("Failed to enable clk!\n"); + return ret; + } irq = irq_of_parse_and_map(np, 0); ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER, DRIVER_NAME, &event_dev); - if (ret) - panic("Failed to setup irq!\n"); + if (ret) { + pr_err("Failed to setup irq!\n"); + return ret; + } /* set all timers for count-up */ writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR); @@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np) priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); event_dev.cpumask = cpumask_of(0); clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe); + + return 0; } CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer", asm9260_timer_init); @@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) } } -static void __init bcm2835_timer_init(struct device_node *node) +static int __init bcm2835_timer_init(struct device_node *node) { void __iomem *base; u32 freq; - int irq; + int irq, ret; struct bcm2835_timer *timer; base = of_iomap(node, 0); - if (!base) - panic("Can't remap registers"); + if (!base) { + pr_err("Can't remap registers"); + return -ENXIO; + } - if (of_property_read_u32(node, "clock-frequency", &freq)) - panic("Can't read clock-frequency"); + ret = of_property_read_u32(node, "clock-frequency", &freq); + if (ret) { + pr_err("Can't read clock-frequency"); + return ret; + } system_clock = base + REG_COUNTER_LO; sched_clock_register(bcm2835_sched_read, 32, freq); @@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node) freq, 300, 32, clocksource_mmio_readl_up); irq = irq_of_parse_and_map(node, DEFAULT_TIMER); - if (irq <= 0) - panic("Can't parse IRQ"); + if (irq <= 0) { + pr_err("Can't parse IRQ"); + return -EINVAL; + } timer = kzalloc(sizeof(*timer), GFP_KERNEL); - if (!timer) - panic("Can't allocate timer struct\n"); + if (!timer) { + pr_err("Can't allocate timer struct\n"); + return -ENOMEM; + } timer->control = base + REG_CONTROL; timer->compare = base + REG_COMPARE(DEFAULT_TIMER); @@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node) timer->act.dev_id = timer; timer->act.handler = bcm2835_time_interrupt; - if (setup_irq(irq, &timer->act)) - panic("Can't set up timer IRQ\n"); + ret = 
setup_irq(irq, &timer->act); + if (ret) { + pr_err("Can't set up timer IRQ\n"); + return ret; + } clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); pr_info("bcm2835: system timer (irq = %d)\n", irq); + + return 0; } CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", bcm2835_timer_init); @@ -20,7 +20,6 @@ #include <linux/clk.h> #include <linux/io.h> -#include <asm/mach/time.h> #include <linux/of.h> #include <linux/of_address.h> @@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = { .handler = kona_timer_interrupt, }; -static void __init kona_timer_init(struct device_node *node) +static int __init kona_timer_init(struct device_node *node) { u32 freq; struct clk *external_clk; - if (!of_device_is_available(node)) { - pr_info("Kona Timer v1 marked as disabled in device tree\n"); - return; - } - external_clk = of_clk_get_by_name(node, NULL); if (!IS_ERR(external_clk)) { @@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node) arch_timer_rate = freq; } else { pr_err("Kona Timer v1 unable to determine clock-frequency"); - return; + return -EINVAL; } /* Setup IRQ numbers */ @@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node) kona_timer_clockevents_init(); setup_irq(timers.tmr_irq, &kona_timer_irq); kona_timer_set_next_event((arch_timer_rate / HZ), NULL); + + return 0; } CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init); @@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, return NOTIFY_DONE; } -static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, +static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, u32 timer_width) { struct ttc_timer_clocksource *ttccs; int err; ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL); - if (WARN_ON(!ttccs)) - return; + if (!ttccs) + return -ENOMEM; ttccs->ttc.clk = clk; err = clk_prepare_enable(ttccs->ttc.clk); - if (WARN_ON(err)) { + if (err) { kfree(ttccs); - return; + return err; } ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); @@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, ttccs->ttc.clk_rate_change_nb.notifier_call = ttc_rate_change_clocksource_cb; ttccs->ttc.clk_rate_change_nb.next = NULL; - if (clk_notifier_register(ttccs->ttc.clk, - &ttccs->ttc.clk_rate_change_nb)) + + err = clk_notifier_register(ttccs->ttc.clk, + &ttccs->ttc.clk_rate_change_nb); + if (err) pr_warn("Unable to register clock notifier.\n"); ttccs->ttc.base_addr = base; @@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base, ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); - if (WARN_ON(err)) { + if (err) { kfree(ttccs); - return; + return err; } ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; sched_clock_register(ttc_sched_clock_read, timer_width, ttccs->ttc.freq / PRESCALE); + + return 0; } static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, @@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, } } -static void __init ttc_setup_clockevent(struct clk *clk, - void __iomem *base, u32 irq) +static int __init ttc_setup_clockevent(struct clk *clk, + void __iomem *base, u32 irq) { struct ttc_timer_clockevent *ttcce; int err; ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL); - if (WARN_ON(!ttcce)) - return; + if (!ttcce) + return -ENOMEM; ttcce->ttc.clk = clk; err = 
clk_prepare_enable(ttcce->ttc.clk); - if (WARN_ON(err)) { + if (err) { kfree(ttcce); - return; + return err; } ttcce->ttc.clk_rate_change_nb.notifier_call = ttc_rate_change_clockevent_cb; ttcce->ttc.clk_rate_change_nb.next = NULL; - if (clk_notifier_register(ttcce->ttc.clk, - &ttcce->ttc.clk_rate_change_nb)) + + err = clk_notifier_register(ttcce->ttc.clk, + &ttcce->ttc.clk_rate_change_nb); + if (err) { pr_warn("Unable to register clock notifier.\n"); + return err; + } + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); ttcce->ttc.base_addr = base; @@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk, err = request_irq(irq, ttc_clock_event_interrupt, IRQF_TIMER, ttcce->ce.name, ttcce); - if (WARN_ON(err)) { + if (err) { kfree(ttcce); - return; + return err; } clockevents_config_and_register(&ttcce->ce, ttcce->ttc.freq / PRESCALE, 1, 0xfffe); + + return 0; } /** @@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk, * Initializes the timer hardware and registers the clock source and clock event * timers with the Linux kernel timer framework */ -static void __init ttc_timer_init(struct device_node *timer) +static int __init ttc_timer_init(struct device_node *timer) { unsigned int irq; void __iomem *timer_baseaddr; struct clk *clk_cs, *clk_ce; static int initialized; - int clksel; + int clksel, ret; u32 timer_width = 16; if (initialized) - return; + return 0; initialized = 1; @@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer) timer_baseaddr = of_iomap(timer, 0); if (!timer_baseaddr) { pr_err("ERROR: invalid timer base address\n"); - BUG(); + return -ENXIO; } irq = irq_of_parse_and_map(timer, 1); if (irq <= 0) { pr_err("ERROR: invalid interrupt number\n"); - BUG(); + return -EINVAL; } of_property_read_u32(timer, "timer-width", &timer_width); @@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer) clk_cs = of_clk_get(timer, clksel); if (IS_ERR(clk_cs)) { pr_err("ERROR: timer input clock not found\n"); - BUG(); + return PTR_ERR(clk_cs); } clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); @@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer) clk_ce = of_clk_get(timer, clksel); if (IS_ERR(clk_ce)) { pr_err("ERROR: timer input clock not found\n"); - BUG(); + return PTR_ERR(clk_ce); } - ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); - ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); + ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); + if (ret) + return ret; + + ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); + if (ret) + return ret; pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq); + + return 0; } CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init); @@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void) #endif -static void __init clksrc_dbx500_prcmu_init(struct device_node *node) +static int __init clksrc_dbx500_prcmu_init(struct device_node *node) { clksrc_dbx500_timer_base = of_iomap(node, 0); @@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node) #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K); #endif - clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); + return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); } CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4", clksrc_dbx500_prcmu_init); @@ -28,15 +28,23 @@ void __init 
clocksource_probe(void) { struct device_node *np; const struct of_device_id *match; - of_init_fn_1 init_func; + of_init_fn_1_ret init_func_ret; unsigned clocksources = 0; + int ret; for_each_matching_node_and_match(np, __clksrc_of_table, &match) { if (!of_device_is_available(np)) continue; - init_func = match->data; - init_func(np); + init_func_ret = match->data; + + ret = init_func_ret(np); + if (ret) { + pr_err("Failed to initialize '%s': %d", + of_node_full_name(np), ret); + continue; + } + clocksources++; } @@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np) return 0; } -static void __init st_clksrc_of_register(struct device_node *np) +static int __init st_clksrc_of_register(struct device_node *np) { int ret; uint32_t mode; @@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np) ret = of_property_read_u32(np, "st,lpc-mode", &mode); if (ret) { pr_err("clksrc-st-lpc: An LPC mode must be provided\n"); - return; + return ret; } /* LPC can either run as a Clocksource or in RTC or WDT mode */ if (mode != ST_LPC_MODE_CLKSRC) - return; + return 0; ddata.base = of_iomap(np, 0); if (!ddata.base) { pr_err("clksrc-st-lpc: Unable to map iomem\n"); - return; + return -ENXIO; } - if (st_clksrc_setup_clk(np)) { + ret = st_clksrc_setup_clk(np); + if (ret) { iounmap(ddata.base); - return; + return ret; } - if (st_clksrc_init()) { + ret = st_clksrc_init(); + if (ret) { clk_disable_unprepare(ddata.clk); clk_put(ddata.clk); iounmap(ddata.base); - return; + return ret; } pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n", clk_get_rate(ddata.clk)); + + return ret; } CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register); @@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base, } #ifdef CONFIG_CLKSRC_OF -static void __init clps711x_timer_init(struct device_node *np) +static int __init clps711x_timer_init(struct device_node *np) { unsigned int irq = irq_of_parse_and_map(np, 0); struct clk *clock = of_clk_get(np, 0); @@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np) switch (of_alias_get_id(np, "timer")) { case CLPS711X_CLKSRC_CLOCKSOURCE: - BUG_ON(_clps711x_clksrc_init(clock, base)); - break; + return _clps711x_clksrc_init(clock, base); case CLPS711X_CLKSRC_CLOCKEVENT: - BUG_ON(_clps711x_clkevt_init(clock, base, irq)); - break; + return _clps711x_clkevt_init(clock, base, irq); default: - break; + return -EINVAL; } } CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init); @@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = { #endif static int num_called; -static void __init dw_apb_timer_init(struct device_node *timer) +static int __init dw_apb_timer_init(struct device_node *timer) { switch (num_called) { case 0: @@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer) } num_called++; + + return 0; } CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); @@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void) return exynos4_read_count_32(); } -static void __init exynos4_clocksource_init(void) +static int __init exynos4_clocksource_init(void) { exynos4_mct_frc_start(); @@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void) panic("%s: can't register clocksource\n", mct_frc.name); sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); + + 
return 0; } static void exynos4_mct_comp0_stop(void) @@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = { .dev_id = &mct_comp_device, }; -static void exynos4_clockevent_init(void) +static int exynos4_clockevent_init(void) { mct_comp_device.cpumask = cpumask_of(0); clockevents_config_and_register(&mct_comp_device, clk_rate, 0xf, 0xffffffff); setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); + + return 0; } static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); @@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = { .notifier_call = exynos4_mct_cpu_notify, }; -static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) { int err, cpu; struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); @@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem /* Immediately configure the timer on the boot CPU */ exynos4_local_timer_setup(mevt); - return; + return 0; out_irq: free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); + return err; } -static void __init mct_init_dt(struct device_node *np, unsigned int int_type) +static int __init mct_init_dt(struct device_node *np, unsigned int int_type) { u32 nr_irqs, i; + int ret; mct_int_type = int_type; @@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type) for (i = MCT_L0_IRQ; i < nr_irqs; i++) mct_irqs[i] = irq_of_parse_and_map(np, i); - exynos4_timer_resources(np, of_iomap(np, 0)); - exynos4_clocksource_init(); - exynos4_clockevent_init(); + ret = exynos4_timer_resources(np, of_iomap(np, 0)); + if (ret) + return ret; + + ret = exynos4_clocksource_init(); + if (ret) + return ret; + + return exynos4_clockevent_init(); } -static void __init mct_init_spi(struct device_node *np) +static int __init mct_init_spi(struct device_node *np) { return mct_init_dt(np, MCT_INT_SPI); } -static void __init mct_init_ppi(struct device_node *np) +static int __init mct_init_ppi(struct device_node *np) { return mct_init_dt(np, MCT_INT_PPI); } @@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq) return 0; } -static void __init ftm_timer_init(struct device_node *np) +static int __init ftm_timer_init(struct device_node *np) { unsigned long freq; - int irq; + int ret, irq; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) - return; + return -ENOMEM; + ret = -ENXIO; priv->clkevt_base = of_iomap(np, 0); if (!priv->clkevt_base) { pr_err("ftm: unable to map event timer registers\n"); @@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np) goto err; } + ret = -EINVAL; irq = irq_of_parse_and_map(np, 0); if (irq <= 0) { pr_err("ftm: unable to get IRQ from DT, %d\n", irq); @@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np) if (!freq) goto err; - if (ftm_calc_closest_round_cyc(freq)) + ret = ftm_calc_closest_round_cyc(freq); + if (ret) goto err; - if (ftm_clocksource_init(freq)) + ret = ftm_clocksource_init(freq); + if (ret) goto err; - if (ftm_clockevent_init(freq, irq)) + ret = ftm_clockevent_init(freq, irq); + if (ret) goto err; - return; + return 0; err: kfree(priv); + return ret; } CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init); @@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = { #define REG_CH 0 #define REG_COMM 1 -static void __init h8300_16timer_init(struct device_node *node) +static int __init 
h8300_16timer_init(struct device_node *node) { void __iomem *base[2]; int ret, irq; @@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node) clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("failed to get clock for clocksource\n"); - return; + return PTR_ERR(clk); } + ret = -ENXIO; base[REG_CH] = of_iomap(node, 0); if (!base[REG_CH]) { pr_err("failed to map registers for clocksource\n"); goto free_clk; } @@ -151,6 +152,7 @@ goto unmap_ch; } + ret = -EINVAL; irq = irq_of_parse_and_map(node, 0); if (!irq) { pr_err("failed to get irq for clockevent\n"); goto unmap_comm; } @@ -174,7 +176,7 @@ clocksource_register_hz(&timer16_priv.cs, clk_get_rate(clk) / 8); - return; + return 0; unmap_comm: iounmap(base[REG_COMM]); @@ -182,6 +184,8 @@ unmap_ch: iounmap(base[REG_CH]); free_clk: clk_put(clk); + return ret; } -CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init); +CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", + h8300_16timer_init); @@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = { }, }; -static void __init h8300_8timer_init(struct device_node *node) +static int __init h8300_8timer_init(struct device_node *node) { void __iomem *base; - int irq; + int irq, ret; struct clk *clk; clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("failed to get clock for clockevent\n"); - return; + return PTR_ERR(clk); } + ret = -ENXIO; base = of_iomap(node, 0); if (!base) { pr_err("failed to map registers for clockevent\n"); goto free_clk; } + ret = -EINVAL; irq = irq_of_parse_and_map(node, 0); if (!irq) { pr_err("failed to get irq for clockevent\n"); goto unmap_reg; } @@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node) clockevents_config_and_register(&timer8_priv.ced, timer8_priv.rate, 1, 0x0000ffff); - return; + return 0; unmap_reg: iounmap(base); free_clk: clk_put(clk); + return ret; } CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init); @@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = { #define CH_L 0 #define CH_H 1 -static void __init h8300_tpu_init(struct device_node *node) +static int __init h8300_tpu_init(struct device_node *node) { void __iomem *base[2]; struct clk *clk; + int ret = -ENXIO; clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("failed to get clock for clocksource\n"); - return; + return PTR_ERR(clk); } base[CH_L] = of_iomap(node, CH_L); @@ -144,14 +145,13 @@ tpu_priv.mapbase1 = base[CH_L]; tpu_priv.mapbase2 = base[CH_H]; - clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); - - return; + return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64); unmap_L: iounmap(base[CH_L]); free_clk: clk_put(clk); + return ret; } CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init); @@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = { .dev_id = &meson6_clockevent, }; -static void __init meson6_timer_init(struct device_node *node) +static int __init meson6_timer_init(struct device_node *node) { u32 val; int ret, irq; timer_base = of_io_request_and_map(node, 0, "meson6-timer"); - if (IS_ERR(timer_base)) - panic("Can't map registers"); + if (IS_ERR(timer_base)) { + pr_err("Can't map registers"); + return -ENXIO; + } irq = irq_of_parse_and_map(node, 0); - if (irq <= 0) - panic("Can't parse IRQ"); + if (irq <= 0) { + pr_err("Can't parse IRQ"); + return -EINVAL; + } /* Set 1us for timer 
E */ val = readl(timer_base + TIMER_ISA_MUX); @@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node) meson6_clkevt_time_stop(CED_ID); ret = setup_irq(irq, &meson6_timer_irq); - if (ret) + if (ret) { pr_warn("failed to setup irq %d\n", irq); + return ret; + } meson6_clockevent.cpumask = cpu_possible_mask; meson6_clockevent.irq = irq; clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC, 1, 0xfffe); + return 0; } CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer", meson6_timer_init); @@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = { .archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC }, }; -static void __init __gic_clocksource_init(void) +static int __init __gic_clocksource_init(void) { int ret; @@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void) ret = clocksource_register_hz(&gic_clocksource, gic_frequency); if (ret < 0) pr_warn("GIC: Unable to register clocksource\n"); + + return ret; } void __init gic_clocksource_init(unsigned int frequency) @@ -179,31 +181,35 @@ static void __init gic_clocksource_of_init(struct device_node *node) struct clk *clk; int ret; - if (WARN_ON(!gic_present || !node->parent || - !of_device_is_compatible(node->parent, "mti,gic"))) - return; + if (!gic_present || !node->parent || + !of_device_is_compatible(node->parent, "mti,gic")) { + pr_warn("No DT definition for the mips gic driver"); + return -ENXIO; + } clk = of_clk_get(node, 0); if (!IS_ERR(clk)) { if (clk_prepare_enable(clk) < 0) { pr_err("GIC failed to enable clock\n"); clk_put(clk); - return; + return -EINVAL; } gic_frequency = clk_get_rate(clk); } else if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) { pr_err("GIC frequency not specified.\n"); - return; + return -EINVAL; } gic_timer_irq = irq_of_parse_and_map(node, 0); if (!gic_timer_irq) { pr_err("GIC timer IRQ not specified.\n"); - return; + return -EINVAL; } - __gic_clocksource_init(); + ret = __gic_clocksource_init(); + if (ret) + return ret; ret = gic_clockevent_init(); if (!ret && !IS_ERR(clk)) { @@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node) /* And finally start the counter */ gic_start_count(); + + return 0; } CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer", gic_clocksource_of_init); @@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = { .dev_id = &moxart_clockevent, }; -static void __init moxart_timer_init(struct device_node *node) +static int __init moxart_timer_init(struct device_node *node) { int ret, irq; unsigned long pclk; struct clk *clk; base = of_iomap(node, 0); - if (!base) - panic("%s: of_iomap failed\n", node->full_name); + if (!base) { + pr_err("%s: of_iomap failed\n", node->full_name); + return -ENXIO; + } irq = irq_of_parse_and_map(node, 0); - if (irq <= 0) - panic("%s: irq_of_parse_and_map failed\n", node->full_name); + if (irq <= 0) { + pr_err("%s: irq_of_parse_and_map failed\n", node->full_name); + return -EINVAL; + } ret = setup_irq(irq, &moxart_timer_irq); - if (ret) - panic("%s: setup_irq failed\n", node->full_name); + if (ret) { + pr_err("%s: setup_irq failed\n", node->full_name); + return ret; + } clk = of_clk_get(node, 0); - if (IS_ERR(clk)) - panic("%s: of_clk_get failed\n", node->full_name); + if (IS_ERR(clk)) { + pr_err("%s: of_clk_get failed\n", node->full_name); + return PTR_ERR(clk); + } pclk = clk_get_rate(clk); - if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, - "moxart_timer", pclk, 200, 32, - clocksource_mmio_readl_down)) - panic("%s: 
clocksource_mmio_init failed\n", node->full_name); + ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, + "moxart_timer", pclk, 200, 32, + clocksource_mmio_readl_down); + if (ret) { + pr_err("%s: clocksource_mmio_init failed\n", node->full_name); + return ret; + } clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); @@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node) */ clockevents_config_and_register(&moxart_clockevent, pclk, 0x4, 0xfffffffe); + + return 0; } CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); @@ -250,7 +250,7 @@ out: return ret; } -static void __init mps2_timer_init(struct device_node *np) +static int __init mps2_timer_init(struct device_node *np) { static int has_clocksource, has_clockevent; int ret; @@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np) ret = mps2_clocksource_init(np); if (!ret) { has_clocksource = 1; - return; + return 0; } } @@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np) ret = mps2_clockevent_init(np); if (!ret) { has_clockevent = 1; - return; + return 0; } } + + return 0; } CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init); @@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer) evt->gpt_base + GPT_IRQ_EN_REG); } -static void __init mtk_timer_init(struct device_node *node) +static int __init mtk_timer_init(struct device_node *node) { struct mtk_clock_event_device *evt; struct resource res; @@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node) evt = kzalloc(sizeof(*evt), GFP_KERNEL); if (!evt) - return; + return -ENOMEM; evt->dev.name = "mtk_tick"; evt->dev.rating = 300; @@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node) mtk_timer_enable_irq(evt, GPT_CLK_EVT); - return; + return 0; err_clk_disable: clk_disable_unprepare(clk); @@ -262,5 +262,7 @@ err_mem: release_mem_region(res.start, resource_size(&res)); err_kzalloc: kfree(evt); + + return -EINVAL; } CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init); @@ -31,8 +31,6 @@ #include <linux/stmp_device.h> #include <linux/sched_clock.h> -#include <asm/mach/time.h> - /* * There are 2 versions of the timrot on Freescale MXS-based SoCs. 
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28 @@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk) return 0; } -static void __init mxs_timer_init(struct device_node *np) +static int __init mxs_timer_init(struct device_node *np) { struct clk *timer_clk; - int irq; + int irq, ret; mxs_timrot_base = of_iomap(np, 0); WARN_ON(!mxs_timrot_base); @@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np) timer_clk = of_clk_get(np, 0); if (IS_ERR(timer_clk)) { pr_err("%s: failed to get clk\n", __func__); - return; + return PTR_ERR(timer_clk); } - clk_prepare_enable(timer_clk); + ret = clk_prepare_enable(timer_clk); + if (ret) + return ret; /* * Initialize timers to a known state @@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np) mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1)); /* init and register the timer to the framework */ - mxs_clocksource_init(timer_clk); - mxs_clockevent_init(timer_clk); + ret = mxs_clocksource_init(timer_clk); + if (ret) + return ret; + + ret = mxs_clockevent_init(timer_clk); + if (ret) + return ret; /* Make irqs happen */ irq = irq_of_parse_and_map(np, 0); - setup_irq(irq, &mxs_timer_irq); + if (irq <= 0) + return -EINVAL; + + return setup_irq(irq, &mxs_timer_irq); } CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init); @@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = { .dev_id = &nmdk_clkevt, }; -static void __init nmdk_timer_init(void __iomem *base, int irq, +static int __init nmdk_timer_init(void __iomem *base, int irq, struct clk *pclk, struct clk *clk) { unsigned long rate; + int ret; mtu_base = base; @@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq, /* Timer 0 is the free running clocksource */ nmdk_clksrc_reset(); - if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", - rate, 200, 32, clocksource_mmio_readl_down)) - pr_err("timer: failed to initialize clock source %s\n", - "mtu_0"); + ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", + rate, 200, 32, clocksource_mmio_readl_down); + if (ret) { + pr_err("timer: failed to initialize clock source %s\n", "mtu_0"); + return ret; + } #ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK sched_clock_register(nomadik_read_sched_clock, 32, rate); @@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq, mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer; mtu_delay_timer.freq = rate; register_current_timer_delay(&mtu_delay_timer); + + return 0; } -static void __init nmdk_timer_of_init(struct device_node *node) +static int __init nmdk_timer_of_init(struct device_node *node) { struct clk *pclk; struct clk *clk; @@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node) int irq; base = of_iomap(node, 0); - if (!base) - panic("Can't remap registers"); + if (!base) { + pr_err("Can't remap registers"); + return -ENXIO; + } pclk = of_clk_get_by_name(node, "apb_pclk"); - if (IS_ERR(pclk)) - panic("could not get apb_pclk"); + if (IS_ERR(pclk)) { + pr_err("could not get apb_pclk"); + return PTR_ERR(pclk); + } clk = of_clk_get_by_name(node, "timclk"); - if (IS_ERR(clk)) - panic("could not get timclk"); + if (IS_ERR(clk)) { + pr_err("could not get timclk"); + return PTR_ERR(clk); + } irq = irq_of_parse_and_map(node, 0); - if (irq <= 0) - panic("Can't parse IRQ"); + if (irq <= 0) { + pr_err("Can't parse IRQ"); + return -EINVAL; + } - nmdk_timer_init(base, irq, pclk, clk); + return nmdk_timer_init(base, irq, pclk, 
clk); } CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu", nmdk_timer_of_init); @@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = { .dev_id = &ckevt_pxa_osmr0, }; -static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) +static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) { + int ret; + timer_writel(0, OIER); timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); @@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) ckevt_pxa_osmr0.cpumask = cpumask_of(0); - setup_irq(irq, &pxa_ost0_irq); + ret = setup_irq(irq, &pxa_ost0_irq); + if (ret) { + pr_err("Failed to setup irq"); + return ret; + } + + ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, + 32, clocksource_mmio_readl_up); + if (ret) { + pr_err("Failed to init clocksource"); + return ret; + } - clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200, - 32, clocksource_mmio_readl_up); clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate, MIN_OSCR_DELTA * 2, 0x7fffffff); + + return 0; } -static void __init pxa_timer_dt_init(struct device_node *np) +static int __init pxa_timer_dt_init(struct device_node *np) { struct clk *clk; - int irq; + int irq, ret; /* timer registers are shared with watchdog timer */ timer_base = of_iomap(np, 0); - if (!timer_base) - panic("%s: unable to map resource\n", np->name); + if (!timer_base) { + pr_err("%s: unable to map resource\n", np->name); + return -ENXIO; + } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { pr_crit("%s: unable to get clk\n", np->name); - return; + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_crit("Failed to prepare clock"); + return ret; } - clk_prepare_enable(clk); /* we are only interested in OS-timer0 irq */ irq = irq_of_parse_and_map(np, 0); if (irq <= 0) { pr_crit("%s: unable to parse OS-timer0 irq\n", np->name); - return; + return -EINVAL; } - pxa_timer_common_init(irq, clk_get_rate(clk)); + return pxa_timer_common_init(irq, clk_get_rate(clk)); } CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); @@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = { .read_current_timer = msm_read_current_timer, }; -static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, +static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, bool percpu) { struct clocksource *cs = &msm_clocksource; @@ -218,12 +218,14 @@ err: sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); msm_delay_timer.freq = dgt_hz; register_current_timer_delay(&msm_delay_timer); + + return res; } -static void __init msm_dt_timer_init(struct device_node *np) +static int __init msm_dt_timer_init(struct device_node *np) { u32 freq; - int irq; + int irq, ret; struct resource res; u32 percpu_offset; void __iomem *base; @@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np) base = of_iomap(np, 0); if (!base) { pr_err("Failed to map event base\n"); - return; + return -ENXIO; } /* We use GPT0 for the clockevent */ irq = irq_of_parse_and_map(np, 1); if (irq <= 0) { pr_err("Can't get irq\n"); - return; + return -EINVAL; } /* We use CPU0's DGT for the clocksource */ if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) percpu_offset = 0; - if (of_address_to_resource(np, 0, &res)) { + ret = of_address_to_resource(np, 0, &res); + if (ret) { pr_err("Failed to parse DGT resource\n"); - return; + return ret; } cpu0_base = ioremap(res.start + 
percpu_offset, resource_size(&res)); if (!cpu0_base) { pr_err("Failed to map source base\n"); - return; + return -EINVAL; } if (of_property_read_u32(np, "clock-frequency", &freq)) { pr_err("Unknown frequency\n"); - return; + return -EINVAL; } event_base = base + 0x4; @@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np) freq /= 4; writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); - msm_timer_init(freq, 32, irq, !!percpu_offset); + return msm_timer_init(freq, 32, irq, !!percpu_offset); } CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); @@ -19,7 +19,8 @@ #define TIMER_LOAD_COUNT0 0x00 #define TIMER_LOAD_COUNT1 0x04 -#define TIMER_CONTROL_REG 0x10 +#define TIMER_CONTROL_REG3288 0x10 +#define TIMER_CONTROL_REG3399 0x1c #define TIMER_INT_STATUS 0x18 #define TIMER_DISABLE 0x0 @@ -31,6 +32,7 @@ struct bc_timer { struct clock_event_device ce; void __iomem *base; + void __iomem *ctrl; u32 freq; }; @@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce) return rk_timer(ce)->base; } +static inline void __iomem *rk_ctrl(struct clock_event_device *ce) +{ + return rk_timer(ce)->ctrl; +} + static inline void rk_timer_disable(struct clock_event_device *ce) { - writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG); + writel_relaxed(TIMER_DISABLE, rk_ctrl(ce)); } static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags) { writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags, - rk_base(ce) + TIMER_CONTROL_REG); + rk_ctrl(ce)); } static void rk_timer_update_counter(unsigned long cycles, @@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void __init rk_timer_init(struct device_node *np) +static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg) { struct clock_event_device *ce = &bc_timer.ce; struct clk *timer_clk; struct clk *pclk; - int ret, irq; + int ret = -EINVAL, irq; bc_timer.base = of_iomap(np, 0); if (!bc_timer.base) { pr_err("Failed to get base address for '%s'\n", TIMER_NAME); - return; + return -ENXIO; } + bc_timer.ctrl = bc_timer.base + ctrl_reg; pclk = of_clk_get_by_name(np, "pclk"); if (IS_ERR(pclk)) { + ret = PTR_ERR(pclk); pr_err("Failed to get pclk for '%s'\n", TIMER_NAME); goto out_unmap; } - if (clk_prepare_enable(pclk)) { + ret = clk_prepare_enable(pclk); + if (ret) { pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); goto out_unmap; } timer_clk = of_clk_get_by_name(np, "timer"); if (IS_ERR(timer_clk)) { + ret = PTR_ERR(timer_clk); pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME); goto out_timer_clk; } - if (clk_prepare_enable(timer_clk)) { + ret = clk_prepare_enable(timer_clk); + if (ret) { pr_err("Failed to enable timer clock\n"); goto out_timer_clk; } @@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np) irq = irq_of_parse_and_map(np, 0); if (!irq) { + ret = -EINVAL; pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); goto out_irq; } ce->name = TIMER_NAME; - ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DYNIRQ; ce->set_next_event = rk_timer_set_next_event; ce->set_state_shutdown = rk_timer_shutdown; ce->set_state_periodic = rk_timer_set_periodic; ce->irq = irq; - ce->cpumask = cpumask_of(0); + ce->cpumask = cpu_possible_mask; ce->rating = 250; 
rk_timer_interrupt_clear(ce); @@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np) clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); - return; + return 0; out_irq: clk_disable_unprepare(timer_clk); @@ -177,6 +191,21 @@ out_timer_clk: clk_disable_unprepare(pclk); out_unmap: iounmap(bc_timer.base); + + return ret; +} + +static int __init rk3288_timer_init(struct device_node *np) +{ + return rk_timer_init(np, TIMER_CONTROL_REG3288); +} + +static int __init rk3399_timer_init(struct device_node *np) +{ + return rk_timer_init(np, TIMER_CONTROL_REG3399); } -CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); +CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", + rk3288_timer_init); +CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", + rk3399_timer_init); @@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel) spin_lock_irqsave(&samsung_pwm_lock, flags); - tcon = __raw_readl(pwm.base + REG_TCON); + tcon = readl_relaxed(pwm.base + REG_TCON); tcon &= ~TCON_START(channel); - __raw_writel(tcon, pwm.base + REG_TCON); + writel_relaxed(tcon, pwm.base + REG_TCON); spin_unlock_irqrestore(&samsung_pwm_lock, flags); } @@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt) spin_lock_irqsave(&samsung_pwm_lock, flags); - tcon = __raw_readl(pwm.base + REG_TCON); + tcon = readl_relaxed(pwm.base + REG_TCON); tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan)); tcon |= TCON_MANUALUPDATE(tcon_chan); - __raw_writel(tcnt, pwm.base + REG_TCNTB(channel)); - __raw_writel(tcnt, pwm.base + REG_TCMPB(channel)); - __raw_writel(tcon, pwm.base + REG_TCON); + writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel)); + writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel)); + writel_relaxed(tcon, pwm.base + REG_TCON); spin_unlock_irqrestore(&samsung_pwm_lock, flags); } @@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic) spin_lock_irqsave(&samsung_pwm_lock, flags); - tcon = __raw_readl(pwm.base + REG_TCON); + tcon = readl_relaxed(pwm.base + REG_TCON); tcon &= ~TCON_MANUALUPDATE(channel); tcon |= TCON_START(channel); @@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic) else tcon &= ~TCON_AUTORELOAD(channel); - __raw_writel(tcon, pwm.base + REG_TCON); + writel_relaxed(tcon, pwm.base + REG_TCON); spin_unlock_irqrestore(&samsung_pwm_lock, flags); } @@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void) return samsung_clocksource_read(NULL); } -static void __init samsung_clocksource_init(void) +static int __init samsung_clocksource_init(void) { unsigned long pclk; unsigned long clock_rate; - int ret; pclk = clk_get_rate(pwm.timerclk); @@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void) pwm.variant.bits, clock_rate); samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits); - ret = clocksource_register_hz(&samsung_clocksource, clock_rate); - if (ret) - panic("samsung_clocksource_timer: can't register clocksource\n"); + return clocksource_register_hz(&samsung_clocksource, clock_rate); } static void __init samsung_timer_resources(void) @@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void) /* * PWM master driver */ -static void __init _samsung_pwm_clocksource_init(void) +static int __init _samsung_pwm_clocksource_init(void) { u8 mask; int channel; mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1); channel = fls(mask) - 1; - if (channel < 0) - panic("failed to 
find PWM channel for clocksource"); + if (channel < 0) { + pr_crit("failed to find PWM channel for clocksource"); + return -EINVAL; + } pwm.source_id = channel; mask &= ~(1 << channel); channel = fls(mask) - 1; - if (channel < 0) - panic("failed to find PWM channel for clock event"); + if (channel < 0) { + pr_crit("failed to find PWM channel for clock event"); + return -EINVAL; + } pwm.event_id = channel; samsung_timer_resources(); samsung_clockevent_init(); - samsung_clocksource_init(); + + return samsung_clocksource_init(); } void __init samsung_pwm_clocksource_init(void __iomem *base, @@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base, } #ifdef CONFIG_CLKSRC_OF -static void __init samsung_pwm_alloc(struct device_node *np, - const struct samsung_pwm_variant *variant) +static int __init samsung_pwm_alloc(struct device_node *np, + const struct samsung_pwm_variant *variant) { struct property *prop; const __be32 *cur; @@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np, pwm.base = of_iomap(np, 0); if (!pwm.base) { pr_err("%s: failed to map PWM registers\n", __func__); - return; + return -ENXIO; } pwm.timerclk = of_clk_get_by_name(np, "timers"); - if (IS_ERR(pwm.timerclk)) - panic("failed to get timers clock for timer"); + if (IS_ERR(pwm.timerclk)) { + pr_crit("failed to get timers clock for timer"); + return PTR_ERR(pwm.timerclk); + } - _samsung_pwm_clocksource_init(); + return _samsung_pwm_clocksource_init(); } static const struct samsung_pwm_variant s3c24xx_variant = { @@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = { .tclk_mask = (1 << 4), }; -static void __init s3c2410_pwm_clocksource_init(struct device_node *np) +static int __init s3c2410_pwm_clocksource_init(struct device_node *np) { - samsung_pwm_alloc(np, &s3c24xx_variant); + return samsung_pwm_alloc(np, &s3c24xx_variant); } CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init); @@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = { .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5), }; -static void __init s3c64xx_pwm_clocksource_init(struct device_node *np) +static int __init s3c64xx_pwm_clocksource_init(struct device_node *np) { - samsung_pwm_alloc(np, &s3c64xx_variant); + return samsung_pwm_alloc(np, &s3c64xx_variant); } CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init); @@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = { .tclk_mask = 0, }; -static void __init s5p64x0_pwm_clocksource_init(struct device_node *np) +static int __init s5p64x0_pwm_clocksource_init(struct device_node *np) { - samsung_pwm_alloc(np, &s5p64x0_variant); + return samsung_pwm_alloc(np, &s5p64x0_variant); } CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init); @@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = { .tclk_mask = (1 << 5), }; -static void __init s5p_pwm_clocksource_init(struct device_node *np) +static int __init s5p_pwm_clocksource_init(struct device_node *np) { - samsung_pwm_alloc(np, &s5p_variant); + return samsung_pwm_alloc(np, &s5p_variant); } CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init); #endif @@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void) return ~readl(timer_base + TIMER_CNTVAL_REG(1)); } -static void __init sun4i_timer_init(struct device_node *node) +static int __init sun4i_timer_init(struct device_node *node) { 
unsigned long rate = 0; struct clk *clk; @@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node) u32 val; timer_base = of_iomap(node, 0); - if (!timer_base) - panic("Can't map registers"); + if (!timer_base) { + pr_crit("Can't map registers"); + return -ENXIO; + } irq = irq_of_parse_and_map(node, 0); - if (irq <= 0) - panic("Can't parse IRQ"); + if (irq <= 0) { + pr_crit("Can't parse IRQ"); + return -EINVAL; + } clk = of_clk_get(node, 0); - if (IS_ERR(clk)) - panic("Can't get timer clock"); - clk_prepare_enable(clk); + if (IS_ERR(clk)) { + pr_crit("Can't get timer clock"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Failed to prepare clock"); + return ret; + } rate = clk_get_rate(clk); @@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node) of_machine_is_compatible("allwinner,sun5i-a10s")) sched_clock_register(sun4i_timer_sched_read, 32, rate); - clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, - rate, 350, 32, clocksource_mmio_readl_down); + ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, + rate, 350, 32, clocksource_mmio_readl_down); + if (ret) { + pr_err("Failed to register clocksource"); + return ret; + } ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); @@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node) TIMER_SYNC_TICKS, 0xffffffff); ret = setup_irq(irq, &sun4i_timer_irq); - if (ret) - pr_warn("failed to setup irq %d\n", irq); + if (ret) { + pr_err("failed to setup irq %d\n", irq); + return ret; + } /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); + + return ret; } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", sun4i_timer_init); @@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void) return read_xtal_counter(); } -static void __init tango_clocksource_init(struct device_node *np) +static int __init tango_clocksource_init(struct device_node *np) { struct clk *clk; int xtal_freq, ret; @@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np) xtal_in_cnt = of_iomap(np, 0); if (xtal_in_cnt == NULL) { pr_err("%s: invalid address\n", np->full_name); - return; + return -ENXIO; } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { pr_err("%s: invalid clock\n", np->full_name); - return; + return PTR_ERR(clk); } xtal_freq = clk_get_rate(clk); @@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np) 32, clocksource_mmio_readl_up); if (ret) { pr_err("%s: registration failed\n", np->full_name); - return; + return ret; } sched_clock_register(read_sched_clock, 32, xtal_freq); register_current_timer_delay(&delay_timer); + + return 0; } CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init); @@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = { .dev_id = &tegra_clockevent, }; -static void __init tegra20_init_timer(struct device_node *np) +static int __init tegra20_init_timer(struct device_node *np) { struct clk *clk; unsigned long rate; @@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np) timer_reg_base = of_iomap(np, 0); if (!timer_reg_base) { pr_err("Can't map timer registers\n"); - BUG(); + return -ENXIO; } tegra_timer_irq.irq = irq_of_parse_and_map(np, 2); if (tegra_timer_irq.irq <= 0) { pr_err("Failed to map timer IRQ\n"); - BUG(); + return -EINVAL; } clk = of_clk_get(np, 0); @@ -211,10 +211,12 @@ static void __init 
tegra20_init_timer(struct device_node *np) sched_clock_register(tegra_read_sched_clock, 32, 1000000); - if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, - "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) { + ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, + "timer_us", 1000000, 300, 32, + clocksource_mmio_readl_up); + if (ret) { pr_err("Failed to register clocksource\n"); - BUG(); + return ret; } tegra_delay_timer.read_current_timer = @@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np) ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq); if (ret) { pr_err("Failed to register timer IRQ: %d\n", ret); - BUG(); + return ret; } tegra_clockevent.cpumask = cpu_all_mask; tegra_clockevent.irq = tegra_timer_irq.irq; clockevents_config_and_register(&tegra_clockevent, 1000000, 0x1, 0x1fffffff); + + return 0; } CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer); -static void __init tegra20_init_rtc(struct device_node *np) +static int __init tegra20_init_rtc(struct device_node *np) { struct clk *clk; rtc_base = of_iomap(np, 0); if (!rtc_base) { pr_err("Can't map RTC registers"); - BUG(); + return -ENXIO; } /* @@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np) else clk_prepare_enable(clk); - register_persistent_clock(NULL, tegra_read_persistent_clock64); + return register_persistent_clock(NULL, tegra_read_persistent_clock64); } CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); @@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void) writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF); } -struct syscore_ops armada_370_xp_timer_syscore_ops = { +static struct syscore_ops armada_370_xp_timer_syscore_ops = { .suspend = armada_370_xp_timer_suspend, .resume = armada_370_xp_timer_resume, }; @@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = { .read_current_timer = armada_370_delay_timer_read, }; -static void __init armada_370_xp_timer_common_init(struct device_node *np) +static int __init armada_370_xp_timer_common_init(struct device_node *np) { u32 clr = 0, set = 0; int res; timer_base = of_iomap(np, 0); - WARN_ON(!timer_base); + if (!timer_base) { + pr_err("Failed to iomap"); + return -ENXIO; + } + local_base = of_iomap(np, 1); + if (!local_base) { + pr_err("Failed to iomap"); + return -ENXIO; + } if (timer25Mhz) { set = TIMER0_25MHZ; @@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) */ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); - clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, - "armada_370_xp_clocksource", - timer_clk, 300, 32, clocksource_mmio_readl_down); + res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, + "armada_370_xp_clocksource", + timer_clk, 300, 32, clocksource_mmio_readl_down); + if (res) { + pr_err("Failed to initialize clocksource mmio"); + return res; + } register_cpu_notifier(&armada_370_xp_timer_cpu_nb); armada_370_xp_evt = alloc_percpu(struct clock_event_device); - + if (!armada_370_xp_evt) + return -ENOMEM; /* * Setup clockevent timer (interrupt-driven). 
@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np) "armada_370_xp_per_cpu_tick", armada_370_xp_evt); /* Immediately configure the timer on the boot CPU */ - if (!res) - armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); + if (res) { + pr_err("Failed to request percpu irq"); + return res; + } + + res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); + if (res) { + pr_err("Failed to setup timer"); + return res; + } register_syscore_ops(&armada_370_xp_timer_syscore_ops); + + return 0; } -static void __init armada_xp_timer_init(struct device_node *np) +static int __init armada_xp_timer_init(struct device_node *np) { struct clk *clk = of_clk_get_by_name(np, "fixed"); + int ret; + + if (IS_ERR(clk)) { + pr_err("Failed to get clock"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; - /* The 25Mhz fixed clock is mandatory, and must always be available */ - BUG_ON(IS_ERR(clk)); - clk_prepare_enable(clk); timer_clk = clk_get_rate(clk); - armada_370_xp_timer_common_init(np); + return armada_370_xp_timer_common_init(np); } CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", armada_xp_timer_init); -static void __init armada_375_timer_init(struct device_node *np) +static int __init armada_375_timer_init(struct device_node *np) { struct clk *clk; + int ret; clk = of_clk_get_by_name(np, "fixed"); if (!IS_ERR(clk)) { - clk_prepare_enable(clk); + ret = clk_prepare_enable(clk); + if (ret) + return ret; timer_clk = clk_get_rate(clk); } else { @@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np) clk = of_clk_get(np, 0); /* Must have at least a clock */ - BUG_ON(IS_ERR(clk)); - clk_prepare_enable(clk); + if (IS_ERR(clk)) { + pr_err("Failed to get clock"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; timer25Mhz = false; } - armada_370_xp_timer_common_init(np); + return armada_370_xp_timer_common_init(np); } CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer", armada_375_timer_init); -static void __init armada_370_timer_init(struct device_node *np) +static int __init armada_370_timer_init(struct device_node *np) { - struct clk *clk = of_clk_get(np, 0); + struct clk *clk; + int ret; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + pr_err("Failed to get clock"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) + return ret; - BUG_ON(IS_ERR(clk)); - clk_prepare_enable(clk); timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; timer25Mhz = false; - armada_370_xp_timer_common_init(np); + return armada_370_xp_timer_common_init(np); } CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer", armada_370_timer_init); @@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np) DIV_ROUND_CLOSEST(rate, 1024), 0xf, 0xffff); - setup_irq(irq, &efm32_clock_event_irq); + ret = setup_irq(irq, &efm32_clock_event_irq); + if (ret) { + pr_err("Failed setup irq"); + goto err_setup_irq; + } return 0; +err_setup_irq: err_get_irq: iounmap(base); @@ -255,16 +260,16 @@ err_clk_get: * This function asserts that we have exactly one clocksource and one * clock_event_device in the end. 
*/ -static void __init efm32_timer_init(struct device_node *np) +static int __init efm32_timer_init(struct device_node *np) { static int has_clocksource, has_clockevent; - int ret; + int ret = 0; if (!has_clocksource) { ret = efm32_clocksource_init(np); if (!ret) { has_clocksource = 1; - return; + return 0; } } @@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np) ret = efm32_clockevent_init(np); if (!ret) { has_clockevent = 1; - return; + return 0; } } + + return ret; } CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); @@ -288,16 +288,16 @@ err_clk_enable: * This function asserts that we have exactly one clocksource and one * clock_event_device in the end. */ -static void __init lpc32xx_timer_init(struct device_node *np) +static int __init lpc32xx_timer_init(struct device_node *np) { static int has_clocksource, has_clockevent; - int ret; + int ret = 0; if (!has_clocksource) { ret = lpc32xx_clocksource_init(np); if (!ret) { has_clocksource = 1; - return; + return 0; } } @@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np) ret = lpc32xx_clockevent_init(np); if (!ret) { has_clockevent = 1; - return; + return 0; } } + + return ret; } CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init); @@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = { .handler = orion_clkevt_irq_handler, }; -static void __init orion_timer_init(struct device_node *np) +static int __init orion_timer_init(struct device_node *np) { struct clk *clk; - int irq; + int irq, ret; /* timer registers are shared with watchdog timer */ timer_base = of_iomap(np, 0); - if (!timer_base) - panic("%s: unable to map resource\n", np->name); + if (!timer_base) { + pr_err("%s: unable to map resource\n", np->name); + return -ENXIO; + } clk = of_clk_get(np, 0); - if (IS_ERR(clk)) - panic("%s: unable to get clk\n", np->name); - clk_prepare_enable(clk); + if (IS_ERR(clk)) { + pr_err("%s: unable to get clk\n", np->name); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Failed to prepare clock"); + return ret; + } /* we are only interested in timer1 irq */ irq = irq_of_parse_and_map(np, 1); - if (irq <= 0) - panic("%s: unable to parse timer1 irq\n", np->name); + if (irq <= 0) { + pr_err("%s: unable to parse timer1 irq\n", np->name); + return -EINVAL; + } /* setup timer0 as free-running clocksource */ writel(~0, timer_base + TIMER0_VAL); @@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np) atomic_io_modify(timer_base + TIMER_CTRL, TIMER0_RELOAD_EN | TIMER0_EN, TIMER0_RELOAD_EN | TIMER0_EN); - clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", - clk_get_rate(clk), 300, 32, - clocksource_mmio_readl_down); + + ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", + clk_get_rate(clk), 300, 32, + clocksource_mmio_readl_down); + if (ret) { + pr_err("Failed to initialize mmio timer"); + return ret; + } + sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); /* setup timer1 as clockevent timer */ - if (setup_irq(irq, &orion_clkevt_irq)) - panic("%s: unable to setup irq\n", np->name); + ret = setup_irq(irq, &orion_clkevt_irq); + if (ret) { + pr_err("%s: unable to setup irq\n", np->name); + return ret; + } ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; orion_clkevt.cpumask = cpumask_of(0); orion_clkevt.irq = irq; 
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
	.handler	= orion_clkevt_irq_handler,
 };

-static void __init orion_timer_init(struct device_node *np)
+static int __init orion_timer_init(struct device_node *np)
 {
	struct clk *clk;
-	int irq;
+	int irq, ret;

	/* timer registers are shared with watchdog timer */
	timer_base = of_iomap(np, 0);
-	if (!timer_base)
-		panic("%s: unable to map resource\n", np->name);
+	if (!timer_base) {
+		pr_err("%s: unable to map resource\n", np->name);
+		return -ENXIO;
+	}

	clk = of_clk_get(np, 0);
-	if (IS_ERR(clk))
-		panic("%s: unable to get clk\n", np->name);
-	clk_prepare_enable(clk);
+	if (IS_ERR(clk)) {
+		pr_err("%s: unable to get clk\n", np->name);
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("Failed to prepare clock");
+		return ret;
+	}

	/* we are only interested in timer1 irq */
	irq = irq_of_parse_and_map(np, 1);
-	if (irq <= 0)
-		panic("%s: unable to parse timer1 irq\n", np->name);
+	if (irq <= 0) {
+		pr_err("%s: unable to parse timer1 irq\n", np->name);
+		return -EINVAL;
+	}

	/* setup timer0 as free-running clocksource */
	writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
	atomic_io_modify(timer_base + TIMER_CTRL,
		TIMER0_RELOAD_EN | TIMER0_EN,
		TIMER0_RELOAD_EN | TIMER0_EN);
-	clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
-			      clk_get_rate(clk), 300, 32,
-			      clocksource_mmio_readl_down);
+
+	ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+				    clk_get_rate(clk), 300, 32,
+				    clocksource_mmio_readl_down);
+	if (ret) {
+		pr_err("Failed to initialize mmio timer");
+		return ret;
+	}
+
	sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));

	/* setup timer1 as clockevent timer */
-	if (setup_irq(irq, &orion_clkevt_irq))
-		panic("%s: unable to setup irq\n", np->name);
+	ret = setup_irq(irq, &orion_clkevt_irq);
+	if (ret) {
+		pr_err("%s: unable to setup irq\n", np->name);
+		return ret;
+	}

	ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
	orion_clkevt.cpumask = cpumask_of(0);
	orion_clkevt.irq = irq;
	clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
					ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+
+	return 0;
 }
 CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
	},
 };

-static void __init pistachio_clksrc_of_init(struct device_node *node)
+static int __init pistachio_clksrc_of_init(struct device_node *node)
 {
	struct clk *sys_clk, *fast_clk;
	struct regmap *periph_regs;
@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
	pcs_gpt.base = of_iomap(node, 0);
	if (!pcs_gpt.base) {
		pr_err("cannot iomap\n");
-		return;
+		return -ENXIO;
	}

	periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
	if (IS_ERR(periph_regs)) {
		pr_err("cannot get peripheral regmap (%ld)\n",
		       PTR_ERR(periph_regs));
-		return;
+		return PTR_ERR(periph_regs);
	}

	/* Switch to using the fast counter clock */
	ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
				 0xf, 0x0);
	if (ret)
-		return;
+		return ret;

	sys_clk = of_clk_get_by_name(node, "sys");
	if (IS_ERR(sys_clk)) {
		pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
-		return;
+		return PTR_ERR(sys_clk);
	}

	fast_clk = of_clk_get_by_name(node, "fast");
	if (IS_ERR(fast_clk)) {
		pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
-		return;
+		return PTR_ERR(fast_clk);
	}

	ret = clk_prepare_enable(sys_clk);
	if (ret < 0) {
		pr_err("failed to enable clock (%d)\n", ret);
-		return;
+		return ret;
	}

	ret = clk_prepare_enable(fast_clk);
	if (ret < 0) {
		pr_err("failed to enable clock (%d)\n", ret);
		clk_disable_unprepare(sys_clk);
-		return;
+		return ret;
	}

	rate = clk_get_rate(fast_clk);
@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
	raw_spin_lock_init(&pcs_gpt.lock);
	sched_clock_register(pistachio_read_sched_clock, 32, rate);
-	clocksource_register_hz(&pcs_gpt.cs, rate);
+	return clocksource_register_hz(&pcs_gpt.cs, rate);
 }
 CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
		       pistachio_clksrc_of_init);
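Returning an errno from these initializers only pays off because the registration macro's consumer checks it. All of the drivers in this series register through CLOCKSOURCE_OF_DECLARE, whose entries are walked by clocksource_probe() in clksrc-probe.c; a simplified approximation of that loop (reconstructed, so details such as the bookkeeping around it may differ from the real function):

void __init clocksource_probe(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	of_init_fn_1_ret init_func;	/* int (*)(struct device_node *) */
	int ret;

	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
		if (!of_device_is_available(np))
			continue;

		init_func = match->data;

		/* A failed timer no longer panics the machine; it is
		 * reported and the probe moves on to the next node. */
		ret = init_func(np);
		if (ret)
			pr_err("Failed to initialize '%s': %d\n",
			       of_node_full_name(np), ret);
	}
}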
@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
	.notifier_call = sirfsoc_cpu_notify,
 };

-static void __init sirfsoc_clockevent_init(void)
+static int __init sirfsoc_clockevent_init(void)
 {
	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
	BUG_ON(!sirfsoc_clockevent);
@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));

	/* Immediately configure the timer on the boot CPU */
-	sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+	return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
 }

 /* initialize the kernel jiffy timer source */
-static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
+static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
 {
	struct clk *clk;

@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));

-	sirfsoc_clockevent_init();
+	return sirfsoc_clockevent_init();
 }

-static void __init sirfsoc_of_timer_init(struct device_node *np)
+static int __init sirfsoc_of_timer_init(struct device_node *np)
 {
	sirfsoc_timer_base = of_iomap(np, 0);
-	if (!sirfsoc_timer_base)
-		panic("unable to map timer cpu registers\n");
+	if (!sirfsoc_timer_base) {
+		pr_err("unable to map timer cpu registers\n");
+		return -ENXIO;
+	}

	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
-	if (!sirfsoc_timer_irq.irq)
-		panic("No irq passed for timer0 via DT\n");
+	if (!sirfsoc_timer_irq.irq) {
+		pr_err("No irq passed for timer0 via DT\n");
+		return -EINVAL;
+	}

	sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
-	if (!sirfsoc_timer1_irq.irq)
-		panic("No irq passed for timer1 via DT\n");
+	if (!sirfsoc_timer1_irq.irq) {
+		pr_err("No irq passed for timer1 via DT\n");
+		return -EINVAL;
+	}

-	sirfsoc_atlas7_timer_init(np);
+	return sirfsoc_atlas7_timer_init(np);
 }
 CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
 /*
  * Set up both clocksource and clockevent support.
  */
-static void __init at91sam926x_pit_common_init(struct pit_data *data)
+static int __init at91sam926x_pit_common_init(struct pit_data *data)
 {
	unsigned long	pit_rate;
	unsigned	bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
	data->clksrc.rating = 175;
	data->clksrc.read = read_pit_clk;
	data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	clocksource_register_hz(&data->clksrc, pit_rate);
+
+	ret = clocksource_register_hz(&data->clksrc, pit_rate);
+	if (ret) {
+		pr_err("Failed to register clocksource");
+		return ret;
+	}

	/* Set up irq handler */
	ret = request_irq(data->irq, at91sam926x_pit_interrupt,
			  IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
			  "at91_tick", data);
-	if (ret)
-		panic(pr_fmt("Unable to setup IRQ\n"));
+	if (ret) {
+		pr_err("Unable to setup IRQ\n");
+		return ret;
+	}

	/* Set up and register clockevents */
	data->clkevt.name = "pit";
@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
	data->clkevt.resume = at91sam926x_pit_resume;
	data->clkevt.suspend = at91sam926x_pit_suspend;
	clockevents_register_device(&data->clkevt);
+
+	return 0;
 }

-static void __init at91sam926x_pit_dt_init(struct device_node *node)
+static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
	struct pit_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
-		panic(pr_fmt("Unable to allocate memory\n"));
+		return -ENOMEM;

	data->base = of_iomap(node, 0);
-	if (!data->base)
-		panic(pr_fmt("Could not map PIT address\n"));
+	if (!data->base) {
+		pr_err("Could not map PIT address\n");
+		return -ENXIO;
+	}

	data->mck = of_clk_get(node, 0);
	if (IS_ERR(data->mck))
		/* Fallback on clkdev for !CCF-based boards */
		data->mck = clk_get(NULL, "mck");

-	if (IS_ERR(data->mck))
-		panic(pr_fmt("Unable to get mck clk\n"));
+	if (IS_ERR(data->mck)) {
+		pr_err("Unable to get mck clk\n");
+		return PTR_ERR(data->mck);
+	}

	/* Get the interrupts property */
	data->irq = irq_of_parse_and_map(node, 0);
-	if (!data->irq)
-		panic(pr_fmt("Unable to get IRQ from DT\n"));
+	if (!data->irq) {
+		pr_err("Unable to get IRQ from DT\n");
+		return -EINVAL;
+	}

-	at91sam926x_pit_common_init(data);
+	return at91sam926x_pit_common_init(data);
 }
 CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
		       at91sam926x_pit_dt_init);
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
 /*
  * ST (system timer) module supports both clockevents and clocksource.
*/ -static void __init atmel_st_timer_init(struct device_node *node) +static int __init atmel_st_timer_init(struct device_node *node) { struct clk *sclk; unsigned int sclk_rate, val; int irq, ret; regmap_st = syscon_node_to_regmap(node); - if (IS_ERR(regmap_st)) - panic(pr_fmt("Unable to get regmap\n")); + if (IS_ERR(regmap_st)) { + pr_err("Unable to get regmap\n"); + return PTR_ERR(regmap_st); + } /* Disable all timer interrupts, and clear any pending ones */ regmap_write(regmap_st, AT91_ST_IDR, @@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node) /* Get the interrupts property */ irq = irq_of_parse_and_map(node, 0); - if (!irq) - panic(pr_fmt("Unable to get IRQ from DT\n")); + if (!irq) { + pr_err("Unable to get IRQ from DT\n"); + return -EINVAL; + } /* Make IRQs happen for the system timer */ ret = request_irq(irq, at91rm9200_timer_interrupt, IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, "at91_tick", regmap_st); - if (ret) - panic(pr_fmt("Unable to setup IRQ\n")); + if (ret) { + pr_err("Unable to setup IRQ\n"); + return ret; + } sclk = of_clk_get(node, 0); - if (IS_ERR(sclk)) - panic(pr_fmt("Unable to get slow clock\n")); + if (IS_ERR(sclk)) { + pr_err("Unable to get slow clock\n"); + return PTR_ERR(sclk); + } - clk_prepare_enable(sclk); - if (ret) - panic(pr_fmt("Could not enable slow clock\n")); + ret = clk_prepare_enable(sclk); + if (ret) { + pr_err("Could not enable slow clock\n"); + return ret; + } sclk_rate = clk_get_rate(sclk); - if (!sclk_rate) - panic(pr_fmt("Invalid slow clock rate\n")); + if (!sclk_rate) { + pr_err("Invalid slow clock rate\n"); + return -EINVAL; + } timer_latch = (sclk_rate + HZ / 2) / HZ; /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used @@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node) 2, AT91_ST_ALMV); /* register clocksource */ - clocksource_register_hz(&clk32k, sclk_rate); + return clocksource_register_hz(&clk32k, sclk_rate); } CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st", atmel_st_timer_init); @@ -63,7 +63,7 @@ struct digicolor_timer { int timer_id; /* one of TIMER_* */ }; -struct digicolor_timer *dc_timer(struct clock_event_device *ce) +static struct digicolor_timer *dc_timer(struct clock_event_device *ce) { return container_of(ce, struct digicolor_timer, ce); } @@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void) return ~readl(dc_timer_dev.base + COUNT(TIMER_B)); } -static void __init digicolor_timer_init(struct device_node *node) +static int __init digicolor_timer_init(struct device_node *node) { unsigned long rate; struct clk *clk; @@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node) dc_timer_dev.base = of_iomap(node, 0); if (!dc_timer_dev.base) { pr_err("Can't map registers"); - return; + return -ENXIO; } irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id); if (irq <= 0) { pr_err("Can't parse IRQ"); - return; + return -EINVAL; } clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("Can't get timer clock"); - return; + return PTR_ERR(clk); } clk_prepare_enable(clk); rate = clk_get_rate(clk); @@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node) ret = request_irq(irq, digicolor_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC", &dc_timer_dev.ce); - if (ret) + if (ret) { pr_warn("request of timer irq %d failed (%d)\n", irq, ret); + return ret; + } dc_timer_dev.ce.cpumask = cpu_possible_mask; dc_timer_dev.ce.irq = irq; 
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff); + + return 0; } CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer", digicolor_timer_init); @@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = { .set_next_event = v2_set_next_event, }; -static void __init _mxc_timer_init(struct imx_timer *imxtm) +static int __init _mxc_timer_init(struct imx_timer *imxtm) { + int ret; + switch (imxtm->type) { case GPT_TYPE_IMX1: imxtm->gpt = &imx1_gpt_data; @@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm) imxtm->gpt = &imx6dl_gpt_data; break; default: - BUG(); + return -EINVAL; } if (IS_ERR(imxtm->clk_per)) { pr_err("i.MX timer: unable to get clk\n"); - return; + return PTR_ERR(imxtm->clk_per); } if (!IS_ERR(imxtm->clk_ipg)) @@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm) imxtm->gpt->gpt_setup_tctl(imxtm); /* init and register the timer to the framework */ - mxc_clocksource_init(imxtm); - mxc_clockevent_init(imxtm); + ret = mxc_clocksource_init(imxtm); + if (ret) + return ret; + + return mxc_clockevent_init(imxtm); } void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type) @@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type) _mxc_timer_init(imxtm); } -static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type) +static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type) { struct imx_timer *imxtm; static int initialized; + int ret; /* Support one instance only */ if (initialized) - return; + return 0; imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL); - BUG_ON(!imxtm); + if (!imxtm) + return -ENOMEM; imxtm->base = of_iomap(np, 0); - WARN_ON(!imxtm->base); + if (!imxtm->base) + return -ENXIO; + imxtm->irq = irq_of_parse_and_map(np, 0); + if (imxtm->irq <= 0) + return -EINVAL; imxtm->clk_ipg = of_clk_get_by_name(np, "ipg"); @@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type imxtm->type = type; - _mxc_timer_init(imxtm); + ret = _mxc_timer_init(imxtm); + if (ret) + return ret; initialized = 1; + + return 0; } -static void __init imx1_timer_init_dt(struct device_node *np) +static int __init imx1_timer_init_dt(struct device_node *np) { - mxc_timer_init_dt(np, GPT_TYPE_IMX1); + return mxc_timer_init_dt(np, GPT_TYPE_IMX1); } -static void __init imx21_timer_init_dt(struct device_node *np) +static int __init imx21_timer_init_dt(struct device_node *np) { - mxc_timer_init_dt(np, GPT_TYPE_IMX21); + return mxc_timer_init_dt(np, GPT_TYPE_IMX21); } -static void __init imx31_timer_init_dt(struct device_node *np) +static int __init imx31_timer_init_dt(struct device_node *np) { enum imx_gpt_type type = GPT_TYPE_IMX31; @@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np) if (of_machine_is_compatible("fsl,imx6dl")) type = GPT_TYPE_IMX6DL; - mxc_timer_init_dt(np, type); + return mxc_timer_init_dt(np, type); } -static void __init imx6dl_timer_init_dt(struct device_node *np) +static int __init imx6dl_timer_init_dt(struct device_node *np) { - mxc_timer_init_dt(np, GPT_TYPE_IMX6DL); + return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL); } CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); @@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void) return -readl(sched_clk_base + TIMER_VALUE); } -static void integrator_clocksource_init(unsigned long inrate, - void __iomem *base) +static int 
integrator_clocksource_init(unsigned long inrate, + void __iomem *base) { u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; unsigned long rate = inrate; + int ret; if (rate >= 1500000) { rate /= 16; @@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate, writel(0xffff, base + TIMER_LOAD); writel(ctrl, base + TIMER_CTRL); - clocksource_mmio_init(base + TIMER_VALUE, "timer2", - rate, 200, 16, clocksource_mmio_readl_down); + ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2", + rate, 200, 16, clocksource_mmio_readl_down); + if (ret) + return ret; sched_clk_base = base; sched_clock_register(integrator_read_sched_clock, 16, rate); + + return 0; } static unsigned long timer_reload; @@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = { .dev_id = &integrator_clockevent, }; -static void integrator_clockevent_init(unsigned long inrate, - void __iomem *base, int irq) +static int integrator_clockevent_init(unsigned long inrate, + void __iomem *base, int irq) { unsigned long rate = inrate; unsigned int ctrl = 0; + int ret; clkevt_base = base; /* Calculate and program a divisor */ @@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate, timer_reload = rate / HZ; writel(ctrl, clkevt_base + TIMER_CTRL); - setup_irq(irq, &integrator_timer_irq); + ret = setup_irq(irq, &integrator_timer_irq); + if (ret) + return ret; + clockevents_config_and_register(&integrator_clockevent, rate, 1, 0xffffU); + return 0; } -static void __init integrator_ap_timer_init_of(struct device_node *node) +static int __init integrator_ap_timer_init_of(struct device_node *node) { const char *path; void __iomem *base; @@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node) base = of_io_request_and_map(node, 0, "integrator-timer"); if (IS_ERR(base)) - return; + return PTR_ERR(base); clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("No clock for %s\n", node->name); - return; + return PTR_ERR(clk); } clk_prepare_enable(clk); rate = clk_get_rate(clk); @@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node) err = of_property_read_string(of_aliases, "arm,timer-primary", &path); - if (WARN_ON(err)) - return; + if (err) { + pr_warn("Failed to read property"); + return err; + } + pri_node = of_find_node_by_path(path); + err = of_property_read_string(of_aliases, "arm,timer-secondary", &path); - if (WARN_ON(err)) - return; + if (err) { + pr_warn("Failed to read property"); + return err; + } + + sec_node = of_find_node_by_path(path); - if (node == pri_node) { + if (node == pri_node) /* The primary timer lacks IRQ, use as clocksource */ - integrator_clocksource_init(rate, base); - return; - } + return integrator_clocksource_init(rate, base); if (node == sec_node) { /* The secondary timer will drive the clock event */ irq = irq_of_parse_and_map(node, 0); - integrator_clockevent_init(rate, base, irq); - return; + return integrator_clockevent_init(rate, base, irq); } pr_info("Timer @%p unused\n", base); clk_disable_unprepare(clk); + + return 0; } CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer", @@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt) return 0; } -static void __init keystone_timer_init(struct device_node *np) +static int __init keystone_timer_init(struct device_node *np) { struct clock_event_device *event_dev = &timer.event_dev; unsigned long rate; @@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np) 
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		pr_err("%s: failed to map interrupts\n", __func__);
-		return;
+		return -EINVAL;
	}

	timer.base = of_iomap(np, 0);
	if (!timer.base) {
		pr_err("%s: failed to map registers\n", __func__);
-		return;
+		return -ENXIO;
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to get clock\n", __func__);
		iounmap(timer.base);
-		return;
+		return PTR_ERR(clk);
	}

	error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
	clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);

	pr_info("keystone timer clock @%lu Hz\n", rate);
-	return;
+	return 0;
 err:
	clk_put(clk);
	iounmap(timer.base);
+	return error;
 }

 CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
-			keystone_timer_init);
+		       keystone_timer_init);
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
 }

-static void __init nps_setup_clocksource(struct device_node *node,
-					 struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node,
+					struct clk *clk)
 {
	int ret, cluster;

@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clock\n");
-		return;
+		return ret;
	}

	nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
		pr_err("Couldn't register clock source.\n");
		clk_disable_unprepare(clk);
	}
+
+	return ret;
 }

-static void __init nps_timer_init(struct device_node *node)
+static int __init nps_timer_init(struct device_node *node)
 {
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("Can't get timer clock.\n");
-		return;
+		return PTR_ERR(clk);
	}

-	nps_setup_clocksource(node, clk);
+	return nps_setup_clocksource(node, clk);
 }

 CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
		       nps_timer_init);
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644
index 000000000000..bd887e2a8cf8
--- /dev/null
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -0,0 +1,297 @@
+/*
+ * drivers/clocksource/timer-oxnas-rps.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/clockchips.h> +#include <linux/sched_clock.h> + +/* TIMER1 used as tick + * TIMER2 used as clocksource + */ + +/* Registers definitions */ + +#define TIMER_LOAD_REG 0x0 +#define TIMER_CURR_REG 0x4 +#define TIMER_CTRL_REG 0x8 +#define TIMER_CLRINT_REG 0xC + +#define TIMER_BITS 24 + +#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1) + +#define TIMER_PERIODIC BIT(6) +#define TIMER_ENABLE BIT(7) + +#define TIMER_DIV1 (0) +#define TIMER_DIV16 (1 << 2) +#define TIMER_DIV256 (2 << 2) + +#define TIMER1_REG_OFFSET 0 +#define TIMER2_REG_OFFSET 0x20 + +/* Clockevent & Clocksource data */ + +struct oxnas_rps_timer { + struct clock_event_device clkevent; + void __iomem *clksrc_base; + void __iomem *clkevt_base; + unsigned long timer_period; + unsigned int timer_prescaler; + struct clk *clk; + int irq; +}; + +static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id) +{ + struct oxnas_rps_timer *rps = dev_id; + + writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG); + + rps->clkevent.event_handler(&rps->clkevent); + + return IRQ_HANDLED; +} + +static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps, + unsigned long period, + unsigned int periodic) +{ + uint32_t cfg = rps->timer_prescaler; + + if (period) + cfg |= TIMER_ENABLE; + + if (periodic) + cfg |= TIMER_PERIODIC; + + writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG); + writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG); +} + +static int oxnas_rps_timer_shutdown(struct clock_event_device *evt) +{ + struct oxnas_rps_timer *rps = + container_of(evt, struct oxnas_rps_timer, clkevent); + + oxnas_rps_timer_config(rps, 0, 0); + + return 0; +} + +static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt) +{ + struct oxnas_rps_timer *rps = + container_of(evt, struct oxnas_rps_timer, clkevent); + + oxnas_rps_timer_config(rps, rps->timer_period, 1); + + return 0; +} + +static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt) +{ + struct oxnas_rps_timer *rps = + container_of(evt, struct oxnas_rps_timer, clkevent); + + oxnas_rps_timer_config(rps, rps->timer_period, 0); + + return 0; +} + +static int oxnas_rps_timer_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + struct oxnas_rps_timer *rps = + container_of(evt, struct oxnas_rps_timer, clkevent); + + oxnas_rps_timer_config(rps, delta, 0); + + return 0; +} + +static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps) +{ + ulong clk_rate = clk_get_rate(rps->clk); + ulong timer_rate; + + /* Start with prescaler 1 */ + rps->timer_prescaler = TIMER_DIV1; + rps->timer_period = DIV_ROUND_UP(clk_rate, HZ); + timer_rate = clk_rate; + + if (rps->timer_period > TIMER_MAX_VAL) { + rps->timer_prescaler = TIMER_DIV16; + timer_rate = clk_rate / 16; + rps->timer_period = DIV_ROUND_UP(timer_rate, HZ); + } + if (rps->timer_period > TIMER_MAX_VAL) { + rps->timer_prescaler = TIMER_DIV256; + timer_rate = clk_rate / 256; + rps->timer_period = DIV_ROUND_UP(timer_rate, HZ); + } + + rps->clkevent.name = "oxnas-rps"; + rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DYNIRQ; + rps->clkevent.tick_resume = oxnas_rps_timer_shutdown; + rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown; + rps->clkevent.set_state_periodic = 
oxnas_rps_timer_set_periodic; + rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot; + rps->clkevent.set_next_event = oxnas_rps_timer_next_event; + rps->clkevent.rating = 200; + rps->clkevent.cpumask = cpu_possible_mask; + rps->clkevent.irq = rps->irq; + clockevents_config_and_register(&rps->clkevent, + timer_rate, + 1, + TIMER_MAX_VAL); + + pr_info("Registered clock event rate %luHz prescaler %x period %lu\n", + clk_rate, + rps->timer_prescaler, + rps->timer_period); + + return 0; +} + +/* Clocksource */ + +static void __iomem *timer_sched_base; + +static u64 notrace oxnas_rps_read_sched_clock(void) +{ + return ~readl_relaxed(timer_sched_base); +} + +static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps) +{ + ulong clk_rate = clk_get_rate(rps->clk); + int ret; + + /* use prescale 16 */ + clk_rate = clk_rate / 16; + + writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG); + writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16, + rps->clksrc_base + TIMER_CTRL_REG); + + timer_sched_base = rps->clksrc_base + TIMER_CURR_REG; + sched_clock_register(oxnas_rps_read_sched_clock, + TIMER_BITS, clk_rate); + ret = clocksource_mmio_init(timer_sched_base, + "oxnas_rps_clocksource_timer", + clk_rate, 250, TIMER_BITS, + clocksource_mmio_readl_down); + if (WARN_ON(ret)) { + pr_err("can't register clocksource\n"); + return ret; + } + + pr_info("Registered clocksource rate %luHz\n", clk_rate); + + return 0; +} + +static int __init oxnas_rps_timer_init(struct device_node *np) +{ + struct oxnas_rps_timer *rps; + void __iomem *base; + int ret; + + rps = kzalloc(sizeof(*rps), GFP_KERNEL); + if (!rps) + return -ENOMEM; + + rps->clk = of_clk_get(np, 0); + if (IS_ERR(rps->clk)) { + ret = PTR_ERR(rps->clk); + goto err_alloc; + } + + ret = clk_prepare_enable(rps->clk); + if (ret) + goto err_clk; + + base = of_iomap(np, 0); + if (!base) { + ret = -ENXIO; + goto err_clk_prepare; + } + + rps->irq = irq_of_parse_and_map(np, 0); + if (rps->irq < 0) { + ret = -EINVAL; + goto err_iomap; + } + + rps->clkevt_base = base + TIMER1_REG_OFFSET; + rps->clksrc_base = base + TIMER2_REG_OFFSET; + + /* Disable timers */ + writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG); + writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG); + writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG); + writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG); + writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG); + writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG); + + ret = request_irq(rps->irq, oxnas_rps_timer_irq, + IRQF_TIMER | IRQF_IRQPOLL, + "rps-timer", rps); + if (ret) + goto err_iomap; + + ret = oxnas_rps_clocksource_init(rps); + if (ret) + goto err_irqreq; + + ret = oxnas_rps_clockevent_init(rps); + if (ret) + goto err_irqreq; + + return 0; + +err_irqreq: + free_irq(rps->irq, rps); +err_iomap: + iounmap(base); +err_clk_prepare: + clk_disable_unprepare(rps->clk); +err_clk: + clk_put(rps->clk); +err_alloc: + kfree(rps); + + return ret; +} + +CLOCKSOURCE_OF_DECLARE(ox810se_rps, + "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); @@ -19,7 +19,6 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/sched_clock.h> -#include <asm/mach/time.h> #define PRIMA2_CLOCK_FREQ 1000000 @@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void) } /* initialize the kernel jiffy timer source */ -static void __init sirfsoc_prima2_timer_init(struct device_node *np) +static int __init sirfsoc_prima2_timer_init(struct device_node *np) { unsigned long rate; struct clk *clk; + int 
ret; clk = of_clk_get(np, 0); - BUG_ON(IS_ERR(clk)); + if (IS_ERR(clk)) { + pr_err("Failed to get clock"); + return PTR_ERR(clk); + } - BUG_ON(clk_prepare_enable(clk)); + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Failed to enable clock"); + return ret; + } rate = clk_get_rate(clk); - BUG_ON(rate < PRIMA2_CLOCK_FREQ); - BUG_ON(rate % PRIMA2_CLOCK_FREQ); + if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) { + pr_err("Invalid clock rate"); + return -EINVAL; + } sirfsoc_timer_base = of_iomap(np, 0); - if (!sirfsoc_timer_base) - panic("unable to map timer cpu registers\n"); + if (!sirfsoc_timer_base) { + pr_err("unable to map timer cpu registers\n"); + return -ENXIO; + } sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); @@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np) writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); - BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, - PRIMA2_CLOCK_FREQ)); + ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ); + if (ret) { + pr_err("Failed to register clocksource"); + return ret; + } sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ); - BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); + ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); + if (ret) { + pr_err("Failed to setup irq"); + return ret; + } sirfsoc_clockevent_init(); + + return 0; } CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, "sirf,prima2-tick", sirfsoc_prima2_timer_init); @@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base) writel(0, base + TIMER_CTRL); } -void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, +int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, const char *name, struct clk *clk, int use_sched_clock) @@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, if (IS_ERR(clk)) { pr_err("sp804: clock not found: %d\n", (int)PTR_ERR(clk)); - return; + return PTR_ERR(clk); } } rate = sp804_get_clock_rate(clk); - if (rate < 0) - return; + return -EINVAL; /* setup timer 0 as free-running clocksource */ writel(0, base + TIMER_CTRL); @@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, sched_clock_base = base; sched_clock_register(sp804_read, 32, rate); } + + return 0; } @@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = { .dev_id = &sp804_clockevent, }; -void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) +int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) { struct clock_event_device *evt = &sp804_clockevent; long rate; @@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc if (IS_ERR(clk)) { pr_err("sp804: %s clock not found: %d\n", name, (int)PTR_ERR(clk)); - return; + return PTR_ERR(clk); } rate = sp804_get_clock_rate(clk); if (rate < 0) - return; + return -EINVAL; clkevt_base = base; clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); @@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc setup_irq(irq, &sp804_timer_irq); clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); + + return 0; } -static void __init sp804_of_init(struct device_node *np) +static int __init sp804_of_init(struct device_node *np) { static bool initialized = 
false;
	void __iomem *base;
-	int irq;
+	int irq, ret = -EINVAL;
	u32 irq_num = 0;
	struct clk *clk1, *clk2;
	const char *name = of_get_property(np, "compatible", NULL);

	base = of_iomap(np, 0);
-	if (WARN_ON(!base))
-		return;
+	if (!base)
+		return -ENXIO;

	/* Ensure timers are disabled */
	writel(0, base + TIMER_CTRL);
	writel(0, base + TIMER_2_BASE + TIMER_CTRL);

-	if (initialized || !of_device_is_available(np))
+	if (initialized || !of_device_is_available(np)) {
+		ret = -EINVAL;
		goto err;
+	}

	clk1 = of_clk_get(np, 0);
	if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
	of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
	if (irq_num == 2) {
-		__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
-		__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+
+		ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+		if (ret)
+			goto err;
+
+		ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+		if (ret)
+			goto err;
	} else {
-		__sp804_clockevents_init(base, irq, clk1 , name);
-		__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
-							 name, clk2, 1);
+
+		ret = __sp804_clockevents_init(base, irq, clk1 , name);
+		if (ret)
+			goto err;
+
+		ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
							       name, clk2, 1);
+		if (ret)
+			goto err;
	}
	initialized = true;

-	return;
+	return 0;
 err:
	iounmap(base);
+	return ret;
 }
 CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);

-static void __init integrator_cp_of_init(struct device_node *np)
+static int __init integrator_cp_of_init(struct device_node *np)
 {
	static int init_count = 0;
	void __iomem *base;
-	int irq;
+	int irq, ret = -EINVAL;
	const char *name = of_get_property(np, "compatible", NULL);
	struct clk *clk;

	base = of_iomap(np, 0);
-	if (WARN_ON(!base))
-		return;
+	if (!base) {
+		pr_err("Failed to iomap");
+		return -ENXIO;
+	}
+
	clk = of_clk_get(np, 0);
-	if (WARN_ON(IS_ERR(clk)))
-		return;
+	if (IS_ERR(clk)) {
+		pr_err("Failed to get clock");
+		return PTR_ERR(clk);
+	}

	/* Ensure timer is disabled */
	writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
	if (init_count == 2 || !of_device_is_available(np))
		goto err;

-	if (!init_count)
-		__sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
-	else {
+	if (!init_count) {
+		ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+		if (ret)
+			goto err;
+	} else {
		irq = irq_of_parse_and_map(np, 0);
		if (irq <= 0)
			goto err;

-		__sp804_clockevents_init(base, irq, clk, name);
+		ret = __sp804_clockevents_init(base, irq, clk, name);
+		if (ret)
+			goto err;
	}

	init_count++;
-	return;
+	return 0;
 err:
	iounmap(base);
+	return ret;
 }
 CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
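sp804 and integrator-cp seed ret with -EINVAL so that every early goto err lands with a meaningful status even before any callee has assigned one. The idiom in isolation (bar_* names are hypothetical; only the usual iomap/irq helpers are assumed):

static int bar_register_clockevent(void __iomem *base, int irq);

static int __init bar_init(struct device_node *np)
{
	void __iomem *base;
	int irq, ret = -EINVAL;	/* default status for early bail-outs */

	base = of_iomap(np, 0);
	if (!base)
		return -ENXIO;

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0)
		goto err;	/* ret still holds the seeded -EINVAL */

	ret = bar_register_clockevent(base, irq);
	if (ret)
		goto err;	/* ret now carries the callee's code */

	return 0;

err:
	iounmap(base);
	return ret;
}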
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
	},
 };

-static void __init stm32_clockevent_init(struct device_node *np)
+static int __init stm32_clockevent_init(struct device_node *np)
 {
	struct stm32_clock_event_ddata *data = &clock_event_ddata;
	struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)

	data->base = of_iomap(np, 0);
	if (!data->base) {
+		ret = -ENXIO;
		pr_err("failed to map registers for clockevent\n");
		goto err_iomap;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
+		ret = -EINVAL;
		pr_err("%s: failed to get irq.\n", np->full_name);
		goto err_get_irq;
	}
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
	pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
		np->full_name, bits);

-	return;
+	return ret;

 err_get_irq:
	iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
 err_clk_enable:
	clk_put(clk);
 err_clk_get:
-	return;
+	return ret;
 }
 CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
@@ -311,33 +311,42 @@ err_free:
	return ret;
 }

-static void __init sun5i_timer_init(struct device_node *node)
+static int __init sun5i_timer_init(struct device_node *node)
 {
	struct reset_control *rstc;
	void __iomem *timer_base;
	struct clk *clk;
-	int irq;
+	int irq, ret;

	timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-	if (IS_ERR(timer_base))
-		panic("Can't map registers");
+	if (IS_ERR(timer_base)) {
+		pr_err("Can't map registers");
+		return PTR_ERR(timer_base);
+	}

	irq = irq_of_parse_and_map(node, 0);
-	if (irq <= 0)
-		panic("Can't parse IRQ");
+	if (irq <= 0) {
+		pr_err("Can't parse IRQ");
+		return -EINVAL;
+	}

	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk))
-		panic("Can't get timer clock");
+	if (IS_ERR(clk)) {
+		pr_err("Can't get timer clock");
+		return PTR_ERR(clk);
+	}

	rstc = of_reset_control_get(node, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);

-	sun5i_setup_clocksource(node, timer_base, clk, irq);
-	sun5i_setup_clockevent(node, timer_base, clk, irq);
+	ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+	if (ret)
+		return ret;
+
+	return sun5i_setup_clockevent(node, timer_base, clk, irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
-			 sun5i_timer_init);
+		       sun5i_timer_init);
 CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
-			 sun5i_timer_init);
+		       sun5i_timer_init);
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
	return ti_32k_read_cycles(&ti_32k_timer.cs);
 }

-static void __init ti_32k_timer_init(struct device_node *np)
+static int __init ti_32k_timer_init(struct device_node *np)
 {
	int ret;

	ti_32k_timer.base = of_iomap(np, 0);
	if (!ti_32k_timer.base) {
		pr_err("Can't ioremap 32k timer base\n");
-		return;
+		return -ENXIO;
	}

	ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
	ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
	if (ret) {
		pr_err("32k_counter: can't register clocksource\n");
-		return;
+		return ret;
	}

	sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
	pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
+	return 0;
 }
 CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
		ti_32k_timer_init);
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;

 /*
  * This sets up the system timers, clock source and clock event.
*/ -static void __init u300_timer_init_of(struct device_node *np) +static int __init u300_timer_init_of(struct device_node *np) { unsigned int irq; struct clk *clk; unsigned long rate; + int ret; u300_timer_base = of_iomap(np, 0); - if (!u300_timer_base) - panic("could not ioremap system timer\n"); + if (!u300_timer_base) { + pr_err("could not ioremap system timer\n"); + return -ENXIO; + } /* Get the IRQ for the GP1 timer */ irq = irq_of_parse_and_map(np, 2); - if (!irq) - panic("no IRQ for system timer\n"); + if (!irq) { + pr_err("no IRQ for system timer\n"); + return -EINVAL; + } pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq); /* Clock the interrupt controller */ clk = of_clk_get(np, 0); - BUG_ON(IS_ERR(clk)); - clk_prepare_enable(clk); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = clk_prepare_enable(clk); + if (ret) + return ret; + rate = clk_get_rate(clk); u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); @@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np) u300_timer_base + U300_TIMER_APP_RGPT1); /* Set up the IRQ handler */ - setup_irq(irq, &u300_timer_irq); + ret = setup_irq(irq, &u300_timer_irq); + if (ret) + return ret; /* Reset the General Purpose timer 2 */ writel(U300_TIMER_APP_RGPT2_TIMER_RESET, @@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np) u300_timer_base + U300_TIMER_APP_EGPT2); /* Use general purpose timer 2 as clock source */ - if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC, - "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) + ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC, + "GPT2", rate, 300, 32, clocksource_mmio_readl_up); + if (ret) { pr_err("timer: failed to initialize U300 clock source\n"); + return ret; + } /* Configure and register the clockevent */ clockevents_config_and_register(&u300_clockevent_data.cevd, rate, @@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np) * TODO: init and register the rest of the timers too, they can be * used by hrtimers! 
*/ + return 0; } CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer", @@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void) return readl(versatile_sys_24mhz); } -static void __init versatile_sched_clock_init(struct device_node *node) +static int __init versatile_sched_clock_init(struct device_node *node) { void __iomem *base = of_iomap(node, 0); if (!base) - return; + return -ENXIO; versatile_sys_24mhz = base + SYS_24MHZ; sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); + + return 0; } CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg", versatile_sched_clock_init); @@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq) return 0; } -static void __init pit_timer_init(struct device_node *np) +static int __init pit_timer_init(struct device_node *np) { struct clk *pit_clk; void __iomem *timer_base; unsigned long clk_rate; - int irq; + int irq, ret; timer_base = of_iomap(np, 0); - BUG_ON(!timer_base); + if (!timer_base) { + pr_err("Failed to iomap"); + return -ENXIO; + } /* * PIT0 and PIT1 can be chained to build a 64-bit timer, @@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np) clkevt_base = timer_base + PITn_OFFSET(3); irq = irq_of_parse_and_map(np, 0); - BUG_ON(irq <= 0); + if (irq <= 0) + return -EINVAL; pit_clk = of_clk_get(np, 0); - BUG_ON(IS_ERR(pit_clk)); + if (IS_ERR(pit_clk)) + return PTR_ERR(pit_clk); - BUG_ON(clk_prepare_enable(pit_clk)); + ret = clk_prepare_enable(pit_clk); + if (ret) + return ret; clk_rate = clk_get_rate(pit_clk); cycle_per_jiffy = clk_rate / (HZ); @@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np) /* enable the pit module */ __raw_writel(~PITMCR_MDIS, timer_base + PITMCR); - BUG_ON(pit_clocksource_init(clk_rate)); + ret = pit_clocksource_init(clk_rate); + if (ret) + return ret; - pit_clockevent_init(clk_rate, irq); + return pit_clockevent_init(clk_rate, irq); } CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init); @@ -121,38 +121,48 @@ static struct irqaction irq = { .dev_id = &clockevent, }; -static void __init vt8500_timer_init(struct device_node *np) +static int __init vt8500_timer_init(struct device_node *np) { - int timer_irq; + int timer_irq, ret; regbase = of_iomap(np, 0); if (!regbase) { pr_err("%s: Missing iobase description in Device Tree\n", __func__); - return; + return -ENXIO; } + timer_irq = irq_of_parse_and_map(np, 0); if (!timer_irq) { pr_err("%s: Missing irq description in Device Tree\n", __func__); - return; + return -EINVAL; } writel(1, regbase + TIMER_CTRL_VAL); writel(0xf, regbase + TIMER_STATUS_VAL); writel(~0, regbase + TIMER_MATCH_VAL); - if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ)) + ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ); + if (ret) { pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n", - __func__, clocksource.name); + __func__, clocksource.name); + return ret; + } clockevent.cpumask = cpumask_of(0); - if (setup_irq(timer_irq, &irq)) + ret = setup_irq(timer_irq, &irq); + if (ret) { pr_err("%s: setup_irq failed for %s\n", __func__, clockevent.name); + return ret; + } + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ, MIN_OSCR_DELTA * 2, 0xf0000000); + + return 0; } CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init); @@ -210,9 +210,9 @@ error_free: return ret; } -static void __init zevio_timer_init(struct device_node *node) +static int __init zevio_timer_init(struct device_node *node) { - 
BUG_ON(zevio_timer_add(node)); + return zevio_timer_add(node); } CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); @@ -22,7 +22,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <linux/module.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/init.h> @@ -56,11 +55,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; /* proc_event_counts is used as the sequence number of the netlink message */ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; -static inline void get_seq(__u32 *ts, int *cpu) +static inline void send_msg(struct cn_msg *msg) { preempt_disable(); - *ts = __this_cpu_inc_return(proc_event_counts) - 1; - *cpu = smp_processor_id(); + + msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; + ((struct proc_event *)msg->data)->cpu = smp_processor_id(); + + /* + * Preemption remains disabled during send to ensure the messages are + * ordered according to their sequence numbers. + * + * If cn_netlink_send() fails, the data is not sent. + */ + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); + preempt_enable(); } @@ -77,7 +86,6 @@ void proc_fork_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_FORK; rcu_read_lock(); @@ -92,8 +100,7 @@ void proc_fork_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - /* If cn_netlink_send() failed, the data is not sent */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_exec_connector(struct task_struct *task) @@ -108,7 +115,6 @@ void proc_exec_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_EXEC; ev->event_data.exec.process_pid = task->pid; @@ -118,7 +124,7 @@ void proc_exec_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_id_connector(struct task_struct *task, int which_id) @@ -150,14 +156,13 @@ void proc_id_connector(struct task_struct *task, int which_id) return; } rcu_read_unlock(); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_sid_connector(struct task_struct *task) @@ -172,7 +177,6 @@ void proc_sid_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_SID; ev->event_data.sid.process_pid = task->pid; @@ -182,7 +186,7 @@ void proc_sid_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_ptrace_connector(struct task_struct *task, int ptrace_id) @@ -197,7 +201,6 @@ void proc_ptrace_connector(struct task_struct *task, int 
ptrace_id) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_PTRACE; ev->event_data.ptrace.process_pid = task->pid; @@ -215,7 +218,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_comm_connector(struct task_struct *task) @@ -230,7 +233,6 @@ void proc_comm_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_COMM; ev->event_data.comm.process_pid = task->pid; @@ -241,7 +243,7 @@ void proc_comm_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_coredump_connector(struct task_struct *task) @@ -256,7 +258,6 @@ void proc_coredump_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; @@ -266,7 +267,7 @@ void proc_coredump_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } void proc_exit_connector(struct task_struct *task) @@ -281,7 +282,6 @@ void proc_exit_connector(struct task_struct *task) msg = buffer_to_cn_msg(buffer); ev = (struct proc_event *)msg->data; memset(&ev->event_data, 0, sizeof(ev->event_data)); - get_seq(&msg->seq, &ev->cpu); ev->timestamp_ns = ktime_get_ns(); ev->what = PROC_EVENT_EXIT; ev->event_data.exit.process_pid = task->pid; @@ -293,7 +293,7 @@ void proc_exit_connector(struct task_struct *task) msg->ack = 0; /* not used */ msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } /* @@ -325,7 +325,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) msg->ack = rcvd_ack + 1; msg->len = sizeof(*ev); msg->flags = 0; /* not used */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); + send_msg(msg); } /** @@ -389,5 +389,4 @@ static int __init cn_proc_init(void) } return 0; } - -module_init(cn_proc_init); +device_initcall(cn_proc_init); @@ -31,23 +31,18 @@ config CPU_FREQ_BOOST_SW depends on THERMAL config CPU_FREQ_STAT - tristate "CPU frequency translation statistics" + bool "CPU frequency transition statistics" default y help - This driver exports CPU frequency statistics information through sysfs - file system. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_stats. + Export CPU frequency statistics information through sysfs. If in doubt, say N. config CPU_FREQ_STAT_DETAILS - bool "CPU frequency translation statistics details" + bool "CPU frequency transition statistics details" depends on CPU_FREQ_STAT help - This will show detail CPU frequency translation table in sysfs file - system. + Show detailed CPU frequency transition table in sysfs. If in doubt, say N. 
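The cn_proc rework above folds get_seq() into send_msg() so that taking the per-CPU sequence number and emitting the message happen inside one preempt_disable() window; a preemption between the two steps could otherwise reorder messages relative to their sequence numbers. Reduced to its essentials (struct my_msg and my_emit() are stand-ins for cn_msg and cn_netlink_send(), not the connector API itself):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/gfp.h>

struct my_msg {
	u32 seq;
	u32 cpu;
	/* payload follows */
};

static DEFINE_PER_CPU(u32, my_event_counts);

static void my_send(struct my_msg *msg)
{
	preempt_disable();

	/* Sequence number and CPU id come from the same window... */
	msg->seq = __this_cpu_inc_return(my_event_counts) - 1;
	msg->cpu = smp_processor_id();

	/*
	 * ...and the send happens before preemption is re-enabled, so
	 * messages leave in per-CPU sequence order. GFP_NOWAIT rather
	 * than GFP_KERNEL, because sleeping is not allowed here --
	 * which is exactly why the patch switches the allocation flag.
	 */
	my_emit(msg, GFP_NOWAIT);

	preempt_enable();
}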
@@ -468,20 +468,17 @@ unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy, struct acpi_cpufreq_data *data = policy->driver_data; struct acpi_processor_performance *perf; struct cpufreq_frequency_table *entry; - unsigned int next_perf_state, next_freq, freq; + unsigned int next_perf_state, next_freq, index; /* * Find the closest frequency above target_freq. - * - * The table is sorted in the reverse order with respect to the - * frequency and all of the entries are valid (see the initialization). */ - entry = policy->freq_table; - do { - entry++; - freq = entry->frequency; - } while (freq >= target_freq && freq != CPUFREQ_TABLE_END); - entry--; + if (policy->cached_target_freq == target_freq) + index = policy->cached_resolved_idx; + else + index = cpufreq_table_find_index_dl(policy, target_freq); + + entry = &policy->freq_table[index]; next_freq = entry->frequency; next_perf_state = entry->driver_data; @@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy, struct policy_dbs_info *policy_dbs = policy->governor_data; struct dbs_data *od_data = policy_dbs->dbs_data; struct od_dbs_tuners *od_tuners = od_data->tuners; - struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs); - if (!od_info->freq_table) + if (!policy->freq_table) return freq_next; rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, @@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy, else { unsigned int index; - cpufreq_frequency_table_target(policy, - od_info->freq_table, policy->cur - 1, - CPUFREQ_RELATION_H, &index); - freq_next = od_info->freq_table[index].frequency; + index = cpufreq_table_find_index_h(policy, + policy->cur - 1); + freq_next = policy->freq_table[index].frequency; } data->freq_prev = freq_next; @@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = { static int __init cpufreq_dt_platdev_init(void) { struct device_node *np = of_find_node_by_path("/"); + const struct of_device_id *match; if (!np) return -ENODEV; - if (!of_match_node(machines, np)) + match = of_match_node(machines, np); + of_node_put(np); + if (!match) return -ENODEV; - of_node_put(of_root); - return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, NULL, 0)); } @@ -74,19 +74,12 @@ static inline bool has_target(void) } /* internal prototypes */ -static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); static unsigned int __cpufreq_get(struct cpufreq_policy *policy); +static int cpufreq_init_governor(struct cpufreq_policy *policy); +static void cpufreq_exit_governor(struct cpufreq_policy *policy); static int cpufreq_start_governor(struct cpufreq_policy *policy); - -static inline void cpufreq_exit_governor(struct cpufreq_policy *policy) -{ - (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); -} - -static inline void cpufreq_stop_governor(struct cpufreq_policy *policy) -{ - (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP); -} +static void cpufreq_stop_governor(struct cpufreq_policy *policy); +static void cpufreq_governor_limits(struct cpufreq_policy *policy); /** * Two notifier lists: the "policy" list is involved in the @@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(get_governor_parent_kobj); -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) -{ - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); - - return policy && !policy_is_inactive(policy) ? 
- policy->freq_table : NULL; -} -EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); - static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; @@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); + cpufreq_stats_record_transition(policy, freqs->new); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) @@ -507,6 +492,38 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); +/** + * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported + * one. + * @target_freq: target frequency to resolve. + * + * The target to driver frequency mapping is cached in the policy. + * + * Return: Lowest driver-supported frequency greater than or equal to the + * given target_freq, subject to policy (min/max) and driver limitations. + */ +unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + policy->cached_target_freq = target_freq; + + if (cpufreq_driver->target_index) { + int idx; + + idx = cpufreq_frequency_table_target(policy, target_freq, + CPUFREQ_RELATION_L); + policy->cached_resolved_idx = idx; + return policy->freq_table[idx].frequency; + } + + if (cpufreq_driver->resolve_freq) + return cpufreq_driver->resolve_freq(policy, target_freq); + + return target_freq; +} +EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); + /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ @@ -1115,6 +1132,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) CPUFREQ_REMOVE_POLICY, policy); down_write(&policy->rwsem); + cpufreq_stats_free_table(policy); cpufreq_remove_dev_symlink(policy); kobj = &policy->kobj; cmp = &policy->kobj_unregister; @@ -1265,13 +1283,12 @@ static int cpufreq_online(unsigned int cpu) } } - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_START, policy); - if (new_policy) { ret = cpufreq_add_dev_interface(policy); if (ret) goto out_exit_policy; + + cpufreq_stats_create_table(policy); blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); @@ -1280,6 +1297,9 @@ static int cpufreq_online(unsigned int cpu) write_unlock_irqrestore(&cpufreq_driver_lock, flags); } + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_START, policy); + ret = cpufreq_init_policy(policy); if (ret) { pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", @@ -1556,9 +1576,6 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy) { unsigned int new_freq; - if (cpufreq_suspended) - return 0; - new_freq = cpufreq_driver->get(policy->cpu); if (!new_freq) return 0; @@ -1832,7 +1849,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { - clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, policy->min, policy->max); return cpufreq_driver->fast_switch(policy, target_freq); } @@ -1864,14 +1881,17 @@ static int __target_intermediate(struct cpufreq_policy *policy, return ret; } -static 
int __target_index(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *freq_table, int index) +static int __target_index(struct cpufreq_policy *policy, int index) { struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; unsigned int intermediate_freq = 0; + unsigned int newfreq = policy->freq_table[index].frequency; int retval = -EINVAL; bool notify; + if (newfreq == policy->cur) + return 0; + notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); if (notify) { /* Handle switching to intermediate frequency */ @@ -1886,7 +1906,7 @@ static int __target_index(struct cpufreq_policy *policy, freqs.old = freqs.new; } - freqs.new = freq_table[index].frequency; + freqs.new = newfreq; pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", __func__, policy->cpu, freqs.old, freqs.new); @@ -1923,17 +1943,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation) { unsigned int old_target_freq = target_freq; - struct cpufreq_frequency_table *freq_table; - int index, retval; + int index; if (cpufreq_disabled()) return -ENODEV; /* Make sure that target_freq is within supported range */ - if (target_freq > policy->max) - target_freq = policy->max; - if (target_freq < policy->min) - target_freq = policy->min; + target_freq = clamp_val(target_freq, policy->min, policy->max); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); @@ -1956,23 +1972,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (!cpufreq_driver->target_index) return -EINVAL; - freq_table = cpufreq_frequency_get_table(policy->cpu); - if (unlikely(!freq_table)) { - pr_err("%s: Unable to find freq_table\n", __func__); - return -EINVAL; - } - - retval = cpufreq_frequency_table_target(policy, freq_table, target_freq, - relation, &index); - if (unlikely(retval)) { - pr_err("%s: Unable to find matching freq\n", __func__); - return retval; - } - - if (freq_table[index].frequency == policy->cur) - return 0; + index = cpufreq_frequency_table_target(policy, target_freq, relation); - return __target_index(policy, freq_table, index); + return __target_index(policy, index); } EXPORT_SYMBOL_GPL(__cpufreq_driver_target); @@ -1997,7 +1999,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void) return NULL; } -static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) +static int cpufreq_init_governor(struct cpufreq_policy *policy) { int ret; @@ -2025,36 +2027,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event) } } - if (event == CPUFREQ_GOV_POLICY_INIT) - if (!try_module_get(policy->governor->owner)) - return -EINVAL; - - pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event); + if (!try_module_get(policy->governor->owner)) + return -EINVAL; - ret = policy->governor->governor(policy, event); + pr_debug("%s: for CPU %u\n", __func__, policy->cpu); - if (event == CPUFREQ_GOV_POLICY_INIT) { - if (ret) + if (policy->governor->init) { + ret = policy->governor->init(policy); + if (ret) { module_put(policy->governor->owner); - else - policy->governor->initialized++; - } else if (event == CPUFREQ_GOV_POLICY_EXIT) { - policy->governor->initialized--; - module_put(policy->governor->owner); + return ret; + } } - return ret; + return 0; +} + +static void cpufreq_exit_governor(struct cpufreq_policy *policy) +{ + if (cpufreq_suspended || !policy->governor) + return; + + pr_debug("%s: for CPU %u\n", __func__, policy->cpu); + + if 
(policy->governor->exit) + policy->governor->exit(policy); + + module_put(policy->governor->owner); } static int cpufreq_start_governor(struct cpufreq_policy *policy) { int ret; + if (cpufreq_suspended) + return 0; + + if (!policy->governor) + return -EINVAL; + + pr_debug("%s: for CPU %u\n", __func__, policy->cpu); + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) cpufreq_update_current_freq(policy); - ret = cpufreq_governor(policy, CPUFREQ_GOV_START); - return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + if (policy->governor->start) { + ret = policy->governor->start(policy); + if (ret) + return ret; + } + + if (policy->governor->limits) + policy->governor->limits(policy); + + return 0; +} + +static void cpufreq_stop_governor(struct cpufreq_policy *policy) +{ + if (cpufreq_suspended || !policy->governor) + return; + + pr_debug("%s: for CPU %u\n", __func__, policy->cpu); + + if (policy->governor->stop) + policy->governor->stop(policy); +} + +static void cpufreq_governor_limits(struct cpufreq_policy *policy) +{ + if (cpufreq_suspended || !policy->governor) + return; + + pr_debug("%s: for CPU %u\n", __func__, policy->cpu); + + if (policy->governor->limits) + policy->governor->limits(policy); } int cpufreq_register_governor(struct cpufreq_governor *governor) @@ -2069,7 +2117,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor) mutex_lock(&cpufreq_governor_mutex); - governor->initialized = 0; err = -EBUSY; if (!find_governor(governor->name)) { err = 0; @@ -2184,6 +2231,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->min = new_policy->min; policy->max = new_policy->max; + policy->cached_target_freq = UINT_MAX; + pr_debug("new min and max freqs are %u - %u kHz\n", policy->min, policy->max); @@ -2195,7 +2244,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (new_policy->governor == policy->governor) { pr_debug("cpufreq: governor limits update\n"); - return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); + cpufreq_governor_limits(policy); + return 0; } pr_debug("governor switch\n"); @@ -2210,7 +2260,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, /* start new governor */ policy->governor = new_policy->governor; - ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); + ret = cpufreq_init_governor(policy); if (!ret) { ret = cpufreq_start_governor(policy); if (!ret) { @@ -2224,7 +2274,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, pr_debug("starting governor %s failed\n", policy->governor->name); if (old_gov) { policy->governor = old_gov; - if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) + if (cpufreq_init_governor(policy)) policy->governor = NULL; else cpufreq_start_governor(policy); @@ -2261,6 +2311,10 @@ int cpufreq_update_policy(unsigned int cpu) * -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { + if (cpufreq_suspended) { + ret = -EAGAIN; + goto unlock; + } new_policy.cur = cpufreq_update_current_freq(policy); if (WARN_ON(!new_policy.cur)) { ret = -EIO; @@ -2305,26 +2359,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = { *********************************************************************/ static int cpufreq_boost_set_sw(int state) { - struct cpufreq_frequency_table *freq_table; struct cpufreq_policy *policy; int ret = -EINVAL; for_each_active_policy(policy) { - freq_table = cpufreq_frequency_get_table(policy->cpu); - if (freq_table) { - ret = 
cpufreq_frequency_table_cpuinfo(policy, - freq_table); - if (ret) { - pr_err("%s: Policy frequency update failed\n", - __func__); - break; - } - - down_write(&policy->rwsem); - policy->user_policy.max = policy->max; - cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - up_write(&policy->rwsem); + if (!policy->freq_table) + continue; + + ret = cpufreq_frequency_table_cpuinfo(policy, + policy->freq_table); + if (ret) { + pr_err("%s: Policy frequency update failed\n", + __func__); + break; } + + down_write(&policy->rwsem); + policy->user_policy.max = policy->max; + cpufreq_governor_limits(policy); + up_write(&policy->rwsem); } return ret; @@ -17,7 +17,6 @@ struct cs_policy_dbs_info { struct policy_dbs_info policy_dbs; unsigned int down_skip; - unsigned int requested_freq; }; static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs) @@ -75,19 +74,17 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) /* Check for frequency increase */ if (load > dbs_data->up_threshold) { + unsigned int requested_freq = policy->cur; + dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ - if (dbs_info->requested_freq == policy->max) + if (requested_freq == policy->max) goto out; - dbs_info->requested_freq += get_freq_target(cs_tuners, policy); - - if (dbs_info->requested_freq > policy->max) - dbs_info->requested_freq = policy->max; + requested_freq += get_freq_target(cs_tuners, policy); - __cpufreq_driver_target(policy, dbs_info->requested_freq, - CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H); goto out; } @@ -98,36 +95,27 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) /* Check for frequency decrease */ if (load < cs_tuners->down_threshold) { - unsigned int freq_target; + unsigned int freq_target, requested_freq = policy->cur; /* * if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) + if (requested_freq == policy->min) goto out; freq_target = get_freq_target(cs_tuners, policy); - if (dbs_info->requested_freq > freq_target) - dbs_info->requested_freq -= freq_target; + if (requested_freq > freq_target) + requested_freq -= freq_target; else - dbs_info->requested_freq = policy->min; + requested_freq = policy->min; - __cpufreq_driver_target(policy, dbs_info->requested_freq, - CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L); } out: return dbs_data->sampling_rate; } -static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data); - -static struct notifier_block cs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier, -}; - /************************** sysfs interface ************************/ -static struct dbs_governor cs_dbs_gov; static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, const char *buf, size_t count) @@ -268,15 +256,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs) kfree(to_dbs_info(policy_dbs)); } -static int cs_init(struct dbs_data *dbs_data, bool notify) +static int cs_init(struct dbs_data *dbs_data) { struct cs_dbs_tuners *tuners; tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); - if (!tuners) { - pr_err("%s: kzalloc failed\n", __func__); + if (!tuners) return -ENOMEM; - } tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; tuners->freq_step = DEF_FREQUENCY_STEP; @@ -288,19 +274,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify) dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * 
jiffies_to_usecs(10); - if (notify) - cpufreq_register_notifier(&cs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - return 0; } -static void cs_exit(struct dbs_data *dbs_data, bool notify) +static void cs_exit(struct dbs_data *dbs_data) { - if (notify) - cpufreq_unregister_notifier(&cs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - kfree(dbs_data->tuners); } @@ -309,16 +287,10 @@ static void cs_start(struct cpufreq_policy *policy) struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); dbs_info->down_skip = 0; - dbs_info->requested_freq = policy->cur; } -static struct dbs_governor cs_dbs_gov = { - .gov = { - .name = "conservative", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, - }, +static struct dbs_governor cs_governor = { + .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"), .kobj_type = { .default_attrs = cs_attributes }, .gov_dbs_timer = cs_dbs_timer, .alloc = cs_alloc, @@ -328,33 +300,7 @@ static struct dbs_governor cs_dbs_gov = { .start = cs_start, }; -#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov) - -static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu); - struct cs_policy_dbs_info *dbs_info; - - if (!policy) - return 0; - - /* policy isn't governed by conservative governor */ - if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE) - return 0; - - dbs_info = to_dbs_info(policy->governor_data); - /* - * we only care if our internally tracked freq moves outside the 'valid' - * ranges of frequency available to us otherwise we do not change it - */ - if (dbs_info->requested_freq > policy->max - || dbs_info->requested_freq < policy->min) - dbs_info->requested_freq = freq->new; - - return 0; -} +#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov) static int __init cpufreq_gov_dbs_init(void) { @@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy) synchronize_sched(); } -static void gov_cancel_work(struct cpufreq_policy *policy) -{ - struct policy_dbs_info *policy_dbs = policy->governor_data; - - gov_clear_update_util(policy_dbs->policy); - irq_work_sync(&policy_dbs->irq_work); - cancel_work_sync(&policy_dbs->work); - atomic_set(&policy_dbs->work_count, 0); - policy_dbs->work_in_progress = false; -} - static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy, struct dbs_governor *gov) { @@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs, gov->free(policy_dbs); } -static int cpufreq_governor_init(struct cpufreq_policy *policy) +int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) { struct dbs_governor *gov = dbs_governor_of(policy); struct dbs_data *dbs_data; @@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy) gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list); - ret = gov->init(dbs_data, !policy->governor->initialized); + ret = gov->init(dbs_data); if (ret) goto free_policy_dbs_info; @@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy) goto out; /* Failure, so roll back. 
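Clear the governor pointers, run the governor's exit() callback and free the dbs_data that was just allocated.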
*/ - pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret); + pr_err("initialization failed (dbs_data kobject init error %d)\n", ret); policy->governor_data = NULL; if (!have_governor_per_policy()) gov->gdbs_data = NULL; - gov->exit(dbs_data, !policy->governor->initialized); + gov->exit(dbs_data); kfree(dbs_data); free_policy_dbs_info: @@ -474,8 +463,9 @@ out: mutex_unlock(&gov_dbs_data_mutex); return ret; } +EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init); -static int cpufreq_governor_exit(struct cpufreq_policy *policy) +void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy) { struct dbs_governor *gov = dbs_governor_of(policy); struct policy_dbs_info *policy_dbs = policy->governor_data; @@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy) if (!have_governor_per_policy()) gov->gdbs_data = NULL; - gov->exit(dbs_data, policy->governor->initialized == 1); + gov->exit(dbs_data); kfree(dbs_data); } free_policy_dbs_info(policy_dbs, gov); mutex_unlock(&gov_dbs_data_mutex); - return 0; } +EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit); -static int cpufreq_governor_start(struct cpufreq_policy *policy) +int cpufreq_dbs_governor_start(struct cpufreq_policy *policy) { struct dbs_governor *gov = dbs_governor_of(policy); struct policy_dbs_info *policy_dbs = policy->governor_data; @@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy) gov_set_update_util(policy_dbs, sampling_rate); return 0; } +EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start); -static int cpufreq_governor_stop(struct cpufreq_policy *policy) +void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy) { - gov_cancel_work(policy); - return 0; + struct policy_dbs_info *policy_dbs = policy->governor_data; + + gov_clear_update_util(policy_dbs->policy); + irq_work_sync(&policy_dbs->irq_work); + cancel_work_sync(&policy_dbs->work); + atomic_set(&policy_dbs->work_count, 0); + policy_dbs->work_in_progress = false; } +EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop); -static int cpufreq_governor_limits(struct cpufreq_policy *policy) +void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy) { struct policy_dbs_info *policy_dbs = policy->governor_data; mutex_lock(&policy_dbs->timer_mutex); - - if (policy->max < policy->cur) - __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); - else if (policy->min > policy->cur) - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); - + cpufreq_policy_apply_limits(policy); gov_update_sample_delay(policy_dbs, 0); mutex_unlock(&policy_dbs->timer_mutex); - - return 0; -} - -int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) -{ - if (event == CPUFREQ_GOV_POLICY_INIT) { - return cpufreq_governor_init(policy); - } else if (policy->governor_data) { - switch (event) { - case CPUFREQ_GOV_POLICY_EXIT: - return cpufreq_governor_exit(policy); - case CPUFREQ_GOV_START: - return cpufreq_governor_start(policy); - case CPUFREQ_GOV_STOP: - return cpufreq_governor_stop(policy); - case CPUFREQ_GOV_LIMITS: - return cpufreq_governor_limits(policy); - } - } - return -EINVAL; } -EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); +EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits); @@ -138,8 +138,8 @@ struct dbs_governor { unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy); struct policy_dbs_info *(*alloc)(void); void (*free)(struct policy_dbs_info *policy_dbs); - int (*init)(struct dbs_data *dbs_data, bool notify); - void (*exit)(struct dbs_data *dbs_data, bool notify); + int 
(*init)(struct dbs_data *dbs_data); + void (*exit)(struct dbs_data *dbs_data); void (*start)(struct cpufreq_policy *policy); }; @@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy return container_of(policy->governor, struct dbs_governor, gov); } +/* Governor callback routines */ +int cpufreq_dbs_governor_init(struct cpufreq_policy *policy); +void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy); +int cpufreq_dbs_governor_start(struct cpufreq_policy *policy); +void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy); +void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); + +#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ + { \ + .name = _name_, \ + .max_transition_latency = TRANSITION_LATENCY_LIMIT, \ + .owner = THIS_MODULE, \ + .init = cpufreq_dbs_governor_init, \ + .exit = cpufreq_dbs_governor_exit, \ + .start = cpufreq_dbs_governor_start, \ + .stop = cpufreq_dbs_governor_stop, \ + .limits = cpufreq_dbs_governor_limits, \ + } + /* Governor specific operations */ struct od_ops { unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, @@ -155,7 +174,6 @@ struct od_ops { }; unsigned int dbs_update(struct cpufreq_policy *policy); -int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event); void od_register_powersave_bias_handler(unsigned int (*f) (struct cpufreq_policy *, unsigned int, unsigned int), unsigned int powersave_bias); @@ -65,34 +65,30 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, { unsigned int freq_req, freq_reduc, freq_avg; unsigned int freq_hi, freq_lo; - unsigned int index = 0; + unsigned int index; unsigned int delay_hi_us; struct policy_dbs_info *policy_dbs = policy->governor_data; struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); struct dbs_data *dbs_data = policy_dbs->dbs_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cpufreq_frequency_table *freq_table = policy->freq_table; - if (!dbs_info->freq_table) { + if (!freq_table) { dbs_info->freq_lo = 0; dbs_info->freq_lo_delay_us = 0; return freq_next; } - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, - relation, &index); - freq_req = dbs_info->freq_table[index].frequency; + index = cpufreq_frequency_table_target(policy, freq_next, relation); + freq_req = freq_table[index].frequency; freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; /* Find freq bounds for freq_avg in freq_table */ - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_H, &index); - freq_lo = dbs_info->freq_table[index].frequency; - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_L, &index); - freq_hi = dbs_info->freq_table[index].frequency; + index = cpufreq_table_find_index_h(policy, freq_avg); + freq_lo = freq_table[index].frequency; + index = cpufreq_table_find_index_l(policy, freq_avg); + freq_hi = freq_table[index].frequency; /* Find out how long we have to be in hi and lo freqs */ if (freq_hi == freq_lo) { @@ -113,7 +109,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy) { struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); - dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu); dbs_info->freq_lo = 0; } @@ -361,17 +356,15 @@ static void od_free(struct policy_dbs_info *policy_dbs) kfree(to_dbs_info(policy_dbs)); } -static int od_init(struct dbs_data 
*dbs_data, bool notify) +static int od_init(struct dbs_data *dbs_data) { struct od_dbs_tuners *tuners; u64 idle_time; int cpu; tuners = kzalloc(sizeof(*tuners), GFP_KERNEL); - if (!tuners) { - pr_err("%s: kzalloc failed\n", __func__); + if (!tuners) return -ENOMEM; - } cpu = get_cpu(); idle_time = get_cpu_idle_time_us(cpu, NULL); @@ -402,7 +395,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify) return 0; } -static void od_exit(struct dbs_data *dbs_data, bool notify) +static void od_exit(struct dbs_data *dbs_data) { kfree(dbs_data->tuners); } @@ -420,12 +413,7 @@ static struct od_ops od_ops = { }; static struct dbs_governor od_dbs_gov = { - .gov = { - .name = "ondemand", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, - }, + .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"), .kobj_type = { .default_attrs = od_attributes }, .gov_dbs_timer = od_dbs_timer, .alloc = od_alloc, @@ -13,7 +13,6 @@ struct od_policy_dbs_info { struct policy_dbs_info policy_dbs; - struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_delay_us; unsigned int freq_hi_delay_us; @@ -16,27 +16,16 @@ #include <linux/init.h> #include <linux/module.h> -static int cpufreq_governor_performance(struct cpufreq_policy *policy, - unsigned int event) +static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy) { - switch (event) { - case CPUFREQ_GOV_START: - case CPUFREQ_GOV_LIMITS: - pr_debug("setting to %u kHz because of event %u\n", - policy->max, event); - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - break; - default: - break; - } - return 0; + pr_debug("setting to %u kHz\n", policy->max); + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); } static struct cpufreq_governor cpufreq_gov_performance = { .name = "performance", - .governor = cpufreq_governor_performance, .owner = THIS_MODULE, + .limits = cpufreq_gov_performance_limits, }; static int __init cpufreq_gov_performance_init(void) @@ -16,26 +16,15 @@ #include <linux/init.h> #include <linux/module.h> -static int cpufreq_governor_powersave(struct cpufreq_policy *policy, - unsigned int event) +static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy) { - switch (event) { - case CPUFREQ_GOV_START: - case CPUFREQ_GOV_LIMITS: - pr_debug("setting to %u kHz because of event %u\n", - policy->min, event); - __cpufreq_driver_target(policy, policy->min, - CPUFREQ_RELATION_L); - break; - default: - break; - } - return 0; + pr_debug("setting to %u kHz\n", policy->min); + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); } static struct cpufreq_governor cpufreq_gov_powersave = { .name = "powersave", - .governor = cpufreq_governor_powersave, + .limits = cpufreq_gov_powersave_limits, .owner = THIS_MODULE, }; @@ -15,7 +15,7 @@ #include <linux/slab.h> #include <linux/cputime.h> -static spinlock_t cpufreq_stats_lock; +static DEFINE_SPINLOCK(cpufreq_stats_lock); struct cpufreq_stats { unsigned int total_trans; @@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) ssize_t len = 0; int i; + if (policy->fast_switch_enabled) + return 0; + cpufreq_stats_update(stats); for (i = 0; i < stats->state_num; i++) { len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], @@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) ssize_t len = 0; int i, j; + if (policy->fast_switch_enabled) + return 0; + len += snprintf(buf + len, 
PAGE_SIZE - len, " From : To\n"); len += snprintf(buf + len, PAGE_SIZE - len, " : "); for (i = 0; i < stats->state_num; i++) { @@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq) return -1; } -static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) +void cpufreq_stats_free_table(struct cpufreq_policy *policy) { struct cpufreq_stats *stats = policy->stats; @@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) policy->stats = NULL; } -static void cpufreq_stats_free_table(unsigned int cpu) -{ - struct cpufreq_policy *policy; - - policy = cpufreq_cpu_get(cpu); - if (!policy) - return; - - __cpufreq_stats_free_table(policy); - - cpufreq_cpu_put(policy); -} - -static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) +void cpufreq_stats_create_table(struct cpufreq_policy *policy) { unsigned int i = 0, count = 0, ret = -ENOMEM; struct cpufreq_stats *stats; unsigned int alloc_size; - unsigned int cpu = policy->cpu; struct cpufreq_frequency_table *pos, *table; /* We need cpufreq table for creating stats table */ - table = cpufreq_frequency_get_table(cpu); + table = policy->freq_table; if (unlikely(!table)) - return 0; + return; /* stats already initialized */ if (policy->stats) - return -EEXIST; + return; stats = kzalloc(sizeof(*stats), GFP_KERNEL); if (!stats) - return -ENOMEM; + return; /* Find total allocation size */ cpufreq_for_each_valid_entry(pos, table) @@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) policy->stats = stats; ret = sysfs_create_group(&policy->kobj, &stats_attr_group); if (!ret) - return 0; + return; /* We failed, release resources */ policy->stats = NULL; kfree(stats->time_in_state); free_stat: kfree(stats); - - return ret; -} - -static void cpufreq_stats_create_table(unsigned int cpu) -{ - struct cpufreq_policy *policy; - - /* - * "likely(!policy)" because normally cpufreq_stats will be registered - * before cpufreq driver - */ - policy = cpufreq_cpu_get(cpu); - if (likely(!policy)) - return; - - __cpufreq_stats_create_table(policy); - - cpufreq_cpu_put(policy); } -static int cpufreq_stat_notifier_policy(struct notifier_block *nb, - unsigned long val, void *data) +void cpufreq_stats_record_transition(struct cpufreq_policy *policy, + unsigned int new_freq) { - int ret = 0; - struct cpufreq_policy *policy = data; - - if (val == CPUFREQ_CREATE_POLICY) - ret = __cpufreq_stats_create_table(policy); - else if (val == CPUFREQ_REMOVE_POLICY) - __cpufreq_stats_free_table(policy); - - return ret; -} - -static int cpufreq_stat_notifier_trans(struct notifier_block *nb, - unsigned long val, void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu); - struct cpufreq_stats *stats; + struct cpufreq_stats *stats = policy->stats; int old_index, new_index; - if (!policy) { - pr_err("%s: No policy found\n", __func__); - return 0; - } - - if (val != CPUFREQ_POSTCHANGE) - goto put_policy; - - if (!policy->stats) { + if (!stats) { pr_debug("%s: No stats found\n", __func__); - goto put_policy; + return; } - stats = policy->stats; - old_index = stats->last_index; - new_index = freq_table_get_index(stats, freq->new); + new_index = freq_table_get_index(stats, new_freq); /* We can't do stats->time_in_state[-1]= .. 
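(freq_table_get_index() returns -1 for a frequency that is missing from the stats table, so bail out in that case, and likewise when the frequency did not actually change)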
*/ - if (old_index == -1 || new_index == -1) - goto put_policy; - - if (old_index == new_index) - goto put_policy; + if (old_index == -1 || new_index == -1 || old_index == new_index) + return; cpufreq_stats_update(stats); @@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, stats->trans_table[old_index * stats->max_state + new_index]++; #endif stats->total_trans++; - -put_policy: - cpufreq_cpu_put(policy); - return 0; } - -static struct notifier_block notifier_policy_block = { - .notifier_call = cpufreq_stat_notifier_policy -}; - -static struct notifier_block notifier_trans_block = { - .notifier_call = cpufreq_stat_notifier_trans -}; - -static int __init cpufreq_stats_init(void) -{ - int ret; - unsigned int cpu; - - spin_lock_init(&cpufreq_stats_lock); - ret = cpufreq_register_notifier(¬ifier_policy_block, - CPUFREQ_POLICY_NOTIFIER); - if (ret) - return ret; - - for_each_online_cpu(cpu) - cpufreq_stats_create_table(cpu); - - ret = cpufreq_register_notifier(¬ifier_trans_block, - CPUFREQ_TRANSITION_NOTIFIER); - if (ret) { - cpufreq_unregister_notifier(¬ifier_policy_block, - CPUFREQ_POLICY_NOTIFIER); - for_each_online_cpu(cpu) - cpufreq_stats_free_table(cpu); - return ret; - } - - return 0; -} -static void __exit cpufreq_stats_exit(void) -{ - unsigned int cpu; - - cpufreq_unregister_notifier(¬ifier_policy_block, - CPUFREQ_POLICY_NOTIFIER); - cpufreq_unregister_notifier(¬ifier_trans_block, - CPUFREQ_TRANSITION_NOTIFIER); - for_each_online_cpu(cpu) - cpufreq_stats_free_table(cpu); -} - -MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); -MODULE_DESCRIPTION("Export cpufreq stats via sysfs"); -MODULE_LICENSE("GPL"); - -module_init(cpufreq_stats_init); -module_exit(cpufreq_stats_exit); @@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy) return 0; } -static int cpufreq_governor_userspace(struct cpufreq_policy *policy, - unsigned int event) +static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy) +{ + mutex_lock(&userspace_mutex); + kfree(policy->governor_data); + policy->governor_data = NULL; + mutex_unlock(&userspace_mutex); +} + +static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy) { unsigned int *setspeed = policy->governor_data; - unsigned int cpu = policy->cpu; - int rc = 0; - if (event == CPUFREQ_GOV_POLICY_INIT) - return cpufreq_userspace_policy_init(policy); + BUG_ON(!policy->cur); + pr_debug("started managing cpu %u\n", policy->cpu); - if (!setspeed) - return -EINVAL; - - switch (event) { - case CPUFREQ_GOV_POLICY_EXIT: - mutex_lock(&userspace_mutex); - policy->governor_data = NULL; - kfree(setspeed); - mutex_unlock(&userspace_mutex); - break; - case CPUFREQ_GOV_START: - BUG_ON(!policy->cur); - pr_debug("started managing cpu %u\n", cpu); - - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, cpu) = 1; - *setspeed = policy->cur; - mutex_unlock(&userspace_mutex); - break; - case CPUFREQ_GOV_STOP: - pr_debug("managing cpu %u stopped\n", cpu); - - mutex_lock(&userspace_mutex); - per_cpu(cpu_is_managed, cpu) = 0; - *setspeed = 0; - mutex_unlock(&userspace_mutex); - break; - case CPUFREQ_GOV_LIMITS: - mutex_lock(&userspace_mutex); - pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", - cpu, policy->min, policy->max, policy->cur, *setspeed); - - if (policy->max < *setspeed) - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - else if (policy->min > *setspeed) - __cpufreq_driver_target(policy, policy->min, - 
CPUFREQ_RELATION_L); - else - __cpufreq_driver_target(policy, *setspeed, - CPUFREQ_RELATION_L); - mutex_unlock(&userspace_mutex); - break; - } - return rc; + mutex_lock(&userspace_mutex); + per_cpu(cpu_is_managed, policy->cpu) = 1; + *setspeed = policy->cur; + mutex_unlock(&userspace_mutex); + return 0; +} + +static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy) +{ + unsigned int *setspeed = policy->governor_data; + + pr_debug("managing cpu %u stopped\n", policy->cpu); + + mutex_lock(&userspace_mutex); + per_cpu(cpu_is_managed, policy->cpu) = 0; + *setspeed = 0; + mutex_unlock(&userspace_mutex); +} + +static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy) +{ + unsigned int *setspeed = policy->governor_data; + + mutex_lock(&userspace_mutex); + + pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", + policy->cpu, policy->min, policy->max, policy->cur, *setspeed); + + if (policy->max < *setspeed) + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + else if (policy->min > *setspeed) + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); + else + __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L); + + mutex_unlock(&userspace_mutex); } static struct cpufreq_governor cpufreq_gov_userspace = { .name = "userspace", - .governor = cpufreq_governor_userspace, + .init = cpufreq_userspace_policy_init, + .exit = cpufreq_userspace_policy_exit, + .start = cpufreq_userspace_policy_start, + .stop = cpufreq_userspace_policy_stop, + .limits = cpufreq_userspace_policy_limits, .store_setspeed = cpufreq_set, .show_setspeed = show_speed, .owner = THIS_MODULE, @@ -38,26 +38,6 @@ struct davinci_cpufreq { }; static struct davinci_cpufreq cpufreq; -static int davinci_verify_speed(struct cpufreq_policy *policy) -{ - struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; - struct cpufreq_frequency_table *freq_table = pdata->freq_table; - struct clk *armclk = cpufreq.armclk; - - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_cpu_limits(policy); - policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000; - policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; -} - static int davinci_target(struct cpufreq_policy *policy, unsigned int idx) { struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; @@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) static struct cpufreq_driver davinci_driver = { .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, - .verify = davinci_verify_speed, + .verify = cpufreq_generic_frequency_table_verify, .target_index = davinci_target, .get = cpufreq_generic_get, .init = davinci_cpu_init, @@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, else return 0; } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); - int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) @@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); */ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy) { - struct cpufreq_frequency_table *table = - cpufreq_frequency_get_table(policy->cpu); - if (!table) + if (!policy->freq_table) return -ENODEV; - return cpufreq_frequency_table_verify(policy, table); + return 
cpufreq_frequency_table_verify(policy, policy->freq_table); } EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); -int cpufreq_frequency_table_target(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table, - unsigned int target_freq, - unsigned int relation, - unsigned int *index) +int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) { struct cpufreq_frequency_table optimal = { .driver_data = ~0, @@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, .frequency = 0, }; struct cpufreq_frequency_table *pos; + struct cpufreq_frequency_table *table = policy->freq_table; unsigned int freq, diff, i = 0; + int index; pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); @@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } } if (optimal.driver_data > i) { - if (suboptimal.driver_data > i) - return -EINVAL; - *index = suboptimal.driver_data; - } else - *index = optimal.driver_data; + if (suboptimal.driver_data > i) { + WARN(1, "Invalid frequency table: %d\n", policy->cpu); + return 0; + } - pr_debug("target index is %u, freq is:%u kHz\n", *index, - table[*index].frequency); + index = suboptimal.driver_data; + } else + index = optimal.driver_data; - return 0; + pr_debug("target index is %u, freq is:%u kHz\n", index, + table[index].frequency); + return index; } -EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target); +EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq) { - struct cpufreq_frequency_table *pos, *table; + struct cpufreq_frequency_table *pos, *table = policy->freq_table; - table = cpufreq_frequency_get_table(policy->cpu); if (unlikely(!table)) { pr_debug("%s: Unable to find frequency table\n", __func__); return -ENOENT; @@ -300,15 +297,72 @@ struct freq_attr *cpufreq_generic_attr[] = { }; EXPORT_SYMBOL_GPL(cpufreq_generic_attr); +static int set_freq_table_sorted(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pos, *table = policy->freq_table; + struct cpufreq_frequency_table *prev = NULL; + int ascending = 0; + + policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED; + + cpufreq_for_each_valid_entry(pos, table) { + if (!prev) { + prev = pos; + continue; + } + + if (pos->frequency == prev->frequency) { + pr_warn("Duplicate freq-table entries: %u\n", + pos->frequency); + return -EINVAL; + } + + /* Frequency increased from prev to pos */ + if (pos->frequency > prev->frequency) { + /* But frequency was decreasing earlier */ + if (ascending < 0) { + pr_debug("Freq table is unsorted\n"); + return 0; + } + + ascending++; + } else { + /* Frequency decreased from prev to pos */ + + /* But frequency was increasing earlier */ + if (ascending > 0) { + pr_debug("Freq table is unsorted\n"); + return 0; + } + + ascending--; + } + + prev = pos; + } + + if (ascending > 0) + policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING; + else + policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING; + + pr_debug("Freq table is sorted in %s order\n", + ascending > 0 ? 
"ascending" : "descending"); + + return 0; +} + int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { - int ret = cpufreq_frequency_table_cpuinfo(policy, table); + int ret; - if (!ret) - policy->freq_table = table; + ret = cpufreq_frequency_table_cpuinfo(policy, table); + if (ret) + return ret; - return ret; + policy->freq_table = table; + return set_freq_table_sorted(policy); } EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); @@ -35,6 +35,7 @@ #include <asm/msr.h> #include <asm/cpu_device_id.h> #include <asm/cpufeature.h> +#include <asm/intel-family.h> #define ATOM_RATIOS 0x66a #define ATOM_VIDS 0x66b @@ -96,7 +97,6 @@ static inline u64 div_ext_fp(u64 x, u64 y) * read from MPERF MSR between last and current sample * @tsc: Difference of time stamp counter between last and * current sample - * @freq: Effective frequency calculated from APERF/MPERF * @time: Current time from scheduler * * This structure is used in the cpudata structure to store performance sample @@ -108,7 +108,6 @@ struct sample { u64 aperf; u64 mperf; u64 tsc; - int freq; u64 time; }; @@ -281,9 +280,9 @@ struct cpu_defaults { static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu); static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu); -static struct pstate_adjust_policy pid_params; -static struct pstate_funcs pstate_funcs; -static int hwp_active; +static struct pstate_adjust_policy pid_params __read_mostly; +static struct pstate_funcs pstate_funcs __read_mostly; +static int hwp_active __read_mostly; #ifdef CONFIG_ACPI static bool acpi_ppc; @@ -372,26 +371,9 @@ static bool intel_pstate_get_ppc_enable_status(void) return acpi_ppc; } -/* - * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and - * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and - * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state - * ratio, out of it only high 8 bits are used. For example 0x1700 is setting - * target ratio 0x17. The _PSS control value stores in a format which can be - * directly written to PERF_CTL MSR. But in intel_pstate driver this shift - * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). - * This function converts the _PSS control value to intel pstate driver format - * for comparison and assignment. - */ -static int convert_to_native_pstate_format(struct cpudata *cpu, int index) -{ - return cpu->acpi_perf_data.states[index].control >> 8; -} - static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) { struct cpudata *cpu; - int turbo_pss_ctl; int ret; int i; @@ -441,15 +423,14 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) * max frequency, which will cause a reduced performance as * this driver uses real max turbo frequency as the max * frequency. So correct this frequency in _PSS table to - * correct max turbo frequency based on the turbo ratio. + * correct max turbo frequency based on the turbo state. * Also need to convert to MHz as _PSS freq is in MHz. 
*/ - turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); - if (turbo_pss_ctl > cpu->pstate.max_pstate) + if (!limits->turbo_disabled) cpu->acpi_perf_data.states[0].core_frequency = policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; - pr_info("_PPC limits will be enforced\n"); + pr_debug("_PPC limits will be enforced\n"); return; @@ -825,7 +806,8 @@ static void __init intel_pstate_sysfs_expose_params(void) static void intel_pstate_hwp_enable(struct cpudata *cpudata) { /* First disable HWP notification interrupt as we don't process them */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); } @@ -962,7 +944,7 @@ static int core_get_max_pstate(void) if (err) goto skip_tar; - tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl; + tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3); err = rdmsrl_safe(tdp_msr, &tdp_ratio); if (err) goto skip_tar; @@ -990,7 +972,7 @@ static int core_get_turbo_pstate(void) u64 value; int nont, ret; - rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); + rdmsrl(MSR_TURBO_RATIO_LIMIT, value); nont = core_get_max_pstate(); ret = (value) & 255; if (ret <= nont) @@ -1019,7 +1001,7 @@ static int knl_get_turbo_pstate(void) u64 value; int nont, ret; - rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); + rdmsrl(MSR_TURBO_RATIO_LIMIT, value); nont = core_get_max_pstate(); ret = (((value) >> 8) & 0xFF); if (ret <= nont) @@ -1109,6 +1091,26 @@ static struct cpu_defaults knl_params = { }, }; +static struct cpu_defaults bxt_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, + .setpoint = 60, + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, + }, + .funcs = { + .get_max = core_get_max_pstate, + .get_max_physical = core_get_max_pstate_physical, + .get_min = core_get_min_pstate, + .get_turbo = core_get_turbo_pstate, + .get_scaling = core_get_scaling, + .get_val = core_get_val, + .get_target_pstate = get_target_pstate_use_cpu_load, + }, +}; + static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) { int max_perf = cpu->pstate.turbo_pstate; @@ -1131,17 +1133,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } -static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate) -{ - trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); - cpu->pstate.current_pstate = pstate; -} - static void intel_pstate_set_min_pstate(struct cpudata *cpu) { int pstate = cpu->pstate.min_pstate; - intel_pstate_record_pstate(cpu, pstate); + trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); + cpu->pstate.current_pstate = pstate; /* * Generally, there is no guarantee that this code will always run on * the CPU being updated, so force the register update to run on the @@ -1301,10 +1298,11 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) intel_pstate_get_min_max(cpu, &min_perf, &max_perf); pstate = clamp_t(int, pstate, min_perf, max_perf); + trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); if (pstate == cpu->pstate.current_pstate) return; - intel_pstate_record_pstate(cpu, pstate); + cpu->pstate.current_pstate = pstate; wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); } @@ -1352,29 +1350,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, (unsigned long)&policy } static const struct x86_cpu_id 
intel_pstate_cpu_ids[] = { - ICPU(0x2a, core_params), - ICPU(0x2d, core_params), - ICPU(0x37, silvermont_params), - ICPU(0x3a, core_params), - ICPU(0x3c, core_params), - ICPU(0x3d, core_params), - ICPU(0x3e, core_params), - ICPU(0x3f, core_params), - ICPU(0x45, core_params), - ICPU(0x46, core_params), - ICPU(0x47, core_params), - ICPU(0x4c, airmont_params), - ICPU(0x4e, core_params), - ICPU(0x4f, core_params), - ICPU(0x5e, core_params), - ICPU(0x56, core_params), - ICPU(0x57, knl_params), + ICPU(INTEL_FAM6_SANDYBRIDGE, core_params), + ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params), + ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params), + ICPU(INTEL_FAM6_IVYBRIDGE, core_params), + ICPU(INTEL_FAM6_HASWELL_CORE, core_params), + ICPU(INTEL_FAM6_BROADWELL_CORE, core_params), + ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params), + ICPU(INTEL_FAM6_HASWELL_X, core_params), + ICPU(INTEL_FAM6_HASWELL_ULT, core_params), + ICPU(INTEL_FAM6_HASWELL_GT3E, core_params), + ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params), + ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params), + ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params), + ICPU(INTEL_FAM6_BROADWELL_X, core_params), + ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params), + ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params), + ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); -static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = { - ICPU(0x56, core_params), +static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { + ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params), {} }; @@ -1418,6 +1417,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) { struct cpudata *cpu = all_cpu_data[cpu_num]; + if (cpu->update_util_set) + return; + /* Prevent intel_pstate_update_util() from using stale data. */ cpu->sample.time = 0; cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, @@ -1458,7 +1460,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) if (!policy->cpuinfo.max_freq) return -ENODEV; - intel_pstate_clear_update_util_hook(policy->cpu); + pr_debug("set_policy cpuinfo.max %u policy->max %u\n", + policy->cpuinfo.max_freq, policy->max); cpu = all_cpu_data[0]; if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && @@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_sysfs_pct); limits->max_perf_pct = max(limits->min_policy_pct, limits->max_perf_pct); - limits->max_perf = round_up(limits->max_perf, FRAC_BITS); /* Make sure min_perf_pct <= max_perf_pct */ limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); limits->min_perf = div_fp(limits->min_perf_pct, 100); limits->max_perf = div_fp(limits->max_perf_pct, 100); + limits->max_perf = round_up(limits->max_perf, FRAC_BITS); out: intel_pstate_set_update_util_hook(policy->cpu); @@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; - policy->cpuinfo.max_freq = - cpu->pstate.turbo_pstate * cpu->pstate.scaling; + update_turbo_state(); + policy->cpuinfo.max_freq = limits->turbo_disabled ? 
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; + policy->cpuinfo.max_freq *= cpu->pstate.scaling; + intel_pstate_init_acpi_perf_limits(policy); policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); @@ -1586,12 +1592,12 @@ static struct cpufreq_driver intel_pstate_driver = { .name = "intel_pstate", }; -static int __initdata no_load; -static int __initdata no_hwp; -static int __initdata hwp_only; -static unsigned int force_load; +static int no_load __initdata; +static int no_hwp __initdata; +static int hwp_only __initdata; +static unsigned int force_load __initdata; -static int intel_pstate_msrs_not_valid(void) +static int __init intel_pstate_msrs_not_valid(void) { if (!pstate_funcs.get_max() || !pstate_funcs.get_min() || @@ -1601,7 +1607,7 @@ static int intel_pstate_msrs_not_valid(void) return 0; } -static void copy_pid_params(struct pstate_adjust_policy *policy) +static void __init copy_pid_params(struct pstate_adjust_policy *policy) { pid_params.sample_rate_ms = policy->sample_rate_ms; pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; @@ -1612,7 +1618,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy) pid_params.setpoint = policy->setpoint; } -static void copy_cpu_funcs(struct pstate_funcs *funcs) +static void __init copy_cpu_funcs(struct pstate_funcs *funcs) { pstate_funcs.get_max = funcs->get_max; pstate_funcs.get_max_physical = funcs->get_max_physical; @@ -1627,7 +1633,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) #ifdef CONFIG_ACPI -static bool intel_pstate_no_acpi_pss(void) +static bool __init intel_pstate_no_acpi_pss(void) { int i; @@ -1656,7 +1662,7 @@ static bool intel_pstate_no_acpi_pss(void) return true; } -static bool intel_pstate_has_acpi_ppc(void) +static bool __init intel_pstate_has_acpi_ppc(void) { int i; @@ -1684,7 +1690,7 @@ struct hw_vendor_info { }; /* Hardware vendor-specific info that has its own power management modes */ -static struct hw_vendor_info vendor_info[] = { +static struct hw_vendor_info vendor_info[] __initdata = { {1, "HP ", "ProLiant", PSS}, {1, "ORACLE", "X4-2 ", PPC}, {1, "ORACLE", "X4-2L ", PPC}, @@ -1703,7 +1709,7 @@ static struct hw_vendor_info vendor_info[] = { {0, "", ""}, }; -static bool intel_pstate_platform_pwr_mgmt_exists(void) +static bool __init intel_pstate_platform_pwr_mgmt_exists(void) { struct acpi_table_header hdr; struct hw_vendor_info *v_info; @@ -70,7 +70,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void) continue; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { pr_err("Cannot get clock for CPU %d\n", cpu); return PTR_ERR(clk); @@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void) doorbell.space_id = reg_resource->space_id; doorbell.bit_width = reg_resource->bit_width; doorbell.bit_offset = reg_resource->bit_offset; - doorbell.access_width = 64; + doorbell.access_width = 4; doorbell.address = reg_resource->address; pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " @@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq = ioread32(&pcch_hdr->minimum_frequency) * 1000; - policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; - pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: @@ -64,12 +64,14 @@ /** * struct global_pstate_info - Per policy data structure to maintain history of * global pstates - * @highest_lpstate: The local pstate from which we are ramping down + * 
@highest_lpstate_idx: The local pstate index from which we are + * ramping down * @elapsed_time: Time in ms spent in ramping down from - * highest_lpstate + * highest_lpstate_idx * @last_sampled_time: Time from boot in ms when global pstates were * last set - * @last_lpstate,last_gpstate: Last set values for local and global pstates + * @last_lpstate_idx, Last set value of local pstate and global + * last_gpstate_idx pstate in terms of cpufreq table index * @timer: Is used for ramping down if cpu goes idle for * a long time with global pstate held high * @gpstate_lock: A spinlock to maintain synchronization between @@ -77,11 +79,11 @@ * governor's target_index calls */ struct global_pstate_info { - int highest_lpstate; + int highest_lpstate_idx; unsigned int elapsed_time; unsigned int last_sampled_time; - int last_lpstate; - int last_gpstate; + int last_lpstate_idx; + int last_gpstate_idx; spinlock_t gpstate_lock; struct timer_list timer; }; @@ -124,29 +126,47 @@ static int nr_chips; static DEFINE_PER_CPU(struct chip *, chip_info); /* - * Note: The set of pstates consists of contiguous integers, the - * smallest of which is indicated by powernv_pstate_info.min, the - * largest of which is indicated by powernv_pstate_info.max. + * Note: + * The set of pstates consists of contiguous integers. + * powernv_pstate_info stores the index of the frequency table for + * max, min and nominal frequencies. It also stores the number of + * available frequencies. * - * The nominal pstate is the highest non-turbo pstate in this - * platform. This is indicated by powernv_pstate_info.nominal. + * powernv_pstate_info.nominal indicates the index of the highest + * non-turbo frequency. */ static struct powernv_pstate_info { - int min; - int max; - int nominal; - int nr_pstates; + unsigned int min; + unsigned int max; + unsigned int nominal; + unsigned int nr_pstates; } powernv_pstate_info; +/* Use the following macros for conversions between pstate_id and index */ +static inline int idx_to_pstate(unsigned int i) +{ + return powernv_freqs[i].driver_data; +} + +static inline unsigned int pstate_to_idx(int pstate) +{ + /* + * abs() is deliberately used so that it works with + * both monotonically increasing and decreasing + * pstate values + */ + return abs(pstate - idx_to_pstate(powernv_pstate_info.max)); +} + static inline void reset_gpstates(struct cpufreq_policy *policy) { struct global_pstate_info *gpstates = policy->driver_data; - gpstates->highest_lpstate = 0; + gpstates->highest_lpstate_idx = 0; gpstates->elapsed_time = 0; gpstates->last_sampled_time = 0; - gpstates->last_lpstate = 0; - gpstates->last_gpstate = 0; + gpstates->last_lpstate_idx = 0; + gpstates->last_gpstate_idx = 0; } /* @@ -156,9 +176,10 @@ static inline void reset_gpstates(struct cpufreq_policy *policy) static int init_powernv_pstates(void) { struct device_node *power_mgt; - int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0; + int i, nr_pstates = 0; const __be32 *pstate_ids, *pstate_freqs; u32 len_ids, len_freqs; + u32 pstate_min, pstate_max, pstate_nominal; power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); if (!power_mgt) { @@ -208,6 +229,7 @@ static int init_powernv_pstates(void) return -ENODEV; } + powernv_pstate_info.nr_pstates = nr_pstates; pr_debug("NR PStates %d\n", nr_pstates); for (i = 0; i < nr_pstates; i++) { u32 id = be32_to_cpu(pstate_ids[i]); @@ -216,15 +238,17 @@ static int init_powernv_pstates(void) pr_debug("PState id %d freq %d MHz\n", id, freq); powernv_freqs[i].frequency = freq * 1000; /* kHz */
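/* driver_data carries the pstate id for this table index; idx_to_pstate() reads it back */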
powernv_freqs[i].driver_data = id; + + if (id == pstate_max) + powernv_pstate_info.max = i; + else if (id == pstate_nominal) + powernv_pstate_info.nominal = i; + else if (id == pstate_min) + powernv_pstate_info.min = i; } + /* End of list marker entry */ powernv_freqs[i].frequency = CPUFREQ_TABLE_END; - - powernv_pstate_info.min = pstate_min; - powernv_pstate_info.max = pstate_max; - powernv_pstate_info.nominal = pstate_nominal; - powernv_pstate_info.nr_pstates = nr_pstates; - return 0; } @@ -233,12 +257,12 @@ static unsigned int pstate_id_to_freq(int pstate_id) { int i; - i = powernv_pstate_info.max - pstate_id; + i = pstate_to_idx(pstate_id); if (i >= powernv_pstate_info.nr_pstates || i < 0) { pr_warn("PState id %d outside of PState table, " "reporting nominal id %d instead\n", - pstate_id, powernv_pstate_info.nominal); - i = powernv_pstate_info.max - powernv_pstate_info.nominal; + pstate_id, idx_to_pstate(powernv_pstate_info.nominal)); + i = powernv_pstate_info.nominal; } return powernv_freqs[i].frequency; @@ -252,7 +276,7 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, char *buf) { return sprintf(buf, "%u\n", - pstate_id_to_freq(powernv_pstate_info.nominal)); + powernv_freqs[powernv_pstate_info.nominal].frequency); } struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = @@ -426,7 +450,7 @@ static void set_pstate(void *data) */ static inline unsigned int get_nominal_index(void) { - return powernv_pstate_info.max - powernv_pstate_info.nominal; + return powernv_pstate_info.nominal; } static void powernv_cpufreq_throttle_check(void *data) @@ -435,20 +459,22 @@ unsigned int cpu = smp_processor_id(); unsigned long pmsr; int pmsr_pmax; + unsigned int pmsr_pmax_idx; pmsr = get_pmspr(SPRN_PMSR); chip = this_cpu_read(chip_info); /* Check for Pmax Capping */ pmsr_pmax = (s8)PMSR_MAX(pmsr); - if (pmsr_pmax != powernv_pstate_info.max) { + pmsr_pmax_idx = pstate_to_idx(pmsr_pmax); + if (pmsr_pmax_idx != powernv_pstate_info.max) { if (chip->throttled) goto next; chip->throttled = true; - if (pmsr_pmax < powernv_pstate_info.nominal) { - pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n", + if (pmsr_pmax_idx > powernv_pstate_info.nominal) { + pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n", cpu, chip->id, pmsr_pmax, - powernv_pstate_info.nominal); + idx_to_pstate(powernv_pstate_info.nominal)); chip->throttle_sub_turbo++; } else { chip->throttle_turbo++; @@ -484,34 +510,35 @@ next: /** * calc_global_pstate - Calculate global pstate - * @elapsed_time: Elapsed time in milliseconds - * @local_pstate: New local pstate - * @highest_lpstate: pstate from which its ramping down + * @elapsed_time: Elapsed time in milliseconds + * @local_pstate_idx: New local pstate + * @highest_lpstate_idx: pstate from which it's ramping down * * Finds the appropriate global pstate based on the pstate from which it's * ramping down and the time elapsed in ramping down. It follows a quadratic * equation which ensures that it ramps down to pmin in 5sec. */ static inline int calc_global_pstate(unsigned int elapsed_time, - int highest_lpstate, int local_pstate) + int highest_lpstate_idx, + int local_pstate_idx) { - int pstate_diff; + int index_diff; /* * Using ramp_down_percent we get the percentage of rampdown * that we are expecting to be dropping.
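* (ramp_down_percent() grows quadratically with the elapsed time, reaching 100 once MAX_RAMP_DOWN_TIME has passed.)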
Difference between
- * highest_lpstate and powernv_pstate_info.min will give an absolute
+ * highest_lpstate_idx and powernv_pstate_info.min will give an absolute
 * number of how many pstates we will drop eventually by the end of
 * 5 seconds, then just scale it to get the number of pstates to be dropped.
 */
-	pstate_diff = ((int)ramp_down_percent(elapsed_time) *
-			(highest_lpstate - powernv_pstate_info.min)) / 100;
+	index_diff = ((int)ramp_down_percent(elapsed_time) *
+			(powernv_pstate_info.min - highest_lpstate_idx)) / 100;

 	/* Ensure that global pstate is >= to local pstate */
-	if (highest_lpstate - pstate_diff < local_pstate)
-		return local_pstate;
+	if (highest_lpstate_idx + index_diff >= local_pstate_idx)
+		return local_pstate_idx;
 	else
-		return highest_lpstate - pstate_diff;
+		return highest_lpstate_idx + index_diff;
 }

 static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
@@ -530,8 +557,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
 	else
 		timer_interval = GPSTATE_TIMER_INTERVAL;

-	mod_timer_pinned(&gpstates->timer, jiffies +
-			msecs_to_jiffies(timer_interval));
+	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
 }

 /**
@@ -547,7 +573,7 @@ void gpstate_timer_handler(unsigned long data)
 {
 	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
 	struct global_pstate_info *gpstates = policy->driver_data;
-	int gpstate_id;
+	int gpstate_idx;
 	unsigned int time_diff = jiffies_to_msecs(jiffies)
 					- gpstates->last_sampled_time;
 	struct powernv_smp_call_data freq_data;
@@ -557,29 +583,29 @@
 	gpstates->last_sampled_time += time_diff;
 	gpstates->elapsed_time += time_diff;

-	freq_data.pstate_id = gpstates->last_lpstate;
+	freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);

-	if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+	if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
 	    (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
-		gpstate_id = freq_data.pstate_id;
+		gpstate_idx = pstate_to_idx(freq_data.pstate_id);
 		reset_gpstates(policy);
-		gpstates->highest_lpstate = freq_data.pstate_id;
+		gpstates->highest_lpstate_idx = gpstate_idx;
 	} else {
-		gpstate_id = calc_global_pstate(gpstates->elapsed_time,
-						gpstates->highest_lpstate,
-						freq_data.pstate_id);
+		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+						 gpstates->highest_lpstate_idx,
+						 freq_data.pstate_id);
 	}

 	/*
 	 * If local pstate is equal to global pstate, rampdown is over
 	 * So timer is not required to be queued.
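To make the rewritten arithmetic concrete, here is a hedged user-space model. ramp_down_percent() is defined elsewhere in powernv-cpufreq.c; the quadratic below is only an assumed stand-in that reaches 100% as elapsed time approaches MAX_RAMP_DOWN_TIME, which is all the clamp logic needs:

#include <stdio.h>

#define MAX_RAMP_DOWN_TIME 5120         /* ms, assumed to match the driver */
/* assumed quadratic stand-in: slow at first, 100% at MAX_RAMP_DOWN_TIME */
#define ramp_down_percent(time) \
        ((100LL * (time) * (time)) / \
         ((long long)MAX_RAMP_DOWN_TIME * MAX_RAMP_DOWN_TIME))

static int calc_global_pstate_idx(unsigned int elapsed_time,
                                  int highest_lpstate_idx,
                                  int local_pstate_idx, int min_idx)
{
        /* indices grow toward pmin, so the diff is added, not subtracted */
        int index_diff = ((int)ramp_down_percent(elapsed_time) *
                          (min_idx - highest_lpstate_idx)) / 100;

        /* the global pstate may never fall below the local pstate */
        if (highest_lpstate_idx + index_diff >= local_pstate_idx)
                return local_pstate_idx;
        return highest_lpstate_idx + index_diff;
}

int main(void)
{
        /* pmin at index 10; the local pstate already dropped to index 8 */
        for (unsigned int t = 0; t <= MAX_RAMP_DOWN_TIME; t += 1024)
                printf("t=%4u ms -> global pstate index %d\n", t,
                       calc_global_pstate_idx(t, 0, 8, 10));
        return 0;
}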
*/ - if (gpstate_id != freq_data.pstate_id) + if (gpstate_idx != gpstates->last_lpstate_idx) queue_gpstate_timer(gpstates); - freq_data.gpstate_id = gpstate_id; - gpstates->last_gpstate = freq_data.gpstate_id; - gpstates->last_lpstate = freq_data.pstate_id; + freq_data.gpstate_id = idx_to_pstate(gpstate_idx); + gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id); + gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id); spin_unlock(&gpstates->gpstate_lock); @@ -596,7 +622,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int new_index) { struct powernv_smp_call_data freq_data; - unsigned int cur_msec, gpstate_id; + unsigned int cur_msec, gpstate_idx; struct global_pstate_info *gpstates = policy->driver_data; if (unlikely(rebooting) && new_index != get_nominal_index()) @@ -608,15 +634,15 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, cur_msec = jiffies_to_msecs(get_jiffies_64()); spin_lock(&gpstates->gpstate_lock); - freq_data.pstate_id = powernv_freqs[new_index].driver_data; + freq_data.pstate_id = idx_to_pstate(new_index); if (!gpstates->last_sampled_time) { - gpstate_id = freq_data.pstate_id; - gpstates->highest_lpstate = freq_data.pstate_id; + gpstate_idx = new_index; + gpstates->highest_lpstate_idx = new_index; goto gpstates_done; } - if (gpstates->last_gpstate > freq_data.pstate_id) { + if (gpstates->last_gpstate_idx < new_index) { gpstates->elapsed_time += cur_msec - gpstates->last_sampled_time; @@ -627,34 +653,34 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, */ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) { reset_gpstates(policy); - gpstates->highest_lpstate = freq_data.pstate_id; - gpstate_id = freq_data.pstate_id; + gpstates->highest_lpstate_idx = new_index; + gpstate_idx = new_index; } else { /* Elaspsed_time is less than 5 seconds, continue to rampdown */ - gpstate_id = calc_global_pstate(gpstates->elapsed_time, - gpstates->highest_lpstate, - freq_data.pstate_id); + gpstate_idx = calc_global_pstate(gpstates->elapsed_time, + gpstates->highest_lpstate_idx, + new_index); } } else { reset_gpstates(policy); - gpstates->highest_lpstate = freq_data.pstate_id; - gpstate_id = freq_data.pstate_id; + gpstates->highest_lpstate_idx = new_index; + gpstate_idx = new_index; } /* * If local pstate is equal to global pstate, rampdown is over * So timer is not required to be queued. 
*/ - if (gpstate_id != freq_data.pstate_id) + if (gpstate_idx != new_index) queue_gpstate_timer(gpstates); else del_timer_sync(&gpstates->timer); gpstates_done: - freq_data.gpstate_id = gpstate_id; + freq_data.gpstate_id = idx_to_pstate(gpstate_idx); gpstates->last_sampled_time = cur_msec; - gpstates->last_gpstate = freq_data.gpstate_id; - gpstates->last_lpstate = freq_data.pstate_id; + gpstates->last_gpstate_idx = gpstate_idx; + gpstates->last_lpstate_idx = new_index; spin_unlock(&gpstates->gpstate_lock); @@ -699,7 +725,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->driver_data = gpstates; /* initialize timer */ - init_timer_deferrable(&gpstates->timer); + init_timer_pinned_deferrable(&gpstates->timer); gpstates->timer.data = (unsigned long)policy; gpstates->timer.function = gpstate_timer_handler; gpstates->timer.expires = jiffies + @@ -760,9 +786,7 @@ void powernv_cpufreq_work_fn(struct work_struct *work) struct cpufreq_policy policy; cpufreq_get_policy(&policy, cpu); - cpufreq_frequency_table_target(&policy, policy.freq_table, - policy.cur, - CPUFREQ_RELATION_C, &index); + index = cpufreq_table_find_index_c(&policy, policy.cur); powernv_cpufreq_target_index(&policy, index); cpumask_andnot(&mask, &mask, policy.cpus); } @@ -848,8 +872,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) struct powernv_smp_call_data freq_data; struct global_pstate_info *gpstates = policy->driver_data; - freq_data.pstate_id = powernv_pstate_info.min; - freq_data.gpstate_id = powernv_pstate_info.min; + freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min); + freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min); smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); del_timer_sync(&gpstates->timer); } @@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *cbe_freqs; + struct cpufreq_frequency_table *cbe_freqs = policy->freq_table; u8 node; /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY @@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb, if (event == CPUFREQ_START) return 0; - cbe_freqs = cpufreq_frequency_get_table(policy->cpu); node = cbe_cpu_to_node(policy->cpu); pr_debug("got notified, event=%lu, node=%u\n", event, node); @@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy, __func__, policy, target_freq, relation); if (ftab) { - if (cpufreq_frequency_table_target(policy, ftab, - target_freq, relation, - &index)) { - s3c_freq_dbg("%s: table failed\n", __func__); - return -EINVAL; - } + index = cpufreq_frequency_table_target(policy, target_freq, + relation); s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__, target_freq, index, ftab[index].frequency); @@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy, pll = NULL; } else { struct cpufreq_policy tmp_policy; - int ret; /* we keep the cpu pll table in Hz, to ensure we get an * accurate value for the PLL output. 
*/ @@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy, tmp_policy.min = policy->min * 1000; tmp_policy.max = policy->max * 1000; tmp_policy.cpu = policy->cpu; + tmp_policy.freq_table = pll_reg; - /* cpufreq_frequency_table_target uses a pointer to 'index' - * which is the number of the table entry, not the value of + /* cpufreq_frequency_table_target returns the index + * of the table entry, not the value of * the table entry's index field. */ - ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg, - target_freq, relation, - &index); - - if (ret < 0) { - pr_err("%s: no PLL available\n", __func__); - goto err_notpossible; - } - + index = cpufreq_frequency_table_target(&tmp_policy, target_freq, + relation); pll = pll_reg + index; s3c_freq_dbg("%s: target %u => %u\n", @@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy, } return s3c_cpufreq_settarget(policy, target_freq, pll); - - err_notpossible: - pr_err("no compatible settings for %d\n", target_freq); - return -EINVAL; } struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) @@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void) { int size, ret; - if (!cpu_cur.info->calc_freqtable) - return -EINVAL; - kfree(ftab); - ftab = NULL; size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); size++; @@ -246,12 +246,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) new_freq = s5pv210_freq_table[index].frequency; /* Finding current running level index */ - if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, - old_freq, CPUFREQ_RELATION_H, - &priv_index)) { - ret = -EINVAL; - goto exit; - } + priv_index = cpufreq_table_find_index_h(policy, old_freq); arm_volt = dvs_conf[index].arm_volt; int_volt = dvs_conf[index].int_volt; @@ -36,26 +36,12 @@ static int arm_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { - int ret; - - if (!idx) { - cpu_do_idle(); - return idx; - } - - ret = cpu_pm_enter(); - if (!ret) { - /* - * Pass idle state index to cpu_suspend which in turn will - * call the CPU ops suspend protocol with idle index as a - * parameter. - */ - ret = arm_cpuidle_suspend(idx); - - cpu_pm_exit(); - } - - return ret ? -1 : idx; + /* + * Pass idle state index to arm_cpuidle_suspend which in turn + * will call the CPU ops suspend protocol with idle index as a + * parameter. + */ + return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx); } static struct cpuidle_driver arm_idle_driver = { @@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, struct cpuidle_state *target_state = &drv->states[index]; bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); - u64 time_start, time_end; + ktime_t time_start, time_end; s64 diff; /* @@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, sched_idle_set_state(target_state); trace_cpu_idle_rcuidle(index, dev->cpu); - time_start = local_clock(); + time_start = ns_to_ktime(local_clock()); stop_critical_timings(); entered_state = target_state->enter(dev, drv, index); start_critical_timings(); - time_end = local_clock(); + time_end = ns_to_ktime(local_clock()); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. 
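The cpuidle hunk above is motivated by a small piece of arithmetic: shifting nanoseconds right by 10 divides by 1024, not 1000, so measured idle residency came out roughly 2.4% low; ktime_us_delta() computes the exact microsecond delta instead. A standalone check of the error:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t diff_ns = 3500000000LL;         /* e.g. 3.5 s spent idle */

        printf("shift approx: %lld us\n", (long long)(diff_ns >> 10));
        printf("exact       : %lld us\n", (long long)(diff_ns / 1000));
        /* 3417968 vs 3500000: the old shortcut under-reports by ~2.4% */
        return 0;
}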
*/ @@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, if (!cpuidle_state_is_coupled(drv, index)) local_irq_enable(); - /* - * local_clock() returns the time in nanosecond, let's shift - * by 10 (divide by 1024) to have microsecond based time. - */ - diff = (time_end - time_start) >> 10; + diff = ktime_us_delta(time_end, time_start); if (diff > INT_MAX) diff = INT_MAX; @@ -159,6 +159,19 @@ config CRYPTO_GHASH_S390 It is available as of z196. +config CRYPTO_CRC32_S390 + tristate "CRC-32 algorithms" + depends on S390 + select CRYPTO_HASH + select CRC32 + help + Select this option if you want to use hardware accelerated + implementations of CRC algorithms. With this option, you + can optimize the computation of CRC-32 (IEEE 802.3 Ethernet) + and CRC-32C (Castagnoli). + + It is available with IBM z13 or later. + config CRYPTO_DEV_MV_CESA tristate "Marvell's Cryptographic Engine" depends on PLAT_ORION @@ -588,11 +588,6 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev) crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); - return -ENOENT; - } - crc->regs = devm_ioremap_resource(dev, res); if (IS_ERR((void *)crc->regs)) { dev_err(&pdev->dev, "Cannot map CRC IO\n"); @@ -1,6 +1,6 @@ config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore driver backend" - depends on FSL_SOC || ARCH_MXC + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE help Enables the driver module for Freescale's Cryptographic Accelerator and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). @@ -99,6 +99,18 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API To compile this as a module, choose M here: the module will be called caamhash. +config CRYPTO_DEV_FSL_CAAM_PKC_API + tristate "Register public key cryptography implementations with Crypto API" + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR + default y + select CRYPTO_RSA + help + Selecting this will allow SEC Public key support for RSA. + Supported cryptographic primitives: encryption, decryption, + signature and verification. + To compile this as a module, choose M here: the module + will be called caam_pkc. 
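For orientation, this is roughly how another kernel module would exercise the new rsa-caam backend through the generic akcipher API. A hedged sketch: the DER-encoded key, the buffer lengths and the hand-rolled completion plumbing are illustrative assumptions, not part of this patch set; only the API calls themselves are real.

#include <linux/err.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <crypto/akcipher.h>

struct rsa_wait {
        struct completion done;
        int err;
};

static void rsa_op_done(struct crypto_async_request *areq, int err)
{
        struct rsa_wait *wait = areq->data;

        if (err == -EINPROGRESS)
                return;         /* backlogged request started; keep waiting */
        wait->err = err;
        complete(&wait->done);
}

/* der_pub must be a BER/DER RsaKey as expected by rsa_parse_pub_key() */
static int rsa_encrypt_once(const u8 *der_pub, unsigned int der_len,
                            u8 *in, unsigned int in_len,
                            u8 *out, unsigned int out_len)
{
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct scatterlist src, dst;
        struct rsa_wait wait = { .err = 0 };
        int ret;

        /* "rsa" resolves to rsa-caam when the PKHA driver wins by priority */
        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_akcipher_set_pub_key(tfm, der_pub, der_len);
        if (ret)
                goto out_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        init_completion(&wait.done);
        sg_init_one(&src, in, in_len);
        sg_init_one(&dst, out, out_len);
        akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      rsa_op_done, &wait);

        ret = crypto_akcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&wait.done);
                ret = wait.err;
        }

        akcipher_request_free(req);
out_tfm:
        crypto_free_akcipher(tfm);
        return ret;
}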
+
 config CRYPTO_DEV_FSL_CAAM_RNG_API
 	tristate "Register caam device for hwrng API"
 	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
@@ -116,10 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_IMX
 	def_bool SOC_IMX6 || SOC_IMX7D
 	depends on CRYPTO_DEV_FSL_CAAM

-config CRYPTO_DEV_FSL_CAAM_LE
-	def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
-	depends on CRYPTO_DEV_FSL_CAAM
-
 config CRYPTO_DEV_FSL_CAAM_DEBUG
 	bool "Enable debug output in CAAM driver"
 	depends on CRYPTO_DEV_FSL_CAAM
@@ -2,7 +2,7 @@
 # Makefile for the CAAM backend and dependent components
 #
 ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
-	EXTRA_CFLAGS := -DDEBUG
+	ccflags-y := -DDEBUG
 endif

 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o

 caam-objs := ctrl.o
 caam_jr-objs := jr.o key_gen.o error.o
+caam_pkc-y := caampkc.o pkc_desc.o
@@ -847,7 +847,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 					 *next_buflen, 0);
 	} else {
 		(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
-			SEC4_SG_LEN_FIN;
+			cpu_to_caam32(SEC4_SG_LEN_FIN);
 	}

 	state->current_buf = !state->current_buf;
@@ -949,7 +949,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+		cpu_to_caam32(SEC4_SG_LEN_FIN);

 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 000000000000..851015e652b8
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,267 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */ +#include "compat.h" +#include "regs.h" +#include "intern.h" +#include "jr.h" +#include "error.h" +#include "desc_constr.h" +#include "sg_sw_sec4.h" +#include "caampkc.h" + +#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) +#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ + sizeof(struct rsa_priv_f1_pdb)) + +static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); + + if (edesc->sec4_sg_bytes) + dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, + DMA_TO_DEVICE); +} + +static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_pub_pdb *pdb = &edesc->pdb.pub; + + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE); +} + +static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; + + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); +} + +/* RSA Job Completion handler */ +static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) +{ + struct akcipher_request *req = context; + struct rsa_edesc *edesc; + + if (err) + caam_jr_strstatus(dev, err); + + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); + + rsa_pub_unmap(dev, edesc, req); + rsa_io_unmap(dev, edesc, req); + kfree(edesc); + + akcipher_request_complete(req, err); +} + +static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, + void *context) +{ + struct akcipher_request *req = context; + struct rsa_edesc *edesc; + + if (err) + caam_jr_strstatus(dev, err); + + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); + + rsa_priv_f1_unmap(dev, edesc, req); + rsa_io_unmap(dev, edesc, req); + kfree(edesc); + + akcipher_request_complete(req, err); +} + +static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, + size_t desclen) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = ctx->dev; + struct rsa_edesc *edesc; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; + int sgc; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + int src_nents, dst_nents; + + src_nents = sg_nents_for_len(req->src, req->src_len); + dst_nents = sg_nents_for_len(req->dst, req->dst_len); + + if (src_nents > 1) + sec4_sg_len = src_nents; + if (dst_nents > 1) + sec4_sg_len += dst_nents; + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc, hw desc commands and link tables */ + edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, + GFP_DMA | flags); + if (!edesc) + return ERR_PTR(-ENOMEM); + + sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); + if (unlikely(!sgc)) { + dev_err(dev, "unable to map source\n"); + goto src_fail; + } + + sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); + if (unlikely(!sgc)) { + dev_err(dev, "unable to map destination\n"); + goto dst_fail; + } + + edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; + + sec4_sg_index = 0; + if (src_nents > 1) { + sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + sec4_sg_index += src_nents; + } + if (dst_nents > 1) + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + + /* Save nents for later use in Job Descriptor */ + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + + if (!sec4_sg_bytes) + return edesc; + + edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { + dev_err(dev, "unable to map S/G table\n"); + goto sec4_sg_fail; + } + + edesc->sec4_sg_bytes = sec4_sg_bytes; + + return edesc; + +sec4_sg_fail: + dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); +dst_fail: + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); +src_fail: + kfree(edesc); + return ERR_PTR(-ENOMEM); +} + +static int set_rsa_pub_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_pub_pdb *pdb = &edesc->pdb.pub; + int sec4_sg_index = 0; + + pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->n_dma)) { + dev_err(dev, "Unable to map RSA modulus memory\n"); + return -ENOMEM; + } + + pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->e_dma)) { + dev_err(dev, "Unable to map RSA public exponent memory\n"); + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + return -ENOMEM; + } + + if (edesc->src_nents > 1) { + pdb->sgf |= RSA_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents; + } else { + pdb->f_dma = sg_dma_address(req->src); + } + + if (edesc->dst_nents > 1) { + pdb->sgf |= RSA_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->g_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; + pdb->f_len = req->src_len; + + return 0; +} + +static int set_rsa_priv_f1_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; + int sec4_sg_index = 0; + + pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, 
DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->n_dma)) { + dev_err(dev, "Unable to map modulus memory\n"); + return -ENOMEM; + } + + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->d_dma)) { + dev_err(dev, "Unable to map RSA private exponent memory\n"); + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + return -ENOMEM; + } + + if (edesc->src_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents; + } else { + pdb->g_dma = sg_dma_address(req->src); + } + + if (edesc->dst_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->f_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; + + return 0; +} + +static int caam_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + if (unlikely(!key->n || !key->e)) + return -EINVAL; + + if (req->dst_len < key->n_sz) { + req->dst_len = key->n_sz; + dev_err(jrdev, "Output buffer length less than parameter n\n"); + return -EOVERFLOW; + } + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA Encrypt Protocol Data Block */ + ret = set_rsa_pub_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); + + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req); + if (!ret) + return -EINPROGRESS; + + rsa_pub_unmap(jrdev, edesc, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static int caam_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + if (unlikely(!key->n || !key->d)) + return -EINVAL; + + if (req->dst_len < key->n_sz) { + req->dst_len = key->n_sz; + dev_err(jrdev, "Output buffer length less than parameter n\n"); + return -EOVERFLOW; + } + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */ + ret = set_rsa_priv_f1_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); + + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req); + if (!ret) + return -EINPROGRESS; + + rsa_priv_f1_unmap(jrdev, edesc, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static void caam_rsa_free_key(struct caam_rsa_key *key) +{ + kzfree(key->d); + kfree(key->e); + kfree(key->n); + key->d = NULL; + key->e = NULL; + key->n = NULL; + key->d_sz = 0; + key->e_sz = 0; + key->n_sz = 0; +} + +/** + * caam_read_raw_data - Read a raw byte stream as a positive integer. + * The function skips buffer's leading zeros, copies the remained data + * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns + * the address of the new buffer. 
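The stripping rule matters because the decryption descriptor uses the stripped length of n as its output length. A user-space rendition with a made-up modulus; unlike the kernel helper below, this version tests the remaining length before dereferencing, so an all-zero buffer stays in bounds:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *read_raw_data(const unsigned char *buf, size_t *nbytes)
{
        unsigned char *val;

        while (*nbytes && !*buf) {      /* drop leading zero octets */
                buf++;
                (*nbytes)--;
        }

        val = malloc(*nbytes);
        if (val)
                memcpy(val, buf, *nbytes);
        return val;
}

int main(void)
{
        unsigned char n[] = { 0x00, 0x00, 0xc1, 0x5e, 0x00, 0x3b };
        size_t n_sz = sizeof(n);
        unsigned char *stripped = read_raw_data(n, &n_sz);

        if (stripped) {
                /* 6 bytes in, 4 bytes out, leading octet 0xc1 */
                printf("stripped to %zu bytes, leading 0x%02x\n",
                       n_sz, stripped[0]);
                free(stripped);
        }
        return 0;
}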
+ * + * @buf : The data to read + * @nbytes: The amount of data to read + */ +static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) +{ + u8 *val; + + while (!*buf && *nbytes) { + buf++; + (*nbytes)--; + } + + val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL); + if (!val) + return NULL; + + memcpy(val, buf, *nbytes); + + return val; +} + +static int caam_rsa_check_key_length(unsigned int len) +{ + if (len > 4096) + return -EINVAL; + return 0; +} + +static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {0}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + + /* Free the old RSA key if any */ + caam_rsa_free_key(rsa_key); + + ret = rsa_parse_pub_key(&raw_key, key, keylen); + if (ret) + return ret; + + /* Copy key in DMA zone */ + rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->e) + goto err; + + /* + * Skip leading zeros and copy the positive integer to a buffer + * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * expects a positive integer for the RSA modulus and uses its length as + * decryption output length. + */ + rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); + if (!rsa_key->n) + goto err; + + if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { + caam_rsa_free_key(rsa_key); + return -EINVAL; + } + + rsa_key->e_sz = raw_key.e_sz; + rsa_key->n_sz = raw_key.n_sz; + + memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); + + return 0; +err: + caam_rsa_free_key(rsa_key); + return -ENOMEM; +} + +static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {0}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + + /* Free the old RSA key if any */ + caam_rsa_free_key(rsa_key); + + ret = rsa_parse_priv_key(&raw_key, key, keylen); + if (ret) + return ret; + + /* Copy key in DMA zone */ + rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->d) + goto err; + + rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->e) + goto err; + + /* + * Skip leading zeros and copy the positive integer to a buffer + * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * expects a positive integer for the RSA modulus and uses its length as + * decryption output length. + */ + rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); + if (!rsa_key->n) + goto err; + + if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { + caam_rsa_free_key(rsa_key); + return -EINVAL; + } + + rsa_key->d_sz = raw_key.d_sz; + rsa_key->e_sz = raw_key.e_sz; + rsa_key->n_sz = raw_key.n_sz; + + memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); + memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); + + return 0; + +err: + caam_rsa_free_key(rsa_key); + return -ENOMEM; +} + +static int caam_rsa_max_size(struct crypto_akcipher *tfm) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + + return (key->n) ? 
key->n_sz : -EINVAL;
+}
+
+/* Per session pkc's driver context creation function */
+static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	ctx->dev = caam_jr_alloc();
+
+	if (IS_ERR(ctx->dev)) {
+		dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+		return PTR_ERR(ctx->dev);
+	}
+
+	return 0;
+}
+
+/* Per session pkc's driver context cleanup function */
+static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+
+	caam_rsa_free_key(key);
+	caam_jr_free(ctx->dev);
+}
+
+static struct akcipher_alg caam_rsa = {
+	.encrypt = caam_rsa_enc,
+	.decrypt = caam_rsa_dec,
+	.sign = caam_rsa_dec,
+	.verify = caam_rsa_enc,
+	.set_pub_key = caam_rsa_set_pub_key,
+	.set_priv_key = caam_rsa_set_priv_key,
+	.max_size = caam_rsa_max_size,
+	.init = caam_rsa_init_tfm,
+	.exit = caam_rsa_exit_tfm,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-caam",
+		.cra_priority = 3000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
+	},
+};
+
+/* Public Key Cryptography module initialization handler */
+static int __init caam_pkc_init(void)
+{
+	struct device_node *dev_node;
+	struct platform_device *pdev;
+	struct device *ctrldev;
+	struct caam_drv_private *priv;
+	u32 cha_inst, pk_inst;
+	int err;
+
+	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	if (!dev_node) {
+		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+		if (!dev_node)
+			return -ENODEV;
+	}
+
+	pdev = of_find_device_by_node(dev_node);
+	if (!pdev) {
+		of_node_put(dev_node);
+		return -ENODEV;
+	}
+
+	ctrldev = &pdev->dev;
+	priv = dev_get_drvdata(ctrldev);
+	of_node_put(dev_node);
+
+	/*
+	 * If priv is NULL, it's probably because the caam driver wasn't
+	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+	 */
+	if (!priv)
+		return -ENODEV;
+
+	/* Determine public key hardware accelerator presence. */
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+
+	/* Do not register algorithms if PKHA is not present. */
+	if (!pk_inst)
+		return -ENODEV;
+
+	err = crypto_register_akcipher(&caam_rsa);
+	if (err)
+		dev_warn(ctrldev, "%s alg registration failed\n",
+			 caam_rsa.base.cra_driver_name);
+	else
+		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+
+	return err;
+}
+
+static void __exit caam_pkc_exit(void)
+{
+	crypto_unregister_akcipher(&caam_rsa);
+}
+
+module_init(caam_pkc_init);
+module_exit(caam_pkc_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 000000000000..f595d159b112
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,70 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+
+#ifndef _PKC_DESC_H_
+#define _PKC_DESC_H_
+#include "compat.h"
+#include "pdb.h"
+
+/**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + */ +struct caam_rsa_key { + u8 *n; + u8 *e; + u8 *d; + size_t n_sz; + size_t e_sz; + size_t d_sz; +}; + +/** + * caam_rsa_ctx - per session context. + * @key : RSA key in DMA zone + * @dev : device structure + */ +struct caam_rsa_ctx { + struct caam_rsa_key key; + struct device *dev; +}; + +/** + * rsa_edesc - s/w-extended rsa descriptor + * @src_nents : number of segments in input scatterlist + * @dst_nents : number of segments in output scatterlist + * @sec4_sg_bytes : length of h/w link table + * @sec4_sg_dma : dma address of h/w link table + * @sec4_sg : pointer to h/w link table + * @pdb : specific RSA Protocol Data Block (PDB) + * @hw_desc : descriptor followed by link tables if any + */ +struct rsa_edesc { + int src_nents; + int dst_nents; + int sec4_sg_bytes; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + union { + struct rsa_pub_pdb pub; + struct rsa_priv_f1_pdb priv_f1; + } pdb; + u32 hw_desc[]; +}; + +/* Descriptor construction primitives. */ +void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb); +void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb); + +#endif @@ -35,8 +35,11 @@ #include <crypto/md5.h> #include <crypto/internal/aead.h> #include <crypto/authenc.h> +#include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/akcipher.h> #endif /* !defined(CAAM_COMPAT_H) */ @@ -15,6 +15,9 @@ #include "desc_constr.h" #include "error.h" +bool caam_little_end; +EXPORT_SYMBOL(caam_little_end); + /* * i.MX targets tend to have clock control subsystems that can * enable/disable clocking to our device. 
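caam_little_end, introduced above, is what lets one driver binary talk to both big-endian SEC4 blocks (PowerPC SoCs) and little-endian ones (i.MX, Layerscape): every accessor picks the byte order at run time. A user-space model of the generated conversion helpers, with a compiler builtin standing in for the kernel's swab routines; note caam32_to_cpu() is the same operation, since a byte swap is its own inverse:

#include <stdio.h>
#include <stdint.h>

static int caam_little_end;     /* set once at probe from the CSTA bits */

static uint32_t cpu_to_caam32(uint32_t val)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return caam_little_end ? val : __builtin_bswap32(val);
#else
        return caam_little_end ? __builtin_bswap32(val) : val;
#endif
}

int main(void)
{
        caam_little_end = 0;    /* e.g. a big-endian PowerPC-hosted SEC4 */
        printf("BE device sees %08x\n", cpu_to_caam32(0x12345678));

        caam_little_end = 1;    /* e.g. an i.MX/Layerscape LE block */
        printf("LE device sees %08x\n", cpu_to_caam32(0x12345678));
        return 0;
}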
@@ -106,7 +109,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, if (ctrlpriv->virt_en == 1) { - setbits32(&ctrl->deco_rsr, DECORSR_JR0); + clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && --timeout) @@ -115,7 +118,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, timeout = 100000; } - setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE); while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && --timeout) @@ -123,12 +126,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, if (!timeout) { dev_err(ctrldev, "failed to acquire DECO 0\n"); - clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); return -ENODEV; } for (i = 0; i < desc_len(desc); i++) - wr_reg32(&deco->descbuf[i], *(desc + i)); + wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i))); flags = DECO_JQCR_WHL; /* @@ -139,7 +142,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, flags |= DECO_JQCR_FOUR; /* Instruct the DECO to execute it */ - setbits32(&deco->jr_ctl_hi, flags); + clrsetbits_32(&deco->jr_ctl_hi, 0, flags); timeout = 10000000; do { @@ -158,10 +161,10 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, DECO_OP_STATUS_HI_ERR_MASK; if (ctrlpriv->virt_en == 1) - clrbits32(&ctrl->deco_rsr, DECORSR_JR0); + clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0); /* Mark the DECO as free */ - clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); + clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); if (!timeout) return -EAGAIN; @@ -349,7 +352,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) r4tst = &ctrl->r4tst[0]; /* put RNG4 into program mode */ - setbits32(&r4tst->rtmctl, RTMCTL_PRGM); + clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM); /* * Performance-wise, it does not make sense to @@ -363,7 +366,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) >> RTSDCTL_ENT_DLY_SHIFT; if (ent_delay <= val) { /* put RNG4 into run mode */ - clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0); return; } @@ -381,9 +384,9 @@ static void kick_trng(struct platform_device *pdev, int ent_delay) * select raw sampling in both entropy shifter * and statistical checker */ - setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC); + clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC); /* put RNG4 into run mode */ - clrbits32(&val, RTMCTL_PRGM); + clrsetbits_32(&val, RTMCTL_PRGM, 0); /* write back the control register */ wr_reg32(&r4tst->rtmctl, val); } @@ -406,6 +409,23 @@ int caam_get_era(void) } EXPORT_SYMBOL(caam_get_era); +#ifdef CONFIG_DEBUG_FS +static int caam_debugfs_u64_get(void *data, u64 *val) +{ + *val = caam64_to_cpu(*(u64 *)data); + return 0; +} + +static int caam_debugfs_u32_get(void *data, u64 *val) +{ + *val = caam32_to_cpu(*(u32 *)data); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); +#endif + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { @@ -504,6 +524,10 @@ static int caam_probe(struct platform_device *pdev) ret = -ENOMEM; goto disable_caam_emi_slow; } + + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & + (CSTA_PLEND | CSTA_ALT_PLEND)); + /* Finding the page size for using the CTPR_MS register */ 
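All the setbits32()/clrbits32() conversions in this and the neighbouring hunks funnel into one endian-aware read-modify-write primitive: clrsetbits_32(reg, 0, set) is the old setbits32(), clrsetbits_32(reg, clear, 0) the old clrbits32(), and a genuine clear-then-set now needs only one register round trip. A plain-memory model of the identity:

#include <stdio.h>
#include <stdint.h>

static void clrsetbits_32(uint32_t *reg, uint32_t clear, uint32_t set)
{
        *reg = (*reg & ~clear) | set;   /* one RMW covers all three uses */
}

int main(void)
{
        uint32_t reg = 0x000000f0;

        clrsetbits_32(&reg, 0, 0x01);           /* setbits32(reg, 0x01) */
        clrsetbits_32(&reg, 0x10, 0);           /* clrbits32(reg, 0x10) */
        clrsetbits_32(&reg, 0xe0, 0x02);        /* clear and set at once */
        printf("reg = 0x%08x\n", reg);          /* prints 0x00000003 */
        return 0;
}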
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; @@ -559,9 +583,9 @@ static int caam_probe(struct platform_device *pdev) } if (ctrlpriv->virt_en == 1) - setbits32(&ctrl->jrstart, JRSTART_JR0_START | - JRSTART_JR1_START | JRSTART_JR2_START | - JRSTART_JR3_START); + clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START | + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); if (sizeof(dma_addr_t) == sizeof(u64)) if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) @@ -693,7 +717,7 @@ static int caam_probe(struct platform_device *pdev) ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; /* Enable RDB bit so that RNG works faster */ - setbits32(&ctrl->scfgr, SCFGR_RDBENABLE); + clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE); } /* NOTE: RTIC detection ought to go here, around Si time */ @@ -719,48 +743,59 @@ static int caam_probe(struct platform_device *pdev) ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); /* Controller-level - performance monitor counters */ + ctrlpriv->ctl_rq_dequeued = - debugfs_create_u64("rq_dequeued", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->req_dequeued); + debugfs_create_file("rq_dequeued", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->req_dequeued, + &caam_fops_u64_ro); ctrlpriv->ctl_ob_enc_req = - debugfs_create_u64("ob_rq_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_req); + debugfs_create_file("ob_rq_encrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_req, + &caam_fops_u64_ro); ctrlpriv->ctl_ib_dec_req = - debugfs_create_u64("ib_rq_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_req); + debugfs_create_file("ib_rq_decrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_req, + &caam_fops_u64_ro); ctrlpriv->ctl_ob_enc_bytes = - debugfs_create_u64("ob_bytes_encrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_enc_bytes); + debugfs_create_file("ob_bytes_encrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_enc_bytes, + &caam_fops_u64_ro); ctrlpriv->ctl_ob_prot_bytes = - debugfs_create_u64("ob_bytes_protected", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ob_prot_bytes); + debugfs_create_file("ob_bytes_protected", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ob_prot_bytes, + &caam_fops_u64_ro); ctrlpriv->ctl_ib_dec_bytes = - debugfs_create_u64("ib_bytes_decrypted", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_dec_bytes); + debugfs_create_file("ib_bytes_decrypted", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_dec_bytes, + &caam_fops_u64_ro); ctrlpriv->ctl_ib_valid_bytes = - debugfs_create_u64("ib_bytes_validated", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->ib_valid_bytes); + debugfs_create_file("ib_bytes_validated", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->ib_valid_bytes, + &caam_fops_u64_ro); /* Controller level - global status values */ ctrlpriv->ctl_faultaddr = - debugfs_create_u64("fault_addr", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultaddr); + debugfs_create_file("fault_addr", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->faultaddr, + &caam_fops_u32_ro); ctrlpriv->ctl_faultdetail = - debugfs_create_u32("fault_detail", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->faultdetail); + debugfs_create_file("fault_detail", + S_IRUSR | S_IRGRP | S_IROTH, + 
ctrlpriv->ctl, &perfmon->faultdetail, + &caam_fops_u32_ro); ctrlpriv->ctl_faultstatus = - debugfs_create_u32("fault_status", - S_IRUSR | S_IRGRP | S_IROTH, - ctrlpriv->ctl, &perfmon->status); + debugfs_create_file("fault_status", + S_IRUSR | S_IRGRP | S_IROTH, + ctrlpriv->ctl, &perfmon->status, + &caam_fops_u32_ro); /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; @@ -20,19 +20,18 @@ #define SEC4_SG_BPID_MASK 0x000000ff #define SEC4_SG_BPID_SHIFT 16 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ -#define SEC4_SG_OFFS_MASK 0x00001fff +#define SEC4_SG_OFFSET_MASK 0x00001fff struct sec4_sg_entry { -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX +#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \ + defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX) u32 rsvd1; dma_addr_t ptr; #else u64 ptr; #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */ u32 len; - u8 rsvd2; - u8 buf_pool_id; - u16 offset; + u32 bpid_offset; }; /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ @@ -454,6 +453,8 @@ struct sec4_sg_entry { #define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT) #define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT) #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) +#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) +#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) @@ -5,6 +5,7 @@ */ #include "desc.h" +#include "regs.h" #define IMMEDIATE (1 << 23) #define CAAM_CMD_SZ sizeof(u32) @@ -30,9 +31,11 @@ LDST_SRCDST_WORD_DECOCTRL | \ (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) +extern bool caam_little_end; + static inline int desc_len(u32 *desc) { - return *desc & HDR_DESCLEN_MASK; + return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; } static inline int desc_bytes(void *desc) @@ -52,7 +55,7 @@ static inline void *sh_desc_pdb(u32 *desc) static inline void init_desc(u32 *desc, u32 options) { - *desc = (options | HDR_ONE) + 1; + *desc = cpu_to_caam32((options | HDR_ONE) + 1); } static inline void init_sh_desc(u32 *desc, u32 options) @@ -74,13 +77,21 @@ static inline void init_job_desc(u32 *desc, u32 options) init_desc(desc, CMD_DESC_HDR | options); } +static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) +{ + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + + init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); +} + static inline void append_ptr(u32 *desc, dma_addr_t ptr) { dma_addr_t *offset = (dma_addr_t *)desc_end(desc); - *offset = ptr; + *offset = cpu_to_caam_dma(ptr); - (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + + CAAM_PTR_SZ / CAAM_CMD_SZ); } static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, @@ -99,16 +110,17 @@ static inline void append_data(u32 *desc, void *data, int len) if (len) /* avoid sparse warning: memcpy with byte count of 0 */ memcpy(offset, data, len); - (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + + (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); } static inline void append_cmd(u32 *desc, u32 command) { u32 *cmd = desc_end(desc); - *cmd = command; + *cmd = cpu_to_caam32(command); - (*desc)++; + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1); } #define append_u32 append_cmd @@ -117,16 +129,22 @@ static inline void append_u64(u32 *desc, u64 data) { u32 *offset = desc_end(desc); - *offset = upper_32_bits(data); - 
*(++offset) = lower_32_bits(data); + /* Only 32-bit alignment is guaranteed in descriptor buffer */ + if (caam_little_end) { + *offset = cpu_to_caam32(lower_32_bits(data)); + *(++offset) = cpu_to_caam32(upper_32_bits(data)); + } else { + *offset = cpu_to_caam32(upper_32_bits(data)); + *(++offset) = cpu_to_caam32(lower_32_bits(data)); + } - (*desc) += 2; + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2); } /* Write command without affecting header, and return pointer to next word */ static inline u32 *write_cmd(u32 *desc, u32 command) { - *desc = command; + *desc = cpu_to_caam32(command); return desc + 1; } @@ -168,14 +186,17 @@ APPEND_CMD_RET(move, MOVE) static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) { - *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); + *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | + (desc_len(desc) - (jump_cmd - desc))); } static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) { - *move_cmd &= ~MOVE_OFFSET_MASK; - *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & - MOVE_OFFSET_MASK); + u32 val = caam32_to_cpu(*move_cmd); + + val &= ~MOVE_OFFSET_MASK; + val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK; + *move_cmd = cpu_to_caam32(val); } #define APPEND_CMD(cmd, op) \ @@ -31,7 +31,7 @@ static int caam_reset_hw_jr(struct device *dev) * mask interrupts since we are going to poll * for reset completion status */ - setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); /* initiate flush (required prior to reset) */ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); @@ -57,7 +57,7 @@ static int caam_reset_hw_jr(struct device *dev) } /* unmask interrupts */ - clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); return 0; } @@ -147,7 +147,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) } /* mask valid interrupts */ - setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); /* Have valid interrupt at this point, just ACK and trigger */ wr_reg32(&jrp->rregs->jrintstatus, irqstate); @@ -182,7 +182,7 @@ static void caam_jr_dequeue(unsigned long devarg) sw_idx = (tail + i) & (JOBR_DEPTH - 1); if (jrp->outring[hw_idx].desc == - jrp->entinfo[sw_idx].desc_addr_dma) + caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) break; /* found */ } /* we should never fail to find a matching descriptor */ @@ -200,7 +200,7 @@ static void caam_jr_dequeue(unsigned long devarg) usercall = jrp->entinfo[sw_idx].callbk; userarg = jrp->entinfo[sw_idx].cbkarg; userdesc = jrp->entinfo[sw_idx].desc_addr_virt; - userstatus = jrp->outring[hw_idx].jrstatus; + userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus); /* * Make sure all information from the job has been obtained @@ -236,7 +236,7 @@ static void caam_jr_dequeue(unsigned long devarg) } /* reenable / unmask IRQs */ - clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); } /** @@ -330,7 +330,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, int head, tail, desc_size; dma_addr_t desc_dma; - desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32); + desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32); desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, desc_dma)) { dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); @@ -356,7 +356,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, 
head_entry->cbkarg = areq; head_entry->desc_addr_dma = desc_dma; - jrp->inpring[jrp->inp_ring_write_index] = desc_dma; + jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma); /* * Guarantee that the descriptor's DMA address has been written to @@ -444,9 +444,9 @@ static int caam_jr_init(struct device *dev) spin_lock_init(&jrp->outlock); /* Select interrupt coalescing parameters */ - setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC | - (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | - (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | + (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | + (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); return 0; @@ -1,18 +1,19 @@ /* * CAAM Protocol Data Block (PDB) definition header file * - * Copyright 2008-2012 Freescale Semiconductor, Inc. + * Copyright 2008-2016 Freescale Semiconductor, Inc. * */ #ifndef CAAM_PDB_H #define CAAM_PDB_H +#include "compat.h" /* * PDB- IPSec ESP Header Modification Options */ -#define PDBHMO_ESP_DECAP_SHIFT 12 -#define PDBHMO_ESP_ENCAP_SHIFT 4 +#define PDBHMO_ESP_DECAP_SHIFT 28 +#define PDBHMO_ESP_ENCAP_SHIFT 28 /* * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the * Options Byte IP version (IPvsn) field: @@ -32,12 +33,23 @@ */ #define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) +#define PDBNH_ESP_ENCAP_SHIFT 16 +#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT) + +#define PDBHDRLEN_ESP_DECAP_SHIFT 16 +#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT) + +#define PDB_NH_OFFSET_SHIFT 8 +#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT) + /* * PDB - IPSec ESP Encap/Decap Options */ #define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ #define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ +#define PDBOPTS_ESP_ARS128 0x80 /* 128-entry antireplay window */ #define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ +#define PDBOPTS_ESP_ARS_MASK 0xc0 /* antireplay window mask */ #define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ #define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ #define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ @@ -54,35 +66,73 @@ /* * General IPSec encap/decap PDB definitions */ + +/** + * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation + * @iv: 16-byte array initialization vector + */ struct ipsec_encap_cbc { - u32 iv[4]; + u8 iv[16]; }; +/** + * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation + * @ctr_nonce: 4-byte array nonce + * @ctr_initial: initial count constant + * @iv: initialization vector + */ struct ipsec_encap_ctr { - u32 ctr_nonce; + u8 ctr_nonce[4]; u32 ctr_initial; - u32 iv[2]; + u64 iv; }; +/** + * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation + * @salt: 3-byte array salt (lower 24 bits) + * @ccm_opt: CCM algorithm options - MSB-LSB description: + * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV, + * 0x7B for 16-byte ICV (cf. 
RFC4309, RFC3610) + * ctr_flags (8b) - counter flags; constant equal to 0x3 + * ctr_initial (16b) - initial count constant + * @iv: initialization vector + */ struct ipsec_encap_ccm { - u32 salt; /* lower 24 bits */ - u8 b0_flags; - u8 ctr_flags; - u16 ctr_initial; - u32 iv[2]; + u8 salt[4]; + u32 ccm_opt; + u64 iv; }; +/** + * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation + * @salt: 3-byte array salt (lower 24 bits) + * @rsvd: reserved, do not use + * @iv: initialization vector + */ struct ipsec_encap_gcm { - u32 salt; /* lower 24 bits */ + u8 salt[4]; u32 rsvd1; - u32 iv[2]; + u64 iv; }; +/** + * ipsec_encap_pdb - PDB for IPsec encapsulation + * @options: MSB-LSB description + * hmo (header manipulation options) - 4b + * reserved - 4b + * next header - 8b + * next header offset - 8b + * option flags (depend on selected algorithm) - 8b + * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN) + * @seq_num: IPsec sequence number + * @spi: IPsec SPI (Security Parameters Index) + * @ip_hdr_len: optional IP Header length (in bytes) + * reserved - 16b + * Opt. IP Hdr Len - 16b + * @ip_hdr: optional IP Header content + */ struct ipsec_encap_pdb { - u8 hmo_rsvd; - u8 ip_nh; - u8 ip_nh_offset; - u8 options; + u32 options; u32 seq_num_ext_hi; u32 seq_num; union { @@ -92,36 +142,65 @@ struct ipsec_encap_pdb { struct ipsec_encap_gcm gcm; }; u32 spi; - u16 rsvd1; - u16 ip_hdr_len; - u32 ip_hdr[0]; /* optional IP Header content */ + u32 ip_hdr_len; + u32 ip_hdr[0]; }; +/** + * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation + * @rsvd: reserved, do not use + */ struct ipsec_decap_cbc { u32 rsvd[2]; }; +/** + * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation + * @ctr_nonce: 4-byte array nonce + * @ctr_initial: initial count constant + */ struct ipsec_decap_ctr { - u32 salt; + u8 ctr_nonce[4]; u32 ctr_initial; }; +/** + * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation + * @salt: 3-byte salt (lower 24 bits) + * @ccm_opt: CCM algorithm options - MSB-LSB description: + * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV, + * 0x7B for 16-byte ICV (cf. 
RFC4309, RFC3610)
+ *	ctr_flags (8b) - counter flags; constant equal to 0x3
+ *	ctr_initial (16b) - initial count constant
+ */
 struct ipsec_decap_ccm {
-	u32 salt;
-	u8 iv_flags;
-	u8 ctr_flags;
-	u16 ctr_initial;
+	u8 salt[4];
+	u32 ccm_opt;
 };

+/**
+ * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
 struct ipsec_decap_gcm {
-	u32 salt;
+	u8 salt[4];
 	u32 resvd;
 };

+/**
+ * ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description
+ *	hmo (header manipulation options) - 4b
+ *	IP header length - 12b
+ *	next header offset - 8b
+ *	option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags)
+ */
 struct ipsec_decap_pdb {
-	u16 hmo_ip_hdr_len;
-	u8 ip_nh_offset;
-	u8 options;
+	u32 options;
 	union {
 		struct ipsec_decap_cbc cbc;
 		struct ipsec_decap_ctr ctr;
@@ -130,8 +209,7 @@ struct ipsec_decap_pdb {
 	};
 	u32 seq_num_ext_hi;
 	u32 seq_num;
-	u32 anti_replay[2];
-	u32 end_index[0];
+	__be32 anti_replay[4];
 };

 /*
@@ -399,4 +477,52 @@ struct dsa_verify_pdb {
 	u8 *ab; /* only used if ECC processing */
 };

+/* RSA Protocol Data Block */
+#define RSA_PDB_SGF_SHIFT	28
+#define RSA_PDB_E_SHIFT		12
+#define RSA_PDB_E_MASK		(0xFFF << RSA_PDB_E_SHIFT)
+#define RSA_PDB_D_SHIFT		12
+#define RSA_PDB_D_MASK		(0xFFF << RSA_PDB_D_SHIFT)
+
+#define RSA_PDB_SGF_F		(0x8 << RSA_PDB_SGF_SHIFT)
+#define RSA_PDB_SGF_G		(0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_F	(0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_G	(0x8 << RSA_PDB_SGF_SHIFT)
+
+#define RSA_PRIV_KEY_FRM_1	0
+
+/**
+ * RSA Encrypt Protocol Data Block
+ * @sgf: scatter-gather field
+ * @f_dma: dma address of input data
+ * @g_dma: dma address of encrypted output data
+ * @n_dma: dma address of RSA modulus
+ * @e_dma: dma address of RSA public exponent
+ * @f_len: length in octets of the input data
+ */
+struct rsa_pub_pdb {
+	u32 sgf;
+	dma_addr_t f_dma;
+	dma_addr_t g_dma;
+	dma_addr_t n_dma;
+	dma_addr_t e_dma;
+	u32 f_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #1
+ * @sgf: scatter-gather field
+ * @g_dma: dma address of encrypted input data
+ * @f_dma: dma address of output data
+ * @n_dma: dma address of RSA modulus
+ * @d_dma: dma address of RSA private exponent
+ */
+struct rsa_priv_f1_pdb {
+	u32 sgf;
+	dma_addr_t g_dma;
+	dma_addr_t f_dma;
+	dma_addr_t n_dma;
+	dma_addr_t d_dma;
+} __packed;
+
 #endif
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 000000000000..4e4183e615ea
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,36 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
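A quick sizing sanity-check for the PDBs just defined, assuming a 64-bit dma_addr_t (numbers shrink on 32-bit DMA builds). The mirrored struct below is only a sketch of the __packed layout; the start-index arithmetic is the same computation init_job_desc_pdb() performs when pkc_desc.c builds the job header:

#include <stdio.h>
#include <stdint.h>

#define CAAM_CMD_SZ sizeof(uint32_t)    /* one descriptor command word */

struct rsa_pub_pdb {                    /* mirrors the __packed layout */
        uint32_t sgf;
        uint64_t f_dma, g_dma, n_dma, e_dma;
        uint32_t f_len;
} __attribute__((packed));

int main(void)
{
        size_t pdb_bytes = sizeof(struct rsa_pub_pdb);
        size_t pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;

        /* 40 bytes -> 10 words; commands start at word 11 (header + PDB) */
        printf("pub PDB: %zu bytes, %zu words, start index %zu\n",
               pdb_bytes, pdb_len, pdb_len + 1);
        /* header word + OPERATION word + PDB = DESC_RSA_PUB_LEN (48) */
        printf("DESC_RSA_PUB_LEN = %zu bytes\n", 2 * CAAM_CMD_SZ + pdb_bytes);
        return 0;
}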
+ */ +#include "caampkc.h" +#include "desc_constr.h" + +/* Descriptor for RSA Public operation */ +void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, sizeof(*pdb)); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->n_dma); + append_ptr(desc, pdb->e_dma); + append_cmd(desc, pdb->f_len); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY); +} + +/* Descriptor for RSA Private operation - Private Key Form #1 */ +void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, sizeof(*pdb)); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->n_dma); + append_ptr(desc, pdb->d_dma); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | + RSA_PRIV_KEY_FRM_1); +} @@ -8,6 +8,7 @@ #define REGS_H #include <linux/types.h> +#include <linux/bitops.h> #include <linux/io.h> /* @@ -65,46 +66,56 @@ * */ -#ifdef CONFIG_ARM -/* These are common macros for Power, put here for ARM */ -#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr)) -#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr)) +extern bool caam_little_end; -#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a) -#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a)) +#define caam_to_cpu(len) \ +static inline u##len caam##len ## _to_cpu(u##len val) \ +{ \ + if (caam_little_end) \ + return le##len ## _to_cpu(val); \ + else \ + return be##len ## _to_cpu(val); \ +} -#define out_le32(a, v) out_arch(l, le32, a, v) -#define in_le32(a) in_arch(l, le32, a) +#define cpu_to_caam(len) \ +static inline u##len cpu_to_caam##len(u##len val) \ +{ \ + if (caam_little_end) \ + return cpu_to_le##len(val); \ + else \ + return cpu_to_be##len(val); \ +} -#define out_be32(a, v) out_arch(l, be32, a, v) -#define in_be32(a) in_arch(l, be32, a) +caam_to_cpu(16) +caam_to_cpu(32) +caam_to_cpu(64) +cpu_to_caam(16) +cpu_to_caam(32) +cpu_to_caam(64) -#define clrsetbits(type, addr, clear, set) \ - out_##type((addr), (in_##type(addr) & ~(clear)) | (set)) +static inline void wr_reg32(void __iomem *reg, u32 data) +{ + if (caam_little_end) + iowrite32(data, reg); + else + iowrite32be(data, reg); +} -#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set) -#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) -#endif +static inline u32 rd_reg32(void __iomem *reg) +{ + if (caam_little_end) + return ioread32(reg); -#ifdef __BIG_ENDIAN -#define wr_reg32(reg, data) out_be32(reg, data) -#define rd_reg32(reg) in_be32(reg) -#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set) -#ifdef CONFIG_64BIT -#define wr_reg64(reg, data) out_be64(reg, data) -#define rd_reg64(reg) in_be64(reg) -#endif -#else -#ifdef __LITTLE_ENDIAN -#define wr_reg32(reg, data) __raw_writel(data, reg) -#define rd_reg32(reg) __raw_readl(reg) -#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set) -#ifdef CONFIG_64BIT -#define wr_reg64(reg, data) __raw_writeq(data, reg) -#define rd_reg64(reg) __raw_readq(reg) -#endif -#endif -#endif + return ioread32be(reg); +} + +static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set) +{ + if (caam_little_end) + iowrite32((ioread32(reg) & ~clear) | set, reg); + else + iowrite32be((ioread32be(reg) & ~clear) | set, reg); +} /* * The only users of these wr/rd_reg64 functions is the Job 
Ring (JR). @@ -123,29 +134,67 @@ * base + 0x0000 : least-significant 32 bits * base + 0x0004 : most-significant 32 bits */ +#ifdef CONFIG_64BIT +static inline void wr_reg64(void __iomem *reg, u64 data) +{ + if (caam_little_end) + iowrite64(data, reg); + else + iowrite64be(data, reg); +} -#ifndef CONFIG_64BIT -#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \ - defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX) -#define REG64_MS32(reg) ((u32 __iomem *)(reg)) -#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) -#else -#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1) -#define REG64_LS32(reg) ((u32 __iomem *)(reg)) -#endif - -static inline void wr_reg64(u64 __iomem *reg, u64 data) +static inline u64 rd_reg64(void __iomem *reg) { - wr_reg32(REG64_MS32(reg), data >> 32); - wr_reg32(REG64_LS32(reg), data); + if (caam_little_end) + return ioread64(reg); + else + return ioread64be(reg); } -static inline u64 rd_reg64(u64 __iomem *reg) +#else /* CONFIG_64BIT */ +static inline void wr_reg64(void __iomem *reg, u64 data) { - return ((u64)rd_reg32(REG64_MS32(reg)) << 32 | - (u64)rd_reg32(REG64_LS32(reg))); +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX + if (caam_little_end) { + wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); + wr_reg32((u32 __iomem *)(reg), data); + } else +#endif + { + wr_reg32((u32 __iomem *)(reg), data >> 32); + wr_reg32((u32 __iomem *)(reg) + 1, data); + } } + +static inline u64 rd_reg64(void __iomem *reg) +{ +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX + if (caam_little_end) + return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg))); + else #endif + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | + (u64)rd_reg32((u32 __iomem *)(reg) + 1)); +} +#endif /* CONFIG_64BIT */ + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_SOC_IMX7D +#define cpu_to_caam_dma(value) \ + (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ + (u64)cpu_to_caam32(upper_32_bits(value))) +#define caam_dma_to_cpu(value) \ + (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ + (u64)caam32_to_cpu(upper_32_bits(value))) +#else +#define cpu_to_caam_dma(value) cpu_to_caam64(value) +#define caam_dma_to_cpu(value) caam64_to_cpu(value) +#endif /* CONFIG_SOC_IMX7D */ +#else +#define cpu_to_caam_dma(value) cpu_to_caam32(value) +#define caam_dma_to_cpu(value) caam32_to_cpu(value) +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ /* * jr_outentry @@ -249,6 +298,8 @@ struct caam_perfmon { u32 faultliodn; /* FALR - Fault Address LIODN */ u32 faultdetail; /* FADR - Fault Addr Detail */ u32 rsvd2; +#define CSTA_PLEND BIT(10) +#define CSTA_ALT_PLEND BIT(18) u32 status; /* CSTA - CAAM Status */ u64 rsvd3; @@ -5,18 +5,19 @@ * */ +#include "regs.h" + struct sec4_sg_entry; /* * convert single dma address to h/w link table format */ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, - dma_addr_t dma, u32 len, u32 offset) + dma_addr_t dma, u32 len, u16 offset) { - sec4_sg_ptr->ptr = dma; - sec4_sg_ptr->len = len; - sec4_sg_ptr->buf_pool_id = 0; - sec4_sg_ptr->offset = offset; + sec4_sg_ptr->ptr = cpu_to_caam_dma(dma); + sec4_sg_ptr->len = cpu_to_caam32(len); + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK); #ifdef DEBUG print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, @@ -30,7 +31,7 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, */ static inline struct sec4_sg_entry * sg_to_sec4_sg(struct scatterlist *sg, int sg_count, - struct sec4_sg_entry *sec4_sg_ptr, u32 offset) + struct sec4_sg_entry *sec4_sg_ptr, 
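/*
 * Sketch (not from the patch): a typical caller builds a CAAM hardware
 * link table from a DMA-mapped scatterlist and lets the helper tag the
 * final entry; "tbl" is assumed to be a driver-allocated array of
 * sec4_sg_entry in DMA-able memory.
 */
static void example_build_link_table(struct scatterlist *src, int nents,
				     struct sec4_sg_entry *tbl)
{
	/* converts every entry; the last one gets SEC4_SG_LEN_FIN set */
	sg_to_sec4_sg_last(src, nents, tbl, 0);
}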
u16 offset) { while (sg_count) { dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), @@ -48,10 +49,10 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count, */ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, struct sec4_sg_entry *sec4_sg_ptr, - u32 offset) + u16 offset) { sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); - sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); } static inline struct sec4_sg_entry *sg_to_sec4_sg_len( @@ -14,9 +14,8 @@ #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> -#include <linux/crypto.h> -#include <crypto/algapi.h> #include <crypto/aes.h> +#include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" @@ -110,18 +109,16 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); - return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key, - key_len); + return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); } static int ccp_aes_xts_crypt(struct ablkcipher_request *req, unsigned int encrypt) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); unsigned int unit; + u32 unit_size; int ret; if (!ctx->u.aes.key_len) @@ -133,20 +130,31 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) - if (!(req->nbytes & (unit_size_map[unit].size - 1))) - break; + unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; + if (req->nbytes <= unit_size_map[0].size) { + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { + if (!(req->nbytes & (unit_size_map[unit].size - 1))) { + unit_size = unit_size_map[unit].value; + break; + } + } + } - if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || + if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || (ctx->u.aes.key_len != AES_KEYSIZE_128)) { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher); + /* Use the fallback to process the request for any * unsupported unit sizes or key sizes */ - ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher); - ret = (encrypt) ? crypto_ablkcipher_encrypt(req) : - crypto_ablkcipher_decrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); - + skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + ret = encrypt ? crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); return ret; } @@ -158,7 +166,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; rctx->cmd.u.xts.action = (encrypt) ? 
CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; - rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; + rctx->cmd.u.xts.unit_size = unit_size; rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; rctx->cmd.u.xts.iv = &rctx->iv_sg; @@ -185,23 +193,21 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req) static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_ablkcipher *fallback_tfm; + struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; ctx->u.aes.key_len = 0; - fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0, - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { - pr_warn("could not load fallback driver %s\n", - crypto_tfm_alg_name(tfm)); + pr_warn("could not load fallback driver xts(aes)\n"); return PTR_ERR(fallback_tfm); } - ctx->u.aes.tfm_ablkcipher = fallback_tfm; + ctx->u.aes.tfm_skcipher = fallback_tfm; - tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) + - fallback_tfm->base.crt_ablkcipher.reqsize; + tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx); return 0; } @@ -210,9 +216,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->u.aes.tfm_ablkcipher) - crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher); - ctx->u.aes.tfm_ablkcipher = NULL; + crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } static int ccp_register_aes_xts_alg(struct list_head *head, @@ -17,7 +17,6 @@ #include <linux/wait.h> #include <linux/pci.h> #include <linux/ccp.h> -#include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> @@ -69,7 +68,7 @@ static inline struct ccp_crypto_ahash_alg * /***** AES related defines *****/ struct ccp_aes_ctx { /* Fallback cipher for XTS with unsupported unit sizes */ - struct crypto_ablkcipher *tfm_ablkcipher; + struct crypto_skcipher *tfm_skcipher; /* Cipher used to generate CMAC K1/K2 keys */ struct crypto_cipher *tfm_cipher; @@ -31,22 +31,42 @@ #include "cesa.h" +/* Limit of the crypto queue before reaching the backlog */ +#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128 + static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); module_param_named(allhwsupport, allhwsupport, int, 0444); MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)"); struct mv_cesa_dev *cesa_dev; -static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) +struct crypto_async_request * +mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine, + struct crypto_async_request **backlog) { - struct crypto_async_request *req, *backlog; + struct crypto_async_request *req; + + *backlog = crypto_get_backlog(&engine->queue); + req = crypto_dequeue_request(&engine->queue); + + if (!req) + return NULL; + + return req; +} + +static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine) +{ + struct crypto_async_request *req = NULL, *backlog = NULL; struct mv_cesa_ctx *ctx; - spin_lock_bh(&cesa_dev->lock); - backlog = crypto_get_backlog(&cesa_dev->queue); - req = crypto_dequeue_request(&cesa_dev->queue); - engine->req = req; - spin_unlock_bh(&cesa_dev->lock); + + spin_lock_bh(&engine->lock); + if (!engine->req) { + req = mv_cesa_dequeue_req_locked(engine, &backlog); + engine->req = req; + } + spin_unlock_bh(&engine->lock); if (!req) return; @@ 
-55,8 +75,47 @@ static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) backlog->complete(backlog, -EINPROGRESS); ctx = crypto_tfm_ctx(req->tfm); - ctx->ops->prepare(req, engine); ctx->ops->step(req); + + return; +} + +static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) +{ + struct crypto_async_request *req; + struct mv_cesa_ctx *ctx; + int res; + + req = engine->req; + ctx = crypto_tfm_ctx(req->tfm); + res = ctx->ops->process(req, status); + + if (res == 0) { + ctx->ops->complete(req); + mv_cesa_engine_enqueue_complete_request(engine, req); + } else if (res == -EINPROGRESS) { + ctx->ops->step(req); + } + + return res; +} + +static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status) +{ + if (engine->chain.first && engine->chain.last) + return mv_cesa_tdma_process(engine, status); + + return mv_cesa_std_process(engine, status); +} + +static inline void +mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req, + int res) +{ + ctx->ops->cleanup(req); + local_bh_disable(); + req->complete(req, res); + local_bh_enable(); } static irqreturn_t mv_cesa_int(int irq, void *priv) @@ -83,49 +142,54 @@ static irqreturn_t mv_cesa_int(int irq, void *priv) writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); writel(~status, engine->regs + CESA_SA_INT_STATUS); + /* Process fetched requests */ + res = mv_cesa_int_process(engine, status & mask); ret = IRQ_HANDLED; + spin_lock_bh(&engine->lock); req = engine->req; + if (res != -EINPROGRESS) + engine->req = NULL; spin_unlock_bh(&engine->lock); - if (req) { - ctx = crypto_tfm_ctx(req->tfm); - res = ctx->ops->process(req, status & mask); - if (res != -EINPROGRESS) { - spin_lock_bh(&engine->lock); - engine->req = NULL; - mv_cesa_dequeue_req_unlocked(engine); - spin_unlock_bh(&engine->lock); - ctx->ops->cleanup(req); - local_bh_disable(); - req->complete(req, res); - local_bh_enable(); - } else { - ctx->ops->step(req); - } + + ctx = crypto_tfm_ctx(req->tfm); + + if (res && res != -EINPROGRESS) + mv_cesa_complete_req(ctx, req, res); + + /* Launch the next pending request */ + mv_cesa_rearm_engine(engine); + + /* Iterate over the complete queue */ + while (true) { + req = mv_cesa_engine_dequeue_complete_request(engine); + if (!req) + break; + + mv_cesa_complete_req(ctx, req, 0); } } return ret; } -int mv_cesa_queue_req(struct crypto_async_request *req) +int mv_cesa_queue_req(struct crypto_async_request *req, + struct mv_cesa_req *creq) { int ret; - int i; + struct mv_cesa_engine *engine = creq->engine; + + spin_lock_bh(&engine->lock); + if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ) + mv_cesa_tdma_chain(engine, creq); - spin_lock_bh(&cesa_dev->lock); - ret = crypto_enqueue_request(&cesa_dev->queue, req); - spin_unlock_bh(&cesa_dev->lock); + ret = crypto_enqueue_request(&engine->queue, req); + spin_unlock_bh(&engine->lock); if (ret != -EINPROGRESS) return ret; - for (i = 0; i < cesa_dev->caps->nengines; i++) { - spin_lock_bh(&cesa_dev->engines[i].lock); - if (!cesa_dev->engines[i].req) - mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]); - spin_unlock_bh(&cesa_dev->engines[i].lock); - } + mv_cesa_rearm_engine(engine); return -EINPROGRESS; } @@ -309,6 +373,10 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) if (!dma->padding_pool) return -ENOMEM; + dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0); + if (!dma->iv_pool) + return -ENOMEM; + cesa->dma = dma; return 0; @@ -416,7 +484,7 @@ static int mv_cesa_probe(struct platform_device *pdev) return -ENOMEM; 
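/*
 * Orientation sketch (it mirrors the mv_cesa_ablkcipher_queue_req
 * helper introduced later in this series): with per-engine queues,
 * submission becomes select-engine / prepare / enqueue.
 */
static int example_submit(struct ablkcipher_request *req,
			  struct mv_cesa_req *creq)
{
	struct mv_cesa_engine *engine;
	int ret;

	engine = mv_cesa_select_engine(req->nbytes);	/* least-loaded engine */
	mv_cesa_ablkcipher_prepare(&req->base, engine);	/* bind req to engine */
	ret = mv_cesa_queue_req(&req->base, creq);	/* chain TDMA + enqueue */
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);
	return ret;
}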
spin_lock_init(&cesa->lock); - crypto_init_queue(&cesa->queue, 50); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); cesa->regs = devm_ioremap_resource(dev, res); if (IS_ERR(cesa->regs)) @@ -489,6 +557,10 @@ static int mv_cesa_probe(struct platform_device *pdev) engine); if (ret) goto err_cleanup; + + crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN); + atomic_set(&engine->load, 0); + INIT_LIST_HEAD(&engine->complete_queue); } cesa_dev = cesa; @@ -271,10 +271,13 @@ struct mv_cesa_op_ctx { /* TDMA descriptor flags */ #define CESA_TDMA_DST_IN_SRAM BIT(31) #define CESA_TDMA_SRC_IN_SRAM BIT(30) -#define CESA_TDMA_TYPE_MSK GENMASK(29, 0) +#define CESA_TDMA_END_OF_REQ BIT(29) +#define CESA_TDMA_BREAK_CHAIN BIT(28) +#define CESA_TDMA_TYPE_MSK GENMASK(27, 0) #define CESA_TDMA_DUMMY 0 #define CESA_TDMA_DATA 1 #define CESA_TDMA_OP 2 +#define CESA_TDMA_IV 3 /** * struct mv_cesa_tdma_desc - TDMA descriptor @@ -390,6 +393,7 @@ struct mv_cesa_dev_dma { struct dma_pool *op_pool; struct dma_pool *cache_pool; struct dma_pool *padding_pool; + struct dma_pool *iv_pool; }; /** @@ -398,7 +402,6 @@ struct mv_cesa_dev_dma { * @regs: device registers * @sram_size: usable SRAM size * @lock: device lock - * @queue: crypto request queue * @engines: array of engines * @dma: dma pools * @@ -410,7 +413,6 @@ struct mv_cesa_dev { struct device *dev; unsigned int sram_size; spinlock_t lock; - struct crypto_queue queue; struct mv_cesa_engine *engines; struct mv_cesa_dev_dma *dma; }; @@ -429,6 +431,11 @@ struct mv_cesa_dev { * @int_mask: interrupt mask cache * @pool: memory pool pointing to the memory region reserved in * SRAM + * @queue: fifo of the pending crypto requests + * @load: engine load counter, useful for load balancing + * @chain: list of the current tdma descriptors being processed + * by this engine. + * @complete_queue: fifo of the processed requests by the engine * * Structure storing CESA engine information. */ @@ -444,23 +451,27 @@ struct mv_cesa_engine { size_t max_req_len; u32 int_mask; struct gen_pool *pool; + struct crypto_queue queue; + atomic_t load; + struct mv_cesa_tdma_chain chain; + struct list_head complete_queue; }; /** * struct mv_cesa_req_ops - CESA request operations - * @prepare: prepare a request to be executed on the specified engine * @process: process a request chunk result (should return 0 if the * operation, -EINPROGRESS if it needs more steps or an error * code) * @step: launch the crypto operation on the next chunk * @cleanup: cleanup the crypto request (release associated data) + * @complete: complete the request, i.e copy result or context from sram when + * needed. 
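/*
 * Sketch (not from the patch): with the reworked flag layout the
 * descriptor type lives in bits 27:0 and must be compared after
 * masking, while CESA_TDMA_END_OF_REQ and CESA_TDMA_BREAK_CHAIN are
 * independent bits.
 */
static inline bool example_is_iv_desc(const struct mv_cesa_tdma_desc *tdma)
{
	return (tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_IV;
}

static inline bool example_ends_request(const struct mv_cesa_tdma_desc *tdma)
{
	return tdma->flags & CESA_TDMA_END_OF_REQ;
}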
*/ struct mv_cesa_req_ops { - void (*prepare)(struct crypto_async_request *req, - struct mv_cesa_engine *engine); int (*process)(struct crypto_async_request *req, u32 status); void (*step)(struct crypto_async_request *req); void (*cleanup)(struct crypto_async_request *req); + void (*complete)(struct crypto_async_request *req); }; /** @@ -507,21 +518,11 @@ enum mv_cesa_req_type { /** * struct mv_cesa_req - CESA request - * @type: request type * @engine: engine associated with this request + * @chain: list of tdma descriptors associated with this request */ struct mv_cesa_req { - enum mv_cesa_req_type type; struct mv_cesa_engine *engine; -}; - -/** - * struct mv_cesa_tdma_req - CESA TDMA request - * @base: base information - * @chain: TDMA chain - */ -struct mv_cesa_tdma_req { - struct mv_cesa_req base; struct mv_cesa_tdma_chain chain; }; @@ -538,13 +539,11 @@ struct mv_cesa_sg_std_iter { /** * struct mv_cesa_ablkcipher_std_req - cipher standard request - * @base: base information * @op: operation context * @offset: current operation offset * @size: size of the crypto operation */ struct mv_cesa_ablkcipher_std_req { - struct mv_cesa_req base; struct mv_cesa_op_ctx op; unsigned int offset; unsigned int size; @@ -558,34 +557,27 @@ struct mv_cesa_ablkcipher_std_req { * @dst_nents: number of entries in the dest sg list */ struct mv_cesa_ablkcipher_req { - union { - struct mv_cesa_req base; - struct mv_cesa_tdma_req dma; - struct mv_cesa_ablkcipher_std_req std; - } req; + struct mv_cesa_req base; + struct mv_cesa_ablkcipher_std_req std; int src_nents; int dst_nents; }; /** * struct mv_cesa_ahash_std_req - standard hash request - * @base: base information * @offset: current operation offset */ struct mv_cesa_ahash_std_req { - struct mv_cesa_req base; unsigned int offset; }; /** * struct mv_cesa_ahash_dma_req - DMA hash request - * @base: base information * @padding: padding buffer * @padding_dma: DMA address of the padding buffer * @cache_dma: DMA address of the cache buffer */ struct mv_cesa_ahash_dma_req { - struct mv_cesa_tdma_req base; u8 *padding; dma_addr_t padding_dma; u8 *cache; @@ -604,8 +596,8 @@ struct mv_cesa_ahash_dma_req { * @state: hash state */ struct mv_cesa_ahash_req { + struct mv_cesa_req base; union { - struct mv_cesa_req base; struct mv_cesa_ahash_dma_req dma; struct mv_cesa_ahash_std_req std; } req; @@ -623,6 +615,35 @@ struct mv_cesa_ahash_req { extern struct mv_cesa_dev *cesa_dev; + +static inline void +mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine, + struct crypto_async_request *req) +{ + list_add_tail(&req->list, &engine->complete_queue); +} + +static inline struct crypto_async_request * +mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine) +{ + struct crypto_async_request *req; + + req = list_first_entry_or_null(&engine->complete_queue, + struct crypto_async_request, + list); + if (req) + list_del(&req->list); + + return req; +} + + +static inline enum mv_cesa_req_type +mv_cesa_req_get_type(struct mv_cesa_req *req) +{ + return req->chain.first ? 
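/*
 * Note (illustrative): the stored request type field is gone - a
 * request is a DMA request exactly when it owns a TDMA chain, so the
 * standard path must leave the chain pointers NULL, as the
 * *_std_req_init() hunks below do:
 *
 *	basereq->chain.first = NULL;
 *	basereq->chain.last = NULL;	// => CESA_STD_REQ
 */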
CESA_DMA_REQ : CESA_STD_REQ; +} + static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg, u32 mask) { @@ -695,7 +716,32 @@ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op) CESA_SA_DESC_CFG_FIRST_FRAG; } -int mv_cesa_queue_req(struct crypto_async_request *req); +int mv_cesa_queue_req(struct crypto_async_request *req, + struct mv_cesa_req *creq); + +struct crypto_async_request * +mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine, + struct crypto_async_request **backlog); + +static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight) +{ + int i; + u32 min_load = U32_MAX; + struct mv_cesa_engine *selected = NULL; + + for (i = 0; i < cesa_dev->caps->nengines; i++) { + struct mv_cesa_engine *engine = cesa_dev->engines + i; + u32 load = atomic_read(&engine->load); + if (load < min_load) { + min_load = load; + selected = engine; + } + } + + atomic_add(weight, &selected->load); + + return selected; +} /* * Helper function that indicates whether a crypto request needs to be @@ -765,9 +811,9 @@ static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter) return iter->op_len; } -void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); +void mv_cesa_dma_step(struct mv_cesa_req *dreq); -static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, +static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq, u32 status) { if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) @@ -779,10 +825,13 @@ static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, return 0; } -void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, +void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, struct mv_cesa_engine *engine); +void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq); +void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, + struct mv_cesa_req *dreq); +int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status); -void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq); static inline void mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) @@ -790,6 +839,9 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) memset(chain, 0, sizeof(*chain)); } +int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, + u32 size, u32 flags, gfp_t gfp_flags); + struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, const struct mv_cesa_op_ctx *op_templ, bool skip_ctx, @@ -70,25 +70,28 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_BIDIRECTIONAL); } - mv_cesa_dma_cleanup(&creq->req.dma); + mv_cesa_dma_cleanup(&creq->base); } static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - if (creq->req.base.type == CESA_DMA_REQ) + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) mv_cesa_ablkcipher_dma_cleanup(req); } static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; + struct mv_cesa_engine *engine = creq->base.engine; size_t len = min_t(size_t, req->nbytes - sreq->offset, CESA_SA_SRAM_PAYLOAD_SIZE); + mv_cesa_adjust_op(engine, &sreq->op); + memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op)); + len = sg_pcopy_to_buffer(req->src, creq->src_nents, 
engine->sram + CESA_SA_DATA_SRAM_OFFSET, len, sreq->offset); @@ -106,6 +109,8 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + BUG_ON(readl(engine->regs + CESA_SA_CMD) & + CESA_SA_CMD_EN_CESA_SA_ACCL0); writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); } @@ -113,8 +118,8 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, u32 status) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; + struct mv_cesa_engine *engine = creq->base.engine; size_t len; len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, @@ -133,21 +138,19 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, { struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_req *basereq = &creq->base; + unsigned int ivsize; int ret; - if (creq->req.base.type == CESA_DMA_REQ) - ret = mv_cesa_dma_process(&creq->req.dma, status); - else - ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ) + return mv_cesa_ablkcipher_std_process(ablkreq, status); + ret = mv_cesa_dma_process(basereq, status); if (ret) return ret; - memcpy_fromio(ablkreq->info, - engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, - crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq))); + ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)); + memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize); return 0; } @@ -157,8 +160,8 @@ static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - if (creq->req.base.type == CESA_DMA_REQ) - mv_cesa_dma_step(&creq->req.dma); + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->base); else mv_cesa_ablkcipher_std_step(ablkreq); } @@ -167,22 +170,19 @@ static inline void mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - struct mv_cesa_tdma_req *dreq = &creq->req.dma; + struct mv_cesa_req *basereq = &creq->base; - mv_cesa_dma_prepare(dreq, dreq->base.engine); + mv_cesa_dma_prepare(basereq, basereq->engine); } static inline void mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; sreq->size = 0; sreq->offset = 0; - mv_cesa_adjust_op(engine, &sreq->op); - memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op)); } static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, @@ -190,9 +190,9 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, { struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); - creq->req.base.engine = engine; 
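/*
 * Note (illustrative): the op descriptor is now (re)written to SRAM in
 * every ->step() call rather than once in ->prepare(), because several
 * queued requests may share an engine and each step must restore its
 * own operation context:
 *
 *	mv_cesa_adjust_op(engine, &sreq->op);
 *	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
 */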
+ creq->base.engine = engine; - if (creq->req.base.type == CESA_DMA_REQ) + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) mv_cesa_ablkcipher_dma_prepare(ablkreq); else mv_cesa_ablkcipher_std_prepare(ablkreq); @@ -206,11 +206,34 @@ mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) mv_cesa_ablkcipher_cleanup(ablkreq); } +static void +mv_cesa_ablkcipher_complete(struct crypto_async_request *req) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + struct mv_cesa_engine *engine = creq->base.engine; + unsigned int ivsize; + + atomic_sub(ablkreq->nbytes, &engine->load); + ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)); + + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) { + struct mv_cesa_req *basereq; + + basereq = &creq->base; + memcpy(ablkreq->info, basereq->chain.last->data, ivsize); + } else { + memcpy_fromio(ablkreq->info, + engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, + ivsize); + } +} + static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { .step = mv_cesa_ablkcipher_step, .process = mv_cesa_ablkcipher_process, - .prepare = mv_cesa_ablkcipher_prepare, .cleanup = mv_cesa_ablkcipher_req_cleanup, + .complete = mv_cesa_ablkcipher_complete, }; static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) @@ -295,15 +318,15 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - struct mv_cesa_tdma_req *dreq = &creq->req.dma; + struct mv_cesa_req *basereq = &creq->base; struct mv_cesa_ablkcipher_dma_iter iter; struct mv_cesa_tdma_chain chain; bool skip_ctx = false; int ret; + unsigned int ivsize; - dreq->base.type = CESA_DMA_REQ; - dreq->chain.first = NULL; - dreq->chain.last = NULL; + basereq->chain.first = NULL; + basereq->chain.last = NULL; if (req->src != req->dst) { ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, @@ -358,12 +381,21 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); - dreq->chain = chain; + /* Add output data for IV */ + ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); + ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET, + ivsize, CESA_TDMA_SRC_IN_SRAM, flags); + + if (ret) + goto err_free_tdma; + + basereq->chain = chain; + basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ; return 0; err_free_tdma: - mv_cesa_dma_cleanup(dreq); + mv_cesa_dma_cleanup(basereq); if (req->dst != req->src) dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, DMA_FROM_DEVICE); @@ -380,11 +412,13 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, const struct mv_cesa_op_ctx *op_templ) { struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); - struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; + struct mv_cesa_req *basereq = &creq->base; - sreq->base.type = CESA_STD_REQ; sreq->op = *op_templ; sreq->skip_ctx = false; + basereq->chain.first = NULL; + basereq->chain.last = NULL; return 0; } @@ -414,7 +448,6 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, CESA_SA_DESC_CFG_OP_MSK); - /* TODO: add a threshold for DMA usage */ if (cesa_dev->caps->has_tdma) ret = 
mv_cesa_ablkcipher_dma_req_init(req, tmpl); else @@ -423,28 +456,41 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, return ret; } -static int mv_cesa_des_op(struct ablkcipher_request *req, - struct mv_cesa_op_ctx *tmpl) +static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) { - struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); int ret; - - mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, - CESA_SA_DESC_CFG_CRYPTM_MSK); - - memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_engine *engine; ret = mv_cesa_ablkcipher_req_init(req, tmpl); if (ret) return ret; - ret = mv_cesa_queue_req(&req->base); + engine = mv_cesa_select_engine(req->nbytes); + mv_cesa_ablkcipher_prepare(&req->base, engine); + + ret = mv_cesa_queue_req(&req->base, &creq->base); + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ablkcipher_cleanup(req); return ret; } +static int mv_cesa_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); + + return mv_cesa_ablkcipher_queue_req(req, tmpl); +} + static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) { struct mv_cesa_op_ctx tmpl; @@ -547,22 +593,13 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req, struct mv_cesa_op_ctx *tmpl) { struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - int ret; mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES, CESA_SA_DESC_CFG_CRYPTM_MSK); memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); - ret = mv_cesa_ablkcipher_req_init(req, tmpl); - if (ret) - return ret; - - ret = mv_cesa_queue_req(&req->base); - if (mv_cesa_req_needs_cleanup(&req->base, ret)) - mv_cesa_ablkcipher_cleanup(req); - - return ret; + return mv_cesa_ablkcipher_queue_req(req, tmpl); } static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) @@ -673,7 +710,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, struct mv_cesa_op_ctx *tmpl) { struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - int ret, i; + int i; u32 *key; u32 cfg; @@ -696,15 +733,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req, CESA_SA_DESC_CFG_CRYPTM_MSK | CESA_SA_DESC_CFG_AES_LEN_MSK); - ret = mv_cesa_ablkcipher_req_init(req, tmpl); - if (ret) - return ret; - - ret = mv_cesa_queue_req(&req->base); - if (mv_cesa_req_needs_cleanup(&req->base, ret)) - mv_cesa_ablkcipher_cleanup(req); - - return ret; + return mv_cesa_ablkcipher_queue_req(req, tmpl); } static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) @@ -103,14 +103,14 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); mv_cesa_ahash_dma_free_cache(&creq->req.dma); - mv_cesa_dma_cleanup(&creq->req.dma.base); + mv_cesa_dma_cleanup(&creq->base); } static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); - if (creq->req.base.type == CESA_DMA_REQ) + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) mv_cesa_ahash_dma_cleanup(req); } @@ -118,7 +118,7 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = 
ahash_request_ctx(req); - if (creq->req.base.type == CESA_DMA_REQ) + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) mv_cesa_ahash_dma_last_cleanup(req); } @@ -157,11 +157,23 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); struct mv_cesa_ahash_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_engine *engine = creq->base.engine; struct mv_cesa_op_ctx *op; unsigned int new_cache_ptr = 0; u32 frag_mode; size_t len; + unsigned int digsize; + int i; + + mv_cesa_adjust_op(engine, &creq->op_tmpl); + memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); + + digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); + for (i = 0; i < digsize / 4; i++) + writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i)); + + mv_cesa_adjust_op(engine, &creq->op_tmpl); + memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); if (creq->cache_ptr) memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET, @@ -237,6 +249,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req) mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + BUG_ON(readl(engine->regs + CESA_SA_CMD) & + CESA_SA_CMD_EN_CESA_SA_ACCL0); writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); } @@ -254,20 +268,17 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); - struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; + struct mv_cesa_req *basereq = &creq->base; - mv_cesa_dma_prepare(dreq, dreq->base.engine); + mv_cesa_dma_prepare(basereq, basereq->engine); } static void mv_cesa_ahash_std_prepare(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); struct mv_cesa_ahash_std_req *sreq = &creq->req.std; - struct mv_cesa_engine *engine = sreq->base.engine; sreq->offset = 0; - mv_cesa_adjust_op(engine, &creq->op_tmpl); - memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); } static void mv_cesa_ahash_step(struct crypto_async_request *req) @@ -275,8 +286,8 @@ static void mv_cesa_ahash_step(struct crypto_async_request *req) struct ahash_request *ahashreq = ahash_request_cast(req); struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); - if (creq->req.base.type == CESA_DMA_REQ) - mv_cesa_dma_step(&creq->req.dma.base); + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->base); else mv_cesa_ahash_std_step(ahashreq); } @@ -285,17 +296,20 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) { struct ahash_request *ahashreq = ahash_request_cast(req); struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); - struct mv_cesa_engine *engine = creq->req.base.engine; - unsigned int digsize; - int ret, i; - if (creq->req.base.type == CESA_DMA_REQ) - ret = mv_cesa_dma_process(&creq->req.dma.base, status); - else - ret = mv_cesa_ahash_std_process(ahashreq, status); + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) + return mv_cesa_dma_process(&creq->base, status); - if (ret == -EINPROGRESS) - return ret; + return mv_cesa_ahash_std_process(ahashreq, status); +} + +static void mv_cesa_ahash_complete(struct crypto_async_request *req) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = 
ahash_request_ctx(ahashreq); + struct mv_cesa_engine *engine = creq->base.engine; + unsigned int digsize; + int i; digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); for (i = 0; i < digsize / 4; i++) @@ -325,7 +339,7 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) } } - return ret; + atomic_sub(ahashreq->nbytes, &engine->load); } static void mv_cesa_ahash_prepare(struct crypto_async_request *req, @@ -333,19 +347,13 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req, { struct ahash_request *ahashreq = ahash_request_cast(req); struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); - unsigned int digsize; - int i; - creq->req.base.engine = engine; + creq->base.engine = engine; - if (creq->req.base.type == CESA_DMA_REQ) + if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) mv_cesa_ahash_dma_prepare(ahashreq); else mv_cesa_ahash_std_prepare(ahashreq); - - digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); - for (i = 0; i < digsize / 4; i++) - writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i)); } static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) @@ -362,8 +370,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { .step = mv_cesa_ahash_step, .process = mv_cesa_ahash_process, - .prepare = mv_cesa_ahash_prepare, .cleanup = mv_cesa_ahash_req_cleanup, + .complete = mv_cesa_ahash_complete, }; static int mv_cesa_ahash_init(struct ahash_request *req, @@ -553,15 +561,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; - struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; - struct mv_cesa_tdma_req *dreq = &ahashdreq->base; + struct mv_cesa_req *basereq = &creq->base; struct mv_cesa_ahash_dma_iter iter; struct mv_cesa_op_ctx *op = NULL; unsigned int frag_len; int ret; - dreq->chain.first = NULL; - dreq->chain.last = NULL; + basereq->chain.first = NULL; + basereq->chain.last = NULL; if (creq->src_nents) { ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, @@ -572,14 +579,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) } } - mv_cesa_tdma_desc_iter_init(&dreq->chain); + mv_cesa_tdma_desc_iter_init(&basereq->chain); mv_cesa_ahash_req_iter_init(&iter, req); /* * Add the cache (left-over data from a previous block) first. * This will never overflow the SRAM size. */ - ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags); + ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags); if (ret) goto err_free_tdma; @@ -590,7 +597,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) * data. We intentionally do not add the final op block. */ while (true) { - ret = mv_cesa_dma_add_op_transfers(&dreq->chain, + ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base, &iter.src, flags); if (ret) @@ -601,7 +608,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (!mv_cesa_ahash_req_iter_next_op(&iter)) break; - op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, + op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl, frag_len, flags); if (IS_ERR(op)) { ret = PTR_ERR(op); @@ -619,10 +626,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) * operation, which depends whether this is the final request. 
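/*
 * Note (illustrative): engine load accounting is symmetric - the
 * request size is credited when an engine is chosen and debited in the
 * new ->complete() hooks:
 *
 *	submit:     engine = mv_cesa_select_engine(req->nbytes);
 *	            (does atomic_add(req->nbytes, &engine->load))
 *	complete:   atomic_sub(req->nbytes, &engine->load);
 */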
*/ if (creq->last_req) - op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq, + op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq, frag_len, flags); else if (frag_len) - op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, + op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl, frag_len, flags); if (IS_ERR(op)) { @@ -632,7 +639,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (op) { /* Add dummy desc to wait for crypto operation end */ - ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags); + ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags); if (ret) goto err_free_tdma; } @@ -643,10 +650,13 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) else creq->cache_ptr = 0; + basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ | + CESA_TDMA_BREAK_CHAIN); + return 0; err_free_tdma: - mv_cesa_dma_cleanup(dreq); + mv_cesa_dma_cleanup(basereq); dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); err: @@ -660,11 +670,6 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); int ret; - if (cesa_dev->caps->has_tdma) - creq->req.base.type = CESA_DMA_REQ; - else - creq->req.base.type = CESA_STD_REQ; - creq->src_nents = sg_nents_for_len(req->src, req->nbytes); if (creq->src_nents < 0) { dev_err(cesa_dev->dev, "Invalid number of src SG"); @@ -678,19 +683,19 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) if (*cached) return 0; - if (creq->req.base.type == CESA_DMA_REQ) + if (cesa_dev->caps->has_tdma) ret = mv_cesa_ahash_dma_req_init(req); return ret; } -static int mv_cesa_ahash_update(struct ahash_request *req) +static int mv_cesa_ahash_queue_req(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_engine *engine; bool cached = false; int ret; - creq->len += req->nbytes; ret = mv_cesa_ahash_req_init(req, &cached); if (ret) return ret; @@ -698,61 +703,48 @@ static int mv_cesa_ahash_update(struct ahash_request *req) if (cached) return 0; - ret = mv_cesa_queue_req(&req->base); + engine = mv_cesa_select_engine(req->nbytes); + mv_cesa_ahash_prepare(&req->base, engine); + + ret = mv_cesa_queue_req(&req->base, &creq->base); + if (mv_cesa_req_needs_cleanup(&req->base, ret)) mv_cesa_ahash_cleanup(req); return ret; } +static int mv_cesa_ahash_update(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + creq->len += req->nbytes; + + return mv_cesa_ahash_queue_req(req); +} + static int mv_cesa_ahash_final(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; - bool cached = false; - int ret; mv_cesa_set_mac_op_total_len(tmpl, creq->len); creq->last_req = true; req->nbytes = 0; - ret = mv_cesa_ahash_req_init(req, &cached); - if (ret) - return ret; - - if (cached) - return 0; - - ret = mv_cesa_queue_req(&req->base); - if (mv_cesa_req_needs_cleanup(&req->base, ret)) - mv_cesa_ahash_cleanup(req); - - return ret; + return mv_cesa_ahash_queue_req(req); } static int mv_cesa_ahash_finup(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; - bool cached = false; - int ret; creq->len += req->nbytes; mv_cesa_set_mac_op_total_len(tmpl, creq->len); creq->last_req = true; - ret = mv_cesa_ahash_req_init(req, &cached); - if (ret) - return ret; - - if (cached) - return 0; - - ret = 
mv_cesa_queue_req(&req->base); - if (mv_cesa_req_needs_cleanup(&req->base, ret)) - mv_cesa_ahash_cleanup(req); - - return ret; + return mv_cesa_ahash_queue_req(req); } static int mv_cesa_ahash_export(struct ahash_request *req, void *hash, @@ -37,9 +37,9 @@ bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, return true; } -void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) +void mv_cesa_dma_step(struct mv_cesa_req *dreq) { - struct mv_cesa_engine *engine = dreq->base.engine; + struct mv_cesa_engine *engine = dreq->engine; writel_relaxed(0, engine->regs + CESA_SA_CFG); @@ -53,19 +53,25 @@ void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) engine->regs + CESA_SA_CFG); writel_relaxed(dreq->chain.first->cur_dma, engine->regs + CESA_TDMA_NEXT_ADDR); + BUG_ON(readl(engine->regs + CESA_SA_CMD) & + CESA_SA_CMD_EN_CESA_SA_ACCL0); writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); } -void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) +void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq) { struct mv_cesa_tdma_desc *tdma; for (tdma = dreq->chain.first; tdma;) { struct mv_cesa_tdma_desc *old_tdma = tdma; + u32 type = tdma->flags & CESA_TDMA_TYPE_MSK; - if (tdma->flags & CESA_TDMA_OP) + if (type == CESA_TDMA_OP) dma_pool_free(cesa_dev->dma->op_pool, tdma->op, le32_to_cpu(tdma->src)); + else if (type == CESA_TDMA_IV) + dma_pool_free(cesa_dev->dma->iv_pool, tdma->data, + le32_to_cpu(tdma->dst)); tdma = tdma->next; dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, @@ -76,7 +82,7 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) dreq->chain.last = NULL; } -void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, +void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, struct mv_cesa_engine *engine) { struct mv_cesa_tdma_desc *tdma; @@ -88,11 +94,97 @@ void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); - if (tdma->flags & CESA_TDMA_OP) + if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP) mv_cesa_adjust_op(engine, tdma->op); } } +void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, + struct mv_cesa_req *dreq) +{ + if (engine->chain.first == NULL && engine->chain.last == NULL) { + engine->chain.first = dreq->chain.first; + engine->chain.last = dreq->chain.last; + } else { + struct mv_cesa_tdma_desc *last; + + last = engine->chain.last; + last->next = dreq->chain.first; + engine->chain.last = dreq->chain.last; + + if (!(last->flags & CESA_TDMA_BREAK_CHAIN)) + last->next_dma = dreq->chain.first->cur_dma; + } +} + +int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) +{ + struct crypto_async_request *req = NULL; + struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL; + dma_addr_t tdma_cur; + int res = 0; + + tdma_cur = readl(engine->regs + CESA_TDMA_CUR); + + for (tdma = engine->chain.first; tdma; tdma = next) { + spin_lock_bh(&engine->lock); + next = tdma->next; + spin_unlock_bh(&engine->lock); + + if (tdma->flags & CESA_TDMA_END_OF_REQ) { + struct crypto_async_request *backlog = NULL; + struct mv_cesa_ctx *ctx; + u32 current_status; + + spin_lock_bh(&engine->lock); + /* + * if req is NULL, this means we're processing the + * request in engine->req. 
+ */ + if (!req) + req = engine->req; + else + req = mv_cesa_dequeue_req_locked(engine, + &backlog); + + /* Re-chaining to the next request */ + engine->chain.first = tdma->next; + tdma->next = NULL; + + /* If this is the last request, clear the chain */ + if (engine->chain.first == NULL) + engine->chain.last = NULL; + spin_unlock_bh(&engine->lock); + + ctx = crypto_tfm_ctx(req->tfm); + current_status = (tdma->cur_dma == tdma_cur) ? + status : CESA_SA_INT_ACC0_IDMA_DONE; + res = ctx->ops->process(req, current_status); + ctx->ops->complete(req); + + if (res == 0) + mv_cesa_engine_enqueue_complete_request(engine, + req); + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + } + + if (res || tdma->cur_dma == tdma_cur) + break; + } + + /* Save the last request in error to engine->req, so that the core + * knows which request was faulty */ + if (res) { + spin_lock_bh(&engine->lock); + engine->req = req; + spin_unlock_bh(&engine->lock); + } + + return res; +} + static struct mv_cesa_tdma_desc * mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) { @@ -117,6 +209,32 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) return new_tdma; } +int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, + u32 size, u32 flags, gfp_t gfp_flags) +{ + + struct mv_cesa_tdma_desc *tdma; + u8 *iv; + dma_addr_t dma_handle; + + tdma = mv_cesa_dma_add_desc(chain, gfp_flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle); + if (!iv) + return -ENOMEM; + + tdma->byte_cnt = cpu_to_le32(size | BIT(31)); + tdma->src = src; + tdma->dst = cpu_to_le32(dma_handle); + tdma->data = iv; + + flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); + tdma->flags = flags | CESA_TDMA_IV; + return 0; +} + struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, const struct mv_cesa_op_ctx *op_templ, bool skip_ctx, @@ -11,7 +11,6 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -25,6 +24,7 @@ #include <crypto/aes.h> #include <crypto/sha.h> #include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #define DCP_MAX_CHANS 4 #define DCP_BUF_SZ PAGE_SIZE @@ -84,7 +84,7 @@ struct dcp_async_ctx { unsigned int hot:1; /* Crypto-specific context */ - struct crypto_ablkcipher *fallback; + struct crypto_skcipher *fallback; unsigned int key_len; uint8_t key[AES_KEYSIZE_128]; }; @@ -374,20 +374,22 @@ static int dcp_chan_thread_aes(void *data) static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); - struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx( - crypto_ablkcipher_reqtfm(req)); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); int ret; - ablkcipher_request_set_tfm(req, ctx->fallback); + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); if (enc) - ret = crypto_ablkcipher_encrypt(req); + ret = crypto_skcipher_encrypt(subreq); else - ret = crypto_ablkcipher_decrypt(req); + ret = crypto_skcipher_decrypt(subreq); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + 
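/*
 * The same on-stack sub-request pattern, in isolation (a sketch, using
 * only kernel crypto API calls that appear in the conversions above;
 * "fallback" is a crypto_skcipher allocated with
 * CRYPTO_ALG_NEED_FALLBACK):
 */
static int example_fallback_crypt(struct crypto_skcipher *fallback,
				  struct ablkcipher_request *req, bool enc)
{
	SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);
	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe request state from the stack */
	return ret;
}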
skcipher_request_zero(subreq); return ret; } @@ -453,28 +455,22 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, return 0; } - /* Check if the key size is supported by kernel at all. */ - if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { - tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - /* * If the requested AES key size is not supported by the hardware, * but is supported by in-kernel software implementation, we use * software fallback. */ - actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - actx->fallback->base.crt_flags |= - tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK; + crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(actx->fallback, + tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_ablkcipher_setkey(actx->fallback, key, len); + ret = crypto_skcipher_setkey(actx->fallback, key, len); if (!ret) return 0; tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->base.crt_flags |= - actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK; + tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) & + CRYPTO_TFM_RES_MASK; return ret; } @@ -484,9 +480,9 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm) const char *name = crypto_tfm_alg_name(tfm); const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); - struct crypto_ablkcipher *blk; + struct crypto_skcipher *blk; - blk = crypto_alloc_ablkcipher(name, 0, flags); + blk = crypto_alloc_skcipher(name, 0, flags); if (IS_ERR(blk)) return PTR_ERR(blk); @@ -499,8 +495,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm) { struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); - crypto_free_ablkcipher(actx->fallback); - actx->fallback = NULL; + crypto_free_skcipher(actx->fallback); } /* @@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev, ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && i < msc->triplets; i++) { - if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { + if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) { dev_err(dev, "unknown function code/mode " "combo: %d/%d (ignored)\n", msc->fc, msc->mode); @@ -528,8 +528,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) omap_aes_dma_stop(dd); - dmaengine_terminate_all(dd->dma_lch_in); - dmaengine_terminate_all(dd->dma_lch_out); return 0; } @@ -580,10 +578,12 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd) sg_init_table(&dd->in_sgl, 1); sg_set_buf(&dd->in_sgl, buf_in, total); dd->in_sg = &dd->in_sgl; + dd->in_sg_len = 1; sg_init_table(&dd->out_sgl, 1); sg_set_buf(&dd->out_sgl, buf_out, total); dd->out_sg = &dd->out_sgl; + dd->out_sg_len = 1; return 0; } @@ -604,7 +604,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, crypto_ablkcipher_reqtfm(req)); struct omap_aes_dev *dd = omap_aes_find_dev(ctx); struct omap_aes_reqctx *rctx; - int len; if (!dd) return -ENODEV; @@ -616,6 +615,14 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, dd->in_sg = req->src; dd->out_sg = req->dst; + dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); + if (dd->in_sg_len < 0) + return dd->in_sg_len; + + dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total); + if (dd->out_sg_len < 0) + return dd->out_sg_len; + if (omap_aes_check_aligned(dd->in_sg, dd->total) || omap_aes_check_aligned(dd->out_sg, dd->total)) { if (omap_aes_copy_sgs(dd)) @@ -625,11 +632,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, 
dd->sgs_copied = 0; } - len = ALIGN(dd->total, AES_BLOCK_SIZE); - dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len); - dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len); - BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); - rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; @@ -1185,17 +1187,19 @@ static int omap_aes_probe(struct platform_device *pdev) spin_unlock(&list_lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { - for (j = 0; j < dd->pdata->algs_info[i].size; j++) { - algp = &dd->pdata->algs_info[i].algs_list[j]; + if (!dd->pdata->algs_info[i].registered) { + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + algp = &dd->pdata->algs_info[i].algs_list[j]; - pr_debug("reg alg: %s\n", algp->cra_name); - INIT_LIST_HEAD(&algp->cra_list); + pr_debug("reg alg: %s\n", algp->cra_name); + INIT_LIST_HEAD(&algp->cra_list); - err = crypto_register_alg(algp); - if (err) - goto err_algs; + err = crypto_register_alg(algp); + if (err) + goto err_algs; - dd->pdata->algs_info[i].registered++; + dd->pdata->algs_info[i].registered++; + } } } @@ -560,10 +560,12 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd) sg_init_table(&dd->in_sgl, 1); sg_set_buf(&dd->in_sgl, buf_in, dd->total); dd->in_sg = &dd->in_sgl; + dd->in_sg_len = 1; sg_init_table(&dd->out_sgl, 1); sg_set_buf(&dd->out_sgl, buf_out, dd->total); dd->out_sg = &dd->out_sgl; + dd->out_sg_len = 1; return 0; } @@ -595,6 +597,14 @@ static int omap_des_prepare_req(struct crypto_engine *engine, dd->in_sg = req->src; dd->out_sg = req->dst; + dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); + if (dd->in_sg_len < 0) + return dd->in_sg_len; + + dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total); + if (dd->out_sg_len < 0) + return dd->out_sg_len; + if (omap_des_copy_needed(dd->in_sg) || omap_des_copy_needed(dd->out_sg)) { if (omap_des_copy_sgs(dd)) @@ -604,10 +614,6 @@ static int omap_des_prepare_req(struct crypto_engine *engine, dd->sgs_copied = 0; } - dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total); - dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total); - BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0); - rctx = ablkcipher_request_ctx(req); ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= FLAGS_MODE_MASK; @@ -100,6 +100,8 @@ #define DEFAULT_TIMEOUT_INTERVAL HZ +#define DEFAULT_AUTOSUSPEND_DELAY 1000 + /* mostly device flags */ #define FLAGS_BUSY 0 #define FLAGS_FINAL 1 @@ -173,7 +175,7 @@ struct omap_sham_ctx { struct omap_sham_hmac_ctx base[0]; }; -#define OMAP_SHAM_QUEUE_LENGTH 1 +#define OMAP_SHAM_QUEUE_LENGTH 10 struct omap_sham_algs_info { struct ahash_alg *algs_list; @@ -813,7 +815,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - dmaengine_terminate_all(dd->dma_lch); if (ctx->flags & BIT(FLAGS_SG)) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); @@ -999,7 +1000,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); - pm_runtime_put(dd->dev); + pm_runtime_mark_last_busy(dd->dev); + pm_runtime_put_autosuspend(dd->dev); if (req->base.complete) req->base.complete(&req->base, err); @@ -1093,7 +1095,7 @@ static int omap_sham_update(struct ahash_request *req) ctx->offset = 0; if (ctx->flags & BIT(FLAGS_FINUP)) { - if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { + if 
((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) { /* * OMAP HW accel works only with buffers >= 9 * will switch to bypass in final() @@ -1149,9 +1151,13 @@ static int omap_sham_final(struct ahash_request *req) if (ctx->flags & BIT(FLAGS_ERROR)) return 0; /* uncompleted hash is not needed */ - /* OMAP HW accel works only with buffers >= 9 */ - /* HMAC is always >= 9 because ipad == block size */ - if ((ctx->digcnt + ctx->bufcnt) < 9) + /* + * OMAP HW accel works only with buffers >= 9. + * HMAC is always >= 9 because ipad == block size. + * If buffersize is less than 240, we use fallback SW encoding, + * as using DMA + HW in this case doesn't provide any benefit. + */ + if ((ctx->digcnt + ctx->bufcnt) < 240) return omap_sham_final_shash(req); else if (ctx->bufcnt) return omap_sham_enqueue(req, OP_FINAL); @@ -1328,7 +1334,7 @@ static struct ahash_alg algs_sha1_md5[] = { .halg.base = { .cra_name = "sha1", .cra_driver_name = "omap-sha1", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | @@ -1351,7 +1357,7 @@ static struct ahash_alg algs_sha1_md5[] = { .halg.base = { .cra_name = "md5", .cra_driver_name = "omap-md5", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | @@ -1375,7 +1381,7 @@ static struct ahash_alg algs_sha1_md5[] = { .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "omap-hmac-sha1", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | @@ -1400,7 +1406,7 @@ static struct ahash_alg algs_sha1_md5[] = { .halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "omap-hmac-md5", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | @@ -1428,7 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .halg.base = { .cra_name = "sha224", .cra_driver_name = "omap-sha224", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1450,7 +1456,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .halg.base = { .cra_name = "sha256", .cra_driver_name = "omap-sha256", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1473,7 +1479,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "omap-hmac-sha224", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1497,7 +1503,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "omap-hmac-sha256", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1523,7 +1529,7 @@ static struct ahash_alg algs_sha384_sha512[] = { .halg.base = { .cra_name = "sha384", .cra_driver_name = "omap-sha384", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1545,7 +1551,7 @@ static struct ahash_alg algs_sha384_sha512[] = { .halg.base = { .cra_name = "sha512", .cra_driver_name = "omap-sha512", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1568,7 +1574,7 
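
[Annotation: the omap-sham changes above move from a bare pm_runtime_put() to the runtime-PM autosuspend idiom, so the accelerator stays powered across back-to-back requests and only suspends after an idle window. A condensed sketch of that conversion; the delay constant simply mirrors the DEFAULT_AUTOSUSPEND_DELAY introduced by the patch:

#include <linux/pm_runtime.h>

#define EXAMPLE_AUTOSUSPEND_DELAY_MS	1000	/* as in the patch above */

/* probe(): opt in to autosuspend before enabling runtime PM */
static void example_pm_setup(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, EXAMPLE_AUTOSUSPEND_DELAY_MS);
	pm_runtime_enable(dev);
}

/* request completion: refresh the idle timer instead of dropping the
 * usage count immediately */
static void example_pm_put(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
]
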
@@ static struct ahash_alg algs_sha384_sha512[] = { .halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "omap-hmac-sha384", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1592,7 +1598,7 @@ static struct ahash_alg algs_sha384_sha512[] = { .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "omap-hmac-sha512", - .cra_priority = 100, + .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, @@ -1946,6 +1952,9 @@ static int omap_sham_probe(struct platform_device *pdev) dd->flags |= dd->pdata->flags; + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); + pm_runtime_enable(dev); pm_runtime_irq_safe(dev); @@ -1986,7 +1995,7 @@ err_algs: &dd->pdata->algs_info[i].algs_list[j]); err_pm: pm_runtime_disable(dev); - if (dd->polling_mode) + if (!dd->polling_mode) dma_release_channel(dd->dma_lch); data_err: dev_err(dev, "initialization failed.\n"); @@ -171,7 +171,7 @@ struct spacc_ablk_ctx { * The fallback cipher. If the operation can't be done in hardware, * fallback to a software version. */ - struct crypto_ablkcipher *sw_cipher; + struct crypto_skcipher *sw_cipher; }; /* AEAD cipher context. */ @@ -789,33 +789,35 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, * request for any other size (192 bits) then we need to do a software * fallback. */ - if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && - ctx->sw_cipher) { + if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) { + if (!ctx->sw_cipher) + return -EINVAL; + /* * Set the fallback transform to use the same request flags as * the hardware transform. */ - ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - ctx->sw_cipher->base.crt_flags |= - cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK; + crypto_skcipher_clear_flags(ctx->sw_cipher, + CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->sw_cipher, + cipher->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + + err = crypto_skcipher_setkey(ctx->sw_cipher, key, len); + + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= + crypto_skcipher_get_flags(ctx->sw_cipher) & + CRYPTO_TFM_RES_MASK; - err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len); if (err) goto sw_setkey_failed; - } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && - !ctx->sw_cipher) - err = -EINVAL; + } memcpy(ctx->key, key, len); ctx->key_len = len; sw_setkey_failed: - if (err && ctx->sw_cipher) { - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK; - } - return err; } @@ -910,20 +912,21 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req, struct crypto_tfm *old_tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher); int err; - if (!ctx->sw_cipher) - return -EINVAL; - /* * Change the request to use the software fallback transform, and once * the ciphering has completed, put the old transform back into the * request. */ - ablkcipher_request_set_tfm(req, ctx->sw_cipher); - err = is_encrypt ? 
crypto_ablkcipher_encrypt(req) : - crypto_ablkcipher_decrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); + skcipher_request_set_tfm(subreq, ctx->sw_cipher); + skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + err = is_encrypt ? crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); return err; } @@ -1015,12 +1018,13 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm) ctx->generic.flags = spacc_alg->type; ctx->generic.engine = engine; if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + ctx->sw_cipher = crypto_alloc_skcipher( + alg->cra_name, 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->sw_cipher)) { dev_warn(engine->dev, "failed to allocate fallback for %s\n", alg->cra_name); - ctx->sw_cipher = NULL; + return PTR_ERR(ctx->sw_cipher); } } ctx->generic.key_offs = spacc_alg->key_offs; @@ -1035,9 +1039,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm) { struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->sw_cipher) - crypto_free_ablkcipher(ctx->sw_cipher); - ctx->sw_cipher = NULL; + crypto_free_skcipher(ctx->sw_cipher); } static int spacc_ablk_encrypt(struct ablkcipher_request *req) @@ -4,12 +4,13 @@ config CRYPTO_DEV_QAT select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER select CRYPTO_AKCIPHER + select CRYPTO_DH select CRYPTO_HMAC + select CRYPTO_RSA select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 select FW_LOADER - select ASN1 config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" @@ -229,6 +229,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; } @@ -239,6 +239,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; } @@ -1,11 +1,3 @@ -$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \ - $(obj)/qat_rsapubkey-asn1.h -$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \ - $(obj)/qat_rsaprivkey-asn1.h - -clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h -clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h - obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ adf_isr.o \ @@ -19,8 +11,6 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ qat_crypto.o \ qat_algs.o \ - qat_rsapubkey-asn1.o \ - qat_rsaprivkey-asn1.o \ qat_asym_algs.o \ qat_uclo.o \ qat_hal.o @@ -176,6 +176,7 @@ struct adf_hw_device_data { void (*disable_iov)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev); int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); + void (*reset_device)(struct adf_accel_dev *accel_dev); const char *fw_name; const char *fw_mmp_name; uint32_t fuses; @@ -82,18 +82,12 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; -void adf_dev_restore(struct adf_accel_dev *accel_dev) +void adf_reset_sbr(struct adf_accel_dev 
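
[Annotation: the spacc conversion above — and the qce and sahara hunks further down — all land on the same fallback idiom: keep a crypto_skcipher for key sizes the hardware cannot handle and run the request through a stack-allocated sub-request. Condensed to its core, assuming the 4.7-era skcipher API the diffs use:

#include <crypto/internal/skcipher.h>

/* ctx->fallback is assumed to have been allocated in cra_init with
 * crypto_alloc_skcipher(name, 0,
 *			 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 */
static int example_do_fallback(struct crypto_skcipher *fallback,
			       struct ablkcipher_request *req, bool enc)
{
	SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int err;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);
	err = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe key/IV state off the stack */
	return err;
}

Compared with the old trick of swapping the tfm in and out of the caller's request, the sub-request never mutates the caller's state and the on-stack copy is explicitly zeroed.]
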
*accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; uint16_t bridge_ctl = 0; - if (accel_dev->is_vf) - return; - - dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", - accel_dev->accel_id); - if (!parent) parent = pdev; @@ -101,6 +95,8 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) dev_info(&GET_DEV(accel_dev), "Transaction still in progress. Proceeding\n"); + dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n"); + pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); @@ -108,8 +104,40 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); msleep(100); - pci_restore_state(pdev); - pci_save_state(pdev); +} +EXPORT_SYMBOL_GPL(adf_reset_sbr); + +void adf_reset_flr(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + u16 control = 0; + int pos = 0; + + dev_info(&GET_DEV(accel_dev), "Function level reset\n"); + pos = pci_pcie_cap(pdev); + if (!pos) { + dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); + return; + } + pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control); + control |= PCI_EXP_DEVCTL_BCR_FLR; + pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control); + msleep(100); +} +EXPORT_SYMBOL_GPL(adf_reset_flr); + +void adf_dev_restore(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + if (hw_device->reset_device) { + dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", + accel_dev->accel_id); + hw_device->reset_device(accel_dev); + pci_restore_state(pdev); + pci_save_state(pdev); + } } static void adf_device_reset_worker(struct work_struct *work) @@ -243,7 +271,8 @@ EXPORT_SYMBOL_GPL(adf_disable_aer); int adf_init_aer(void) { - device_reset_wq = create_workqueue("qat_device_reset_wq"); + device_reset_wq = alloc_workqueue("qat_device_reset_wq", + WQ_MEM_RECLAIM, 0); return !device_reset_wq ? -EFAULT : 0; } @@ -141,6 +141,8 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); +void adf_reset_sbr(struct adf_accel_dev *accel_dev); +void adf_reset_flr(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); @@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure); int __init adf_init_pf_wq(void) { /* Workqueue for PF2VF responses */ - pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); + pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0); return !pf2vf_resp_wq ? -ENOMEM : 0; } @@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); int __init adf_init_vf_wq(void) { - adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq"); + adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0); return !adf_vf_stop_wq ? 
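
[Annotation: all three QAT workqueues above switch from create_workqueue() to alloc_workqueue(..., WQ_MEM_RECLAIM, 0); a queue whose work must complete during device reset needs a rescuer thread to guarantee forward progress under memory pressure, which the legacy wrapper does not provide. (The -EFAULT returned by adf_init_aer() on allocation failure is arguably misleading; -ENOMEM, as used in adf_init_pf_wq(), is the conventional value.) Sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *example_reset_wq;

static int __init example_wq_init(void)
{
	/* WQ_MEM_RECLAIM attaches a rescuer so queued reset work can
	 * still run while the system is reclaiming memory. */
	example_reset_wq = alloc_workqueue("example_reset_wq",
					   WQ_MEM_RECLAIM, 0);
	return example_reset_wq ? 0 : -ENOMEM;
}
]
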
-EFAULT : 0; } @@ -947,13 +947,13 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, return 0; out_free_all: - memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); - dma_free_coherent(dev, sizeof(*ctx->enc_cd), + memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); + dma_free_coherent(dev, sizeof(*ctx->dec_cd), ctx->dec_cd, ctx->dec_cd_paddr); ctx->dec_cd = NULL; out_free_enc: - memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); - dma_free_coherent(dev, sizeof(*ctx->dec_cd), + memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); + dma_free_coherent(dev, sizeof(*ctx->enc_cd), ctx->enc_cd, ctx->enc_cd_paddr); ctx->enc_cd = NULL; return -ENOMEM; @@ -49,11 +49,12 @@ #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> #include <crypto/akcipher.h> +#include <crypto/kpp.h> +#include <crypto/internal/kpp.h> +#include <crypto/dh.h> #include <linux/dma-mapping.h> #include <linux/fips.h> #include <crypto/scatterwalk.h> -#include "qat_rsapubkey-asn1.h" -#include "qat_rsaprivkey-asn1.h" #include "icp_qat_fw_pke.h" #include "adf_accel_devices.h" #include "adf_transport.h" @@ -75,6 +76,14 @@ struct qat_rsa_input_params { dma_addr_t d; dma_addr_t n; } dec; + struct { + dma_addr_t c; + dma_addr_t p; + dma_addr_t q; + dma_addr_t dp; + dma_addr_t dq; + dma_addr_t qinv; + } dec_crt; u64 in_tab[8]; }; } __packed __aligned(64); @@ -95,71 +104,480 @@ struct qat_rsa_ctx { char *n; char *e; char *d; + char *p; + char *q; + char *dp; + char *dq; + char *qinv; dma_addr_t dma_n; dma_addr_t dma_e; dma_addr_t dma_d; + dma_addr_t dma_p; + dma_addr_t dma_q; + dma_addr_t dma_dp; + dma_addr_t dma_dq; + dma_addr_t dma_qinv; unsigned int key_sz; + bool crt_mode; + struct qat_crypto_instance *inst; +} __packed __aligned(64); + +struct qat_dh_input_params { + union { + struct { + dma_addr_t b; + dma_addr_t xa; + dma_addr_t p; + } in; + struct { + dma_addr_t xa; + dma_addr_t p; + } in_g2; + u64 in_tab[8]; + }; +} __packed __aligned(64); + +struct qat_dh_output_params { + union { + dma_addr_t r; + u64 out_tab[8]; + }; +} __packed __aligned(64); + +struct qat_dh_ctx { + char *g; + char *xa; + char *p; + dma_addr_t dma_g; + dma_addr_t dma_xa; + dma_addr_t dma_p; + unsigned int p_size; + bool g2; struct qat_crypto_instance *inst; } __packed __aligned(64); -struct qat_rsa_request { - struct qat_rsa_input_params in; - struct qat_rsa_output_params out; +struct qat_asym_request { + union { + struct qat_rsa_input_params rsa; + struct qat_dh_input_params dh; + } in; + union { + struct qat_rsa_output_params rsa; + struct qat_dh_output_params dh; + } out; dma_addr_t phy_in; dma_addr_t phy_out; char *src_align; char *dst_align; struct icp_qat_fw_pke_request req; - struct qat_rsa_ctx *ctx; + union { + struct qat_rsa_ctx *rsa; + struct qat_dh_ctx *dh; + } ctx; + union { + struct akcipher_request *rsa; + struct kpp_request *dh; + } areq; int err; + void (*cb)(struct icp_qat_fw_pke_resp *resp); } __aligned(64); -static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) +static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) { - struct akcipher_request *areq = (void *)(__force long)resp->opaque; - struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); - struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); + struct qat_asym_request *req = (void *)(__force long)resp->opaque; + struct kpp_request *areq = req->areq.dh; + struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev); int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( resp->pke_resp_hdr.comn_resp_flags); err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 
0 : -EINVAL; - if (req->src_align) - dma_free_coherent(dev, req->ctx->key_sz, req->src_align, - req->in.enc.m); - else - dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, - DMA_TO_DEVICE); + if (areq->src) { + if (req->src_align) + dma_free_coherent(dev, req->ctx.dh->p_size, + req->src_align, req->in.dh.in.b); + else + dma_unmap_single(dev, req->in.dh.in.b, + req->ctx.dh->p_size, DMA_TO_DEVICE); + } - areq->dst_len = req->ctx->key_sz; + areq->dst_len = req->ctx.dh->p_size; if (req->dst_align) { - char *ptr = req->dst_align; + scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, + areq->dst_len, 1); - while (!(*ptr) && areq->dst_len) { - areq->dst_len--; - ptr++; - } + dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align, + req->out.dh.r); + } else { + dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, + DMA_FROM_DEVICE); + } - if (areq->dst_len != req->ctx->key_sz) - memmove(req->dst_align, ptr, areq->dst_len); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), + DMA_TO_DEVICE); + dma_unmap_single(dev, req->phy_out, + sizeof(struct qat_dh_output_params), + DMA_TO_DEVICE); - scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, - areq->dst_len, 1); + kpp_request_complete(areq, err); +} + +#define PKE_DH_1536 0x390c1a49 +#define PKE_DH_G2_1536 0x2e0b1a3e +#define PKE_DH_2048 0x4d0c1a60 +#define PKE_DH_G2_2048 0x3e0b1a55 +#define PKE_DH_3072 0x510c1a77 +#define PKE_DH_G2_3072 0x3a0b1a6c +#define PKE_DH_4096 0x690c1a8e +#define PKE_DH_G2_4096 0x4a0b1a83 + +static unsigned long qat_dh_fn_id(unsigned int len, bool g2) +{ + unsigned int bitslen = len << 3; + + switch (bitslen) { + case 1536: + return g2 ? PKE_DH_G2_1536 : PKE_DH_1536; + case 2048: + return g2 ? PKE_DH_G2_2048 : PKE_DH_2048; + case 3072: + return g2 ? PKE_DH_G2_3072 : PKE_DH_3072; + case 4096: + return g2 ? 
PKE_DH_G2_4096 : PKE_DH_4096; + default: + return 0; + }; +} + +static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm) +{ + return kpp_tfm_ctx(tfm); +} + +static int qat_dh_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + struct qat_asym_request *qat_req = + PTR_ALIGN(kpp_request_ctx(req), 64); + struct icp_qat_fw_pke_request *msg = &qat_req->req; + int ret, ctr = 0; + int n_input_params = 0; + + if (unlikely(!ctx->xa)) + return -EINVAL; + + if (req->dst_len < ctx->p_size) { + req->dst_len = ctx->p_size; + return -EOVERFLOW; + } + memset(msg, '\0', sizeof(*msg)); + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, + ICP_QAT_FW_COMN_REQ_FLAG_SET); + + msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size, + !req->src && ctx->g2); + if (unlikely(!msg->pke_hdr.cd_pars.func_id)) + return -EINVAL; + + qat_req->cb = qat_dh_cb; + qat_req->ctx.dh = ctx; + qat_req->areq.dh = req; + msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + msg->pke_hdr.comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, + QAT_COMN_CD_FLD_TYPE_64BIT_ADR); - dma_free_coherent(dev, req->ctx->key_sz, req->dst_align, - req->out.enc.c); + /* + * If no source is provided use g as base + */ + if (req->src) { + qat_req->in.dh.in.xa = ctx->dma_xa; + qat_req->in.dh.in.p = ctx->dma_p; + n_input_params = 3; } else { - char *ptr = sg_virt(areq->dst); + if (ctx->g2) { + qat_req->in.dh.in_g2.xa = ctx->dma_xa; + qat_req->in.dh.in_g2.p = ctx->dma_p; + n_input_params = 2; + } else { + qat_req->in.dh.in.b = ctx->dma_g; + qat_req->in.dh.in.xa = ctx->dma_xa; + qat_req->in.dh.in.p = ctx->dma_p; + n_input_params = 3; + } + } - while (!(*ptr) && areq->dst_len) { - areq->dst_len--; - ptr++; + ret = -ENOMEM; + if (req->src) { + /* + * src can be of any size in valid range, but HW expects it to + * be the same as modulo p so in case it is different we need + * to allocate a new buf and copy src data. + * In other case we just need to map the user provided buffer. + * Also need to make sure that it is in contiguous buffer. + */ + if (sg_is_last(req->src) && req->src_len == ctx->p_size) { + qat_req->src_align = NULL; + qat_req->in.dh.in.b = dma_map_single(dev, + sg_virt(req->src), + req->src_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, + qat_req->in.dh.in.b))) + return ret; + + } else { + int shift = ctx->p_size - req->src_len; + + qat_req->src_align = dma_zalloc_coherent(dev, + ctx->p_size, + &qat_req->in.dh.in.b, + GFP_KERNEL); + if (unlikely(!qat_req->src_align)) + return ret; + + scatterwalk_map_and_copy(qat_req->src_align + shift, + req->src, 0, req->src_len, 0); } + } + /* + * dst can be of any size in valid range, but HW expects it to be the + * same as modulo m so in case it is different we need to allocate a + * new buf and copy src data. + * In other case we just need to map the user provided buffer. + * Also need to make sure that it is in contiguous buffer. 
+ */ + if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) { + qat_req->dst_align = NULL; + qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst), + req->dst_len, + DMA_FROM_DEVICE); - if (sg_virt(areq->dst) != ptr && areq->dst_len) - memmove(sg_virt(areq->dst), ptr, areq->dst_len); + if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) + goto unmap_src; + + } else { + qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, + &qat_req->out.dh.r, + GFP_KERNEL); + if (unlikely(!qat_req->dst_align)) + goto unmap_src; + } - dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, + qat_req->in.dh.in_tab[n_input_params] = 0; + qat_req->out.dh.out_tab[1] = 0; + /* Mapping in.in.b or in.in_g2.xa is the same */ + qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b, + sizeof(struct qat_dh_input_params), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) + goto unmap_dst; + + qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r, + sizeof(struct qat_dh_output_params), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) + goto unmap_in_params; + + msg->pke_mid.src_data_addr = qat_req->phy_in; + msg->pke_mid.dest_data_addr = qat_req->phy_out; + msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + msg->input_param_count = n_input_params; + msg->output_param_count = 1; + + do { + ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + } while (ret == -EBUSY && ctr++ < 100); + + if (!ret) + return -EINPROGRESS; + + if (!dma_mapping_error(dev, qat_req->phy_out)) + dma_unmap_single(dev, qat_req->phy_out, + sizeof(struct qat_dh_output_params), + DMA_TO_DEVICE); +unmap_in_params: + if (!dma_mapping_error(dev, qat_req->phy_in)) + dma_unmap_single(dev, qat_req->phy_in, + sizeof(struct qat_dh_input_params), + DMA_TO_DEVICE); +unmap_dst: + if (qat_req->dst_align) + dma_free_coherent(dev, ctx->p_size, qat_req->dst_align, + qat_req->out.dh.r); + else + if (!dma_mapping_error(dev, qat_req->out.dh.r)) + dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, + DMA_FROM_DEVICE); +unmap_src: + if (req->src) { + if (qat_req->src_align) + dma_free_coherent(dev, ctx->p_size, qat_req->src_align, + qat_req->in.dh.in.b); + else + if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) + dma_unmap_single(dev, qat_req->in.dh.in.b, + ctx->p_size, + DMA_TO_DEVICE); + } + return ret; +} + +static int qat_dh_check_params_length(unsigned int p_len) +{ + switch (p_len) { + case 1536: + case 2048: + case 3072: + case 4096: + return 0; + } + return -EINVAL; +} + +static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) +{ + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + + if (unlikely(!params->p || !params->g)) + return -EINVAL; + + if (qat_dh_check_params_length(params->p_size << 3)) + return -EINVAL; + + ctx->p_size = params->p_size; + ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); + if (!ctx->p) + return -ENOMEM; + memcpy(ctx->p, params->p, ctx->p_size); + + /* If g equals 2 don't copy it */ + if (params->g_size == 1 && *(char *)params->g == 0x02) { + ctx->g2 = true; + return 0; + } + + ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); + if (!ctx->g) { + dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); + ctx->p = NULL; + return -ENOMEM; + } + memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, + params->g_size); + + return 0; +} + +static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx) +{ + if (ctx->g) { + 
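
[Annotation: the mapping logic above repeats a pattern used throughout this file: if the caller's buffer is a single SG entry of exactly the operand width, map it directly; otherwise allocate a zeroed coherent bounce buffer of the full width and copy the data right-aligned (the "shift"), because the firmware expects fixed-width big-endian operands. Reduced to a hypothetical helper under the same assumptions as the diff (dma_zalloc_coherent() is the 4.x-era API it uses):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/* Map @src (@src_len bytes) for firmware wanting exactly @op_sz bytes.
 * *bounce is NULL when the user buffer was mapped directly. */
static int example_map_operand(struct device *dev, struct scatterlist *src,
			       unsigned int src_len, unsigned int op_sz,
			       char **bounce, dma_addr_t *addr)
{
	if (sg_is_last(src) && src_len == op_sz) {
		*bounce = NULL;
		*addr = dma_map_single(dev, sg_virt(src), src_len,
				       DMA_TO_DEVICE);
		return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
	}

	*bounce = dma_zalloc_coherent(dev, op_sz, addr, GFP_KERNEL);
	if (!*bounce)
		return -ENOMEM;
	/* right-align the operand; the leading pad bytes stay zero */
	scatterwalk_map_and_copy(*bounce + (op_sz - src_len), src, 0,
				 src_len, 0);
	return 0;
}
]
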
dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g); + ctx->g = NULL; + } + if (ctx->xa) { + dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa); + ctx->xa = NULL; + } + if (ctx->p) { + dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); + ctx->p = NULL; + } + ctx->p_size = 0; + ctx->g2 = false; +} + +static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf, + unsigned int len) +{ + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = &GET_DEV(ctx->inst->accel_dev); + struct dh params; + int ret; + + if (crypto_dh_decode_key(buf, len, &params) < 0) + return -EINVAL; + + /* Free old secret if any */ + qat_dh_clear_ctx(dev, ctx); + + ret = qat_dh_set_params(ctx, &params); + if (ret < 0) + return ret; + + ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, + GFP_KERNEL); + if (!ctx->xa) { + qat_dh_clear_ctx(dev, ctx); + return -ENOMEM; + } + memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key, + params.key_size); + + return 0; +} + +static int qat_dh_max_size(struct crypto_kpp *tfm) +{ + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->p ? ctx->p_size : -EINVAL; +} + +static int qat_dh_init_tfm(struct crypto_kpp *tfm) +{ + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + struct qat_crypto_instance *inst = + qat_crypto_get_instance_node(get_current_node()); + + if (!inst) + return -EINVAL; + + ctx->p_size = 0; + ctx->g2 = false; + ctx->inst = inst; + return 0; +} + +static void qat_dh_exit_tfm(struct crypto_kpp *tfm) +{ + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = &GET_DEV(ctx->inst->accel_dev); + + qat_dh_clear_ctx(dev, ctx); + qat_crypto_put_instance(ctx->inst); +} + +static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) +{ + struct qat_asym_request *req = (void *)(__force long)resp->opaque; + struct akcipher_request *areq = req->areq.rsa; + struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev); + int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( + resp->pke_resp_hdr.comn_resp_flags); + + err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ?
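
[Annotation: the "qat-dh" transform implemented above is consumed through the generic KPP interface added in 4.8. A minimal, hedged sketch of a caller — the helper name is hypothetical, @params is assumed to be filled in elsewhere, and a synchronous completion is assumed for brevity (the real driver is async, so -EINPROGRESS would need a completion):

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_dh_gen_public(struct dh *params,
				 struct scatterlist *pub, unsigned int len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	unsigned int secret_len;
	void *secret;
	int ret;

	tfm = crypto_alloc_kpp("dh", 0, 0);	/* may resolve to qat-dh */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	secret_len = crypto_dh_key_len(params);
	secret = kmalloc(secret_len, GFP_KERNEL);
	if (!secret) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	ret = crypto_dh_encode_key(secret, secret_len, params);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, secret, secret_len);
	kzfree(secret);
	if (ret)
		goto free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	kpp_request_set_input(req, NULL, 0);	/* no src: base is g */
	kpp_request_set_output(req, pub, len);
	ret = crypto_kpp_generate_public_key(req);
	kpp_request_free(req);
free_tfm:
	crypto_free_kpp(tfm);
	return ret;
}
]
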
0 : -EINVAL; + + if (req->src_align) + dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align, + req->in.rsa.enc.m); + else + dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, + DMA_TO_DEVICE); + + areq->dst_len = req->ctx.rsa->key_sz; + if (req->dst_align) { + scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, + areq->dst_len, 1); + + dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align, + req->out.rsa.enc.c); + } else { + dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, DMA_FROM_DEVICE); } @@ -175,8 +593,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) void qat_alg_asym_callback(void *_resp) { struct icp_qat_fw_pke_resp *resp = _resp; + struct qat_asym_request *areq = (void *)(__force long)resp->opaque; - qat_rsa_cb(resp); + areq->cb(resp); } #define PKE_RSA_EP_512 0x1c161b21 @@ -237,13 +656,42 @@ static unsigned long qat_rsa_dec_fn_id(unsigned int len) }; } +#define PKE_RSA_DP2_512 0x1c131b57 +#define PKE_RSA_DP2_1024 0x26131c2d +#define PKE_RSA_DP2_1536 0x45111d12 +#define PKE_RSA_DP2_2048 0x59121dfa +#define PKE_RSA_DP2_3072 0x81121ed9 +#define PKE_RSA_DP2_4096 0xb1111fb2 + +static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len) +{ + unsigned int bitslen = len << 3; + + switch (bitslen) { + case 512: + return PKE_RSA_DP2_512; + case 1024: + return PKE_RSA_DP2_1024; + case 1536: + return PKE_RSA_DP2_1536; + case 2048: + return PKE_RSA_DP2_2048; + case 3072: + return PKE_RSA_DP2_3072; + case 4096: + return PKE_RSA_DP2_4096; + default: + return 0; + }; +} + static int qat_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); - struct qat_rsa_request *qat_req = + struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; @@ -262,14 +710,16 @@ static int qat_rsa_enc(struct akcipher_request *req) if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; - qat_req->ctx = ctx; + qat_req->cb = qat_rsa_cb; + qat_req->ctx.rsa = ctx; + qat_req->areq.rsa = req; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); - qat_req->in.enc.e = ctx->dma_e; - qat_req->in.enc.n = ctx->dma_n; + qat_req->in.rsa.enc.e = ctx->dma_e; + qat_req->in.rsa.enc.n = ctx->dma_n; ret = -ENOMEM; /* @@ -281,16 +731,16 @@ static int qat_rsa_enc(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src), + qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src), req->src_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m))) + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) return ret; } else { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.enc.m, + &qat_req->in.rsa.enc.m, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -300,30 +750,30 @@ static int qat_rsa_enc(struct akcipher_request *req) } if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { qat_req->dst_align = NULL; - qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE); + qat_req->out.rsa.enc.c = dma_map_single(dev, 
sg_virt(req->dst), + req->dst_len, + DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c))) + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) goto unmap_src; } else { qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.enc.c, + &qat_req->out.rsa.enc.c, GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; } - qat_req->in.in_tab[3] = 0; - qat_req->out.out_tab[1] = 0; - qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, + qat_req->in.rsa.in_tab[3] = 0; + qat_req->out.rsa.out_tab[1] = 0; + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; - qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) @@ -331,7 +781,7 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)req; + msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; do { @@ -353,19 +803,19 @@ unmap_in_params: unmap_dst: if (qat_req->dst_align) dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.enc.c); + qat_req->out.rsa.enc.c); else - if (!dma_mapping_error(dev, qat_req->out.enc.c)) - dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, - DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) + dma_unmap_single(dev, qat_req->out.rsa.enc.c, + ctx->key_sz, DMA_FROM_DEVICE); unmap_src: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.enc.m); + qat_req->in.rsa.enc.m); else - if (!dma_mapping_error(dev, qat_req->in.enc.m)) - dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, - DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) + dma_unmap_single(dev, qat_req->in.rsa.enc.m, + ctx->key_sz, DMA_TO_DEVICE); return ret; } @@ -375,7 +825,7 @@ static int qat_rsa_dec(struct akcipher_request *req) struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); - struct qat_rsa_request *qat_req = + struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; int ret, ctr = 0; @@ -390,18 +840,30 @@ static int qat_rsa_dec(struct akcipher_request *req) memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); - msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); + msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ? 
+ qat_rsa_dec_fn_id_crt(ctx->key_sz) : + qat_rsa_dec_fn_id(ctx->key_sz); if (unlikely(!msg->pke_hdr.cd_pars.func_id)) return -EINVAL; - qat_req->ctx = ctx; + qat_req->cb = qat_rsa_cb; + qat_req->ctx.rsa = ctx; + qat_req->areq.rsa = req; msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; msg->pke_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, QAT_COMN_CD_FLD_TYPE_64BIT_ADR); - qat_req->in.dec.d = ctx->dma_d; - qat_req->in.dec.n = ctx->dma_n; + if (ctx->crt_mode) { + qat_req->in.rsa.dec_crt.p = ctx->dma_p; + qat_req->in.rsa.dec_crt.q = ctx->dma_q; + qat_req->in.rsa.dec_crt.dp = ctx->dma_dp; + qat_req->in.rsa.dec_crt.dq = ctx->dma_dq; + qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv; + } else { + qat_req->in.rsa.dec.d = ctx->dma_d; + qat_req->in.rsa.dec.n = ctx->dma_n; + } ret = -ENOMEM; /* @@ -413,16 +875,16 @@ static int qat_rsa_dec(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src), + qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), req->dst_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c))) + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) return ret; } else { int shift = ctx->key_sz - req->src_len; qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->in.dec.c, + &qat_req->in.rsa.dec.c, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret; @@ -432,31 +894,34 @@ static int qat_rsa_dec(struct akcipher_request *req) } if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { qat_req->dst_align = NULL; - qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst), + qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), req->dst_len, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m))) + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) goto unmap_src; } else { qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, - &qat_req->out.dec.m, + &qat_req->out.rsa.dec.m, GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; } - qat_req->in.in_tab[3] = 0; - qat_req->out.out_tab[1] = 0; - qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, + if (ctx->crt_mode) + qat_req->in.rsa.in_tab[6] = 0; + else + qat_req->in.rsa.in_tab[3] = 0; + qat_req->out.rsa.out_tab[1] = 0; + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; - qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) @@ -464,8 +929,12 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->pke_mid.src_data_addr = qat_req->phy_in; msg->pke_mid.dest_data_addr = qat_req->phy_out; - msg->pke_mid.opaque = (uint64_t)(__force long)req; - msg->input_param_count = 3; + msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; + if (ctx->crt_mode) + msg->input_param_count = 6; + else + msg->input_param_count = 3; + msg->output_param_count = 1; do { ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); @@ -486,26 +955,24 @@ unmap_in_params: unmap_dst: if (qat_req->dst_align) dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.dec.m); + qat_req->out.rsa.dec.m); else - if (!dma_mapping_error(dev, qat_req->out.dec.m)) - 
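
[Annotation: the decrypt path above selects a six-parameter firmware call (PKE_RSA_DP2_*) whenever CRT components are available. For reference — standard RSA-CRT, not taken from the patch itself — the computation being offloaded is:

/*
 * RSA-CRT decryption, as implemented by the DP2 firmware functions:
 *
 *	m1 = c^dp mod p			(dp = d mod (p - 1))
 *	m2 = c^dq mod q			(dq = d mod (q - 1))
 *	h  = qinv * (m1 - m2) mod p	(qinv = q^-1 mod p)
 *	m  = m2 + h * q
 *
 * Two half-width exponentiations replace one full-width one, which is
 * why qat_rsa_dec() passes six inputs (c, p, q, dp, dq, qinv) here
 * instead of three (c, d, n), and why it is substantially faster.
 */
]
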
dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, - DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) + dma_unmap_single(dev, qat_req->out.rsa.dec.m, + ctx->key_sz, DMA_FROM_DEVICE); unmap_src: if (qat_req->src_align) dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.dec.c); + qat_req->in.rsa.dec.c); else - if (!dma_mapping_error(dev, qat_req->in.dec.c)) - dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, - DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) + dma_unmap_single(dev, qat_req->in.rsa.dec.c, + ctx->key_sz, DMA_TO_DEVICE); return ret; } -int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) +int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) { - struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; @@ -518,11 +985,6 @@ int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, ctx->key_sz = vlen; ret = -EINVAL; - /* In FIPS mode only allow key size 2K & 3K */ - if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) { - pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); - goto err; - } /* invalid key size provided */ if (!qat_rsa_enc_fn_id(ctx->key_sz)) goto err; @@ -540,10 +1002,8 @@ err: return ret; } -int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) +int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) { - struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; @@ -559,18 +1019,15 @@ int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, } ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); - if (!ctx->e) { - ctx->e = NULL; + if (!ctx->e) return -ENOMEM; - } + memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); return 0; } -int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, - const void *value, size_t vlen) +int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) { - struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; @@ -585,12 +1042,6 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) goto err; - /* In FIPS mode only allow key size 2K & 3K */ - if (fips_enabled && (vlen != 256 && vlen != 384)) { - pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); - goto err; - } - ret = -ENOMEM; ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->d) @@ -603,12 +1054,106 @@ err: return ret; } -static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, - unsigned int keylen, bool private) +static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len) { - struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); - struct device *dev = &GET_DEV(ctx->inst->accel_dev); - int ret; + while (!**ptr && *len) { + (*ptr)++; + (*len)--; + } +} + +static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) +{ + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + const char *ptr; + unsigned int len; + unsigned int half_key_sz = ctx->key_sz / 2; + + /* p */ + ptr = rsa_key->p; + len = rsa_key->p_sz; + 
qat_rsa_drop_leading_zeros(&ptr, &len); + if (!len) + goto err; + ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); + if (!ctx->p) + goto err; + memcpy(ctx->p + (half_key_sz - len), ptr, len); + + /* q */ + ptr = rsa_key->q; + len = rsa_key->q_sz; + qat_rsa_drop_leading_zeros(&ptr, &len); + if (!len) + goto free_p; + ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); + if (!ctx->q) + goto free_p; + memcpy(ctx->q + (half_key_sz - len), ptr, len); + + /* dp */ + ptr = rsa_key->dp; + len = rsa_key->dp_sz; + qat_rsa_drop_leading_zeros(&ptr, &len); + if (!len) + goto free_q; + ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, + GFP_KERNEL); + if (!ctx->dp) + goto free_q; + memcpy(ctx->dp + (half_key_sz - len), ptr, len); + + /* dq */ + ptr = rsa_key->dq; + len = rsa_key->dq_sz; + qat_rsa_drop_leading_zeros(&ptr, &len); + if (!len) + goto free_dp; + ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, + GFP_KERNEL); + if (!ctx->dq) + goto free_dp; + memcpy(ctx->dq + (half_key_sz - len), ptr, len); + + /* qinv */ + ptr = rsa_key->qinv; + len = rsa_key->qinv_sz; + qat_rsa_drop_leading_zeros(&ptr, &len); + if (!len) + goto free_dq; + ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, + GFP_KERNEL); + if (!ctx->qinv) + goto free_dq; + memcpy(ctx->qinv + (half_key_sz - len), ptr, len); + + ctx->crt_mode = true; + return; + +free_dq: + memset(ctx->dq, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq); + ctx->dq = NULL; +free_dp: + memset(ctx->dp, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp); + ctx->dp = NULL; +free_q: + memset(ctx->q, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q); + ctx->q = NULL; +free_p: + memset(ctx->p, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p); + ctx->p = NULL; +err: + ctx->crt_mode = false; +} + +static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx) +{ + unsigned int half_key_sz = ctx->key_sz / 2; /* Free the old key if any */ if (ctx->n) @@ -619,19 +1164,68 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, memset(ctx->d, '\0', ctx->key_sz); dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); } + if (ctx->p) { + memset(ctx->p, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p); + } + if (ctx->q) { + memset(ctx->q, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q); + } + if (ctx->dp) { + memset(ctx->dp, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp); + } + if (ctx->dq) { + memset(ctx->dq, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq); + } + if (ctx->qinv) { + memset(ctx->qinv, '\0', half_key_sz); + dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv); + } ctx->n = NULL; ctx->e = NULL; ctx->d = NULL; + ctx->p = NULL; + ctx->q = NULL; + ctx->dp = NULL; + ctx->dq = NULL; + ctx->qinv = NULL; + ctx->crt_mode = false; + ctx->key_sz = 0; +} + +static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen, bool private) +{ + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = &GET_DEV(ctx->inst->accel_dev); + struct rsa_key rsa_key; + int ret; + + qat_rsa_clear_ctx(dev, ctx); if (private) - ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key, - keylen); + ret = rsa_parse_priv_key(&rsa_key, key, keylen); else - ret = 
asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key, - keylen); + ret = rsa_parse_pub_key(&rsa_key, key, keylen); + if (ret < 0) + goto free; + + ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz); if (ret < 0) goto free; + ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz); + if (ret < 0) + goto free; + if (private) { + ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz); + if (ret < 0) + goto free; + qat_rsa_setkey_crt(ctx, &rsa_key); + } if (!ctx->n || !ctx->e) { /* invalid key provided */ @@ -646,20 +1240,7 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, return 0; free: - if (ctx->d) { - memset(ctx->d, '\0', ctx->key_sz); - dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); - ctx->d = NULL; - } - if (ctx->e) { - dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); - ctx->e = NULL; - } - if (ctx->n) { - dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); - ctx->n = NULL; - ctx->key_sz = 0; - } + qat_rsa_clear_ctx(dev, ctx); return ret; } @@ -725,7 +1306,7 @@ static struct akcipher_alg rsa = { .max_size = qat_rsa_max_size, .init = qat_rsa_init_tfm, .exit = qat_rsa_exit_tfm, - .reqsize = sizeof(struct qat_rsa_request) + 64, + .reqsize = sizeof(struct qat_asym_request) + 64, .base = { .cra_name = "rsa", .cra_driver_name = "qat-rsa", @@ -735,6 +1316,23 @@ static struct akcipher_alg rsa = { }, }; +static struct kpp_alg dh = { + .set_secret = qat_dh_set_secret, + .generate_public_key = qat_dh_compute_value, + .compute_shared_secret = qat_dh_compute_value, + .max_size = qat_dh_max_size, + .init = qat_dh_init_tfm, + .exit = qat_dh_exit_tfm, + .reqsize = sizeof(struct qat_asym_request) + 64, + .base = { + .cra_name = "dh", + .cra_driver_name = "qat-dh", + .cra_priority = 1000, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct qat_dh_ctx), + }, +}; + int qat_asym_algs_register(void) { int ret = 0; @@ -743,7 +1341,11 @@ int qat_asym_algs_register(void) if (++active_devs == 1) { rsa.base.cra_flags = 0; ret = crypto_register_akcipher(&rsa); + if (ret) + goto unlock; + ret = crypto_register_kpp(&dh); } +unlock: mutex_unlock(&algs_lock); return ret; } @@ -751,7 +1353,9 @@ int qat_asym_algs_register(void) void qat_asym_algs_unregister(void) { mutex_lock(&algs_lock); - if (--active_devs == 0) + if (--active_devs == 0) { crypto_unregister_akcipher(&rsa); + crypto_unregister_kpp(&dh); + } mutex_unlock(&algs_lock); } @@ -1,11 +0,0 @@ -RsaPrivKey ::= SEQUENCE { - version INTEGER, - n INTEGER ({ qat_rsa_get_n }), - e INTEGER ({ qat_rsa_get_e }), - d INTEGER ({ qat_rsa_get_d }), - prime1 INTEGER, - prime2 INTEGER, - exponent1 INTEGER, - exponent2 INTEGER, - coefficient INTEGER -} @@ -1,4 +0,0 @@ -RsaPubKey ::= SEQUENCE { - n INTEGER ({ qat_rsa_get_n }), - e INTEGER ({ qat_rsa_get_e }) -} @@ -252,6 +252,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->enable_ints = adf_enable_ints; hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->reset_device = adf_reset_sbr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; } @@ -15,8 +15,8 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <crypto/aes.h> -#include <crypto/algapi.h> #include <crypto/des.h> +#include <crypto/internal/skcipher.h> #include "cipher.h" @@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, memcpy(ctx->enc_key, key, keylen); return 0; fallback: - ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); + ret = 
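
[Annotation: qat_rsa_setkey() above drops the driver-private ASN.1 decoders (the .asn1 files deleted at the end of this hunk) in favour of the shared rsa_parse_priv_key()/rsa_parse_pub_key() helpers, which fill a struct rsa_key with pointers into the BER-encoded blob. A minimal usage sketch; example_program_hw() is a hypothetical device-specific loader:

#include <crypto/internal/rsa.h>

static int example_program_hw(const u8 *n, size_t n_sz,
			      const u8 *d, size_t d_sz);

/* The rsa_key fields point into @key, so they are only valid for as
 * long as @key is. */
static int example_load_priv_key(const void *key, unsigned int keylen)
{
	struct rsa_key rsa_key;
	int ret;

	ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	/* rsa_key.n/.e/.d (plus .p/.q/.dp/.dq/.qinv for CRT mode) and
	 * the matching *_sz lengths are now available. */
	return example_program_hw(rsa_key.n, rsa_key.n_sz,
				  rsa_key.d, rsa_key.d_sz);
}
]
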
crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) ctx->enc_keylen = keylen; return ret; @@ -212,10 +212,16 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && ctx->enc_keylen != AES_KEYSIZE_256) { - ablkcipher_request_set_tfm(req, ctx->fallback); - ret = encrypt ? crypto_ablkcipher_encrypt(req) : - crypto_ablkcipher_decrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + ret = encrypt ? crypto_skcipher_encrypt(subreq) : + crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); return ret; } @@ -239,10 +245,9 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm) memset(ctx, 0, sizeof(*ctx)); tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); - ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), - CRYPTO_ALG_TYPE_ABLKCIPHER, - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) return PTR_ERR(ctx->fallback); @@ -253,7 +258,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm) { struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_ablkcipher(ctx->fallback); + crypto_free_skcipher(ctx->fallback); } struct qce_ablkcipher_def { @@ -22,7 +22,7 @@ struct qce_cipher_ctx { u8 enc_key[QCE_MAX_KEY_SIZE]; unsigned int enc_keylen; - struct crypto_ablkcipher *fallback; + struct crypto_skcipher *fallback; }; /** @@ -155,43 +155,43 @@ * expansion of its usage. 
*/ struct samsung_aes_variant { - unsigned int aes_offset; + unsigned int aes_offset; }; struct s5p_aes_reqctx { - unsigned long mode; + unsigned long mode; }; struct s5p_aes_ctx { - struct s5p_aes_dev *dev; + struct s5p_aes_dev *dev; - uint8_t aes_key[AES_MAX_KEY_SIZE]; - uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; - int keylen; + uint8_t aes_key[AES_MAX_KEY_SIZE]; + uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; + int keylen; }; struct s5p_aes_dev { - struct device *dev; - struct clk *clk; - void __iomem *ioaddr; - void __iomem *aes_ioaddr; - int irq_fc; + struct device *dev; + struct clk *clk; + void __iomem *ioaddr; + void __iomem *aes_ioaddr; + int irq_fc; - struct ablkcipher_request *req; - struct s5p_aes_ctx *ctx; - struct scatterlist *sg_src; - struct scatterlist *sg_dst; + struct ablkcipher_request *req; + struct s5p_aes_ctx *ctx; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; /* In case of unaligned access: */ - struct scatterlist *sg_src_cpy; - struct scatterlist *sg_dst_cpy; + struct scatterlist *sg_src_cpy; + struct scatterlist *sg_dst_cpy; - struct tasklet_struct tasklet; - struct crypto_queue queue; - bool busy; - spinlock_t lock; + struct tasklet_struct tasklet; + struct crypto_queue queue; + bool busy; + spinlock_t lock; - struct samsung_aes_variant *variant; + struct samsung_aes_variant *variant; }; static struct s5p_aes_dev *s5p_dev; @@ -421,11 +421,11 @@ static bool s5p_aes_rx(struct s5p_aes_dev *dev) static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) { struct platform_device *pdev = dev_id; - struct s5p_aes_dev *dev = platform_get_drvdata(pdev); - uint32_t status; - unsigned long flags; - bool set_dma_tx = false; - bool set_dma_rx = false; + struct s5p_aes_dev *dev = platform_get_drvdata(pdev); + bool set_dma_tx = false; + bool set_dma_rx = false; + unsigned long flags; + uint32_t status; spin_lock_irqsave(&dev->lock, flags); @@ -538,10 +538,10 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev, static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) { - struct ablkcipher_request *req = dev->req; - uint32_t aes_control; - int err; - unsigned long flags; + struct ablkcipher_request *req = dev->req; + uint32_t aes_control; + unsigned long flags; + int err; aes_control = SSS_AES_KEY_CHANGE_MODE; if (mode & FLAGS_AES_DECRYPT) @@ -653,10 +653,10 @@ exit: static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); - struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); - struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); - struct s5p_aes_dev *dev = ctx->dev; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); + struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct s5p_aes_dev *dev = ctx->dev; if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); @@ -671,7 +671,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, const uint8_t *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); if (keylen != AES_KEYSIZE_128 && @@ -763,11 +763,11 @@ static struct crypto_alg algs[] = { static int s5p_aes_probe(struct platform_device *pdev) { - int i, j, err = -ENODEV; - struct s5p_aes_dev 
*pdata; - struct device *dev = &pdev->dev; - struct resource *res; + struct device *dev = &pdev->dev; + int i, j, err = -ENODEV; struct samsung_aes_variant *variant; + struct s5p_aes_dev *pdata; + struct resource *res; if (s5p_dev) return -EEXIST; @@ -14,10 +14,9 @@ * Based on omap-aes.c and tegra-aes.c */ -#include <crypto/algapi.h> #include <crypto/aes.h> -#include <crypto/hash.h> #include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/sha.h> @@ -150,10 +149,7 @@ struct sahara_ctx { /* AES-specific context */ int keylen; u8 key[AES_KEYSIZE_128]; - struct crypto_ablkcipher *fallback; - - /* SHA-specific context */ - struct crypto_shash *shash_fallback; + struct crypto_skcipher *fallback; }; struct sahara_aes_reqctx { @@ -620,25 +616,21 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, return 0; } - if (keylen != AES_KEYSIZE_128 && - keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) + if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; /* * The requested key size is not supported by HW, do a fallback. */ - ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - ctx->fallback->base.crt_flags |= - (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + CRYPTO_TFM_REQ_MASK); - ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); - if (ret) { - struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm); + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); - tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm_aux->crt_flags |= - (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); - } + tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) & + CRYPTO_TFM_RES_MASK; return ret; } @@ -670,16 +662,20 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - ablkcipher_request_set_tfm(req, ctx->fallback); - err = crypto_ablkcipher_encrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + err = crypto_skcipher_encrypt(subreq); + skcipher_request_zero(subreq); return err; } @@ -688,16 +684,20 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - ablkcipher_request_set_tfm(req, ctx->fallback); - err = crypto_ablkcipher_decrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + 
skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + err = crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); return err; } @@ -706,16 +706,20 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - ablkcipher_request_set_tfm(req, ctx->fallback); - err = crypto_ablkcipher_encrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + err = crypto_skcipher_encrypt(subreq); + skcipher_request_zero(subreq); return err; } @@ -724,16 +728,20 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) { - struct crypto_tfm *tfm = - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); struct sahara_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); int err; if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - ablkcipher_request_set_tfm(req, ctx->fallback); - err = crypto_ablkcipher_decrypt(req); - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, + NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + err = crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); return err; } @@ -745,8 +753,9 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm) const char *name = crypto_tfm_alg_name(tfm); struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->fallback = crypto_alloc_ablkcipher(name, 0, - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + ctx->fallback = crypto_alloc_skcipher(name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) { pr_err("Error allocating fallback algo %s\n", name); return PTR_ERR(ctx->fallback); @@ -761,9 +770,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm) { struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) - crypto_free_ablkcipher(ctx->fallback); - ctx->fallback = NULL; + crypto_free_skcipher(ctx->fallback); } static u32 sahara_sha_init_hdr(struct sahara_dev *dev, @@ -1180,15 +1187,6 @@ static int sahara_sha_import(struct ahash_request *req, const void *in) static int sahara_sha_cra_init(struct crypto_tfm *tfm) { - const char *name = crypto_tfm_alg_name(tfm); - struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->shash_fallback = crypto_alloc_shash(name, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->shash_fallback)) { - pr_err("Error allocating fallback algo %s\n", name); - return PTR_ERR(ctx->shash_fallback); - } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct sahara_sha_reqctx) + SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); @@ -1196,14 +1194,6 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm) return 0; } -static void sahara_sha_cra_exit(struct crypto_tfm *tfm) -{ - struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto_free_shash(ctx->shash_fallback); - 
ctx->shash_fallback = NULL; -} - static struct crypto_alg aes_algs[] = { { .cra_name = "ecb(aes)", @@ -1272,7 +1262,6 @@ static struct ahash_alg sha_v3_algs[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = sahara_sha_cra_init, - .cra_exit = sahara_sha_cra_exit, } }, }; @@ -1300,7 +1289,6 @@ static struct ahash_alg sha_v4_algs[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = sahara_sha_cra_init, - .cra_exit = sahara_sha_cra_exit, } }, }; @@ -91,10 +91,17 @@ static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, return be16_to_cpu(ptr->len); } -static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) +static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val, + bool is_sec1) { if (!is_sec1) - ptr->j_extent = 0; + ptr->j_extent = val; +} + +static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1) +{ + if (!is_sec1) + ptr->j_extent |= val; } /* @@ -111,7 +118,7 @@ static void map_single_talitos_ptr(struct device *dev, to_talitos_ptr_len(ptr, len, is_sec1); to_talitos_ptr(ptr, dma_addr, is_sec1); - to_talitos_ptr_extent_clear(ptr, is_sec1); + to_talitos_ptr_ext_set(ptr, 0, is_sec1); } /* @@ -804,6 +811,11 @@ static void talitos_unregister_rng(struct device *dev) * crypto alg */ #define TALITOS_CRA_PRIORITY 3000 +/* + * Defines a priority for doing AEAD with descriptors of type + * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP + */ +#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) #define TALITOS_MAX_KEY_SIZE 96 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ @@ -904,35 +916,59 @@ struct talitos_edesc { static void talitos_sg_unmap(struct device *dev, struct talitos_edesc *edesc, struct scatterlist *src, - struct scatterlist *dst) + struct scatterlist *dst, + unsigned int len, unsigned int offset) { + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); unsigned int src_nents = edesc->src_nents ? : 1; unsigned int dst_nents = edesc->dst_nents ?
: 1; + if (is_sec1 && dst && dst_nents > 1) { + dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset, + len, DMA_FROM_DEVICE); + sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len, + offset); + } if (src != dst) { - dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + if (src_nents == 1 || !is_sec1) + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - if (dst) { + if (dst && (dst_nents == 1 || !is_sec1)) dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); - } - } else + } else if (src_nents == 1 || !is_sec1) { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); + } } static void ipsec_esp_unmap(struct device *dev, struct talitos_edesc *edesc, struct aead_request *areq) { - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct talitos_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + + if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], + DMA_FROM_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst); + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, + areq->assoclen); if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); + + if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) { + unsigned int dst_nents = edesc->dst_nents ? : 1; + + sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, + areq->assoclen + areq->cryptlen - ivsize); + } } /* @@ -942,6 +978,8 @@ static void ipsec_esp_encrypt_done(struct device *dev, struct talitos_desc *desc, void *context, int err) { + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); struct aead_request *areq = context; struct crypto_aead *authenc = crypto_aead_reqtfm(areq); unsigned int authsize = crypto_aead_authsize(authenc); @@ -955,8 +993,11 @@ static void ipsec_esp_encrypt_done(struct device *dev, /* copy the generated ICV to dst */ if (edesc->icv_ool) { - icvdata = &edesc->link_tbl[edesc->src_nents + - edesc->dst_nents + 2]; + if (is_sec1) + icvdata = edesc->buf + areq->assoclen + areq->cryptlen; + else + icvdata = &edesc->link_tbl[edesc->src_nents + + edesc->dst_nents + 2]; sg = sg_last(areq->dst, edesc->dst_nents); memcpy((char *)sg_virt(sg) + sg->length - authsize, icvdata, authsize); @@ -977,6 +1018,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, struct talitos_edesc *edesc; struct scatterlist *sg; char *oicv, *icv; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); edesc = container_of(desc, struct talitos_edesc, desc); @@ -988,7 +1031,12 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, icv = (char *)sg_virt(sg) + sg->length - authsize; if (edesc->dma_len) { - oicv = (char *)&edesc->link_tbl[edesc->src_nents + + if (is_sec1) + oicv = (char *)&edesc->dma_link_tbl + + req->assoclen + req->cryptlen; + else + oicv = (char *) + &edesc->link_tbl[edesc->src_nents + edesc->dst_nents + 2]; if (edesc->icv_ool) icv = oicv + authsize; @@ -1050,8 +1098,8 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, to_talitos_ptr(link_tbl_ptr + count, sg_dma_address(sg) + offset, 0); - link_tbl_ptr[count].len = cpu_to_be16(len); - link_tbl_ptr[count].j_extent = 0; + 
to_talitos_ptr_len(link_tbl_ptr + count, len, 0); + to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); count++; cryptlen -= len; offset = 0; @@ -1062,17 +1110,43 @@ next: /* tag end of link table */ if (count > 0) - link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN; + to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, + DESC_PTR_LNKTBL_RETURN, 0); return count; } -static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count, - int cryptlen, - struct talitos_ptr *link_tbl_ptr) +int talitos_sg_map(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + struct talitos_ptr *ptr, + int sg_count, unsigned int offset, int tbl_off) { - return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, - link_tbl_ptr); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr_ext_set(ptr, 0, is_sec1); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1); + return sg_count; + } + if (is_sec1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1); + return sg_count; + } + sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, + &edesc->link_tbl[tbl_off]); + if (sg_count == 1) { + /* Only one segment now, so no link tbl needed*/ + copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); + return sg_count; + } + to_talitos_ptr(ptr, edesc->dma_link_tbl + + tbl_off * sizeof(struct talitos_ptr), is_sec1); + to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); + + return sg_count; } /* @@ -1093,42 +1167,52 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, int tbl_off = 0; int sg_count, ret; int sg_link_tbl_len; + bool sync_needed = false; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* hmac key */ map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, DMA_TO_DEVICE); - sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL - : DMA_TO_DEVICE); - /* hmac data */ - desc->ptr[1].len = cpu_to_be16(areq->assoclen); - if (sg_count > 1 && - (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, - areq->assoclen, - &edesc->link_tbl[tbl_off])) > 1) { - to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * - sizeof(struct talitos_ptr), 0); - desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; + sg_count = edesc->src_nents ?: 1; + if (is_sec1 && sg_count > 1) + sg_copy_to_buffer(areq->src, sg_count, edesc->buf, + areq->assoclen + cryptlen); + else + sg_count = dma_map_sg(dev, areq->src, sg_count, + (areq->src == areq->dst) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + /* hmac data */ + ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc, + &desc->ptr[1], sg_count, 0, tbl_off); + if (ret > 1) { tbl_off += ret; - } else { - to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); - desc->ptr[1].j_extent = 0; + sync_needed = true; } /* cipher iv */ - to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); - desc->ptr[2].len = cpu_to_be16(ivsize); - desc->ptr[2].j_extent = 0; + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { + to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1); + to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1); + to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1); + } else { + to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1); + to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1); + to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1); + } /* cipher key */ - map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, - (char *)&ctx->key + ctx->authkeylen, - DMA_TO_DEVICE); + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) + map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, + (char *)&ctx->key + ctx->authkeylen, + DMA_TO_DEVICE); + else + map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen, + (char *)&ctx->key + ctx->authkeylen, + DMA_TO_DEVICE); /* * cipher in @@ -1136,78 +1220,82 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, * extent is bytes of HMAC postpended to ciphertext, * typically 12 for ipsec */ - desc->ptr[4].len = cpu_to_be16(cryptlen); - desc->ptr[4].j_extent = authsize; + to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1); + to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1); sg_link_tbl_len = cryptlen; - if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) - sg_link_tbl_len += authsize; - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) + - areq->assoclen, 0); - } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count, - areq->assoclen, sg_link_tbl_len, - &edesc->link_tbl[tbl_off])) > - 1) { - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + - tbl_off * - sizeof(struct talitos_ptr), 0); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - tbl_off += ret; - } else { - copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0); + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { + to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1); + + if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) + sg_link_tbl_len += authsize; } - /* cipher out */ - desc->ptr[5].len = cpu_to_be16(cryptlen); - desc->ptr[5].j_extent = authsize; + sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, + &desc->ptr[4], sg_count, areq->assoclen, + tbl_off); - if (areq->src != areq->dst) - sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, - DMA_FROM_DEVICE); + if (sg_count > 1) { + tbl_off += sg_count; + sync_needed = true; + } - edesc->icv_ool = false; + /* cipher out */ + if (areq->src != areq->dst) { + sg_count = edesc->dst_nents ? 
: 1; + if (!is_sec1 || sg_count == 1) + dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); + } - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) + - areq->assoclen, 0); - } else if ((sg_count = - sg_to_link_tbl_offset(areq->dst, sg_count, - areq->assoclen, cryptlen, - &edesc->link_tbl[tbl_off])) > 1) { - struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; - - to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr), 0); - - /* Add an entry to the link table for ICV data */ - tbl_ptr += sg_count - 1; - tbl_ptr->j_extent = 0; - tbl_ptr++; - tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; - tbl_ptr->len = cpu_to_be16(authsize); - - /* icv data follows link tables */ - to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + - (edesc->src_nents + edesc->dst_nents + - 2) * sizeof(struct talitos_ptr) + - authsize, 0); - desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, + &desc->ptr[5], sg_count, areq->assoclen, + tbl_off); + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) + to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); + + if (sg_count > 1) { edesc->icv_ool = true; + sync_needed = true; + + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { + struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; + int offset = (edesc->src_nents + edesc->dst_nents + 2) * + sizeof(struct talitos_ptr) + authsize; + + /* Add an entry to the link table for ICV data */ + tbl_ptr += sg_count - 1; + to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); + tbl_ptr++; + to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, + is_sec1); + to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); + + /* icv data follows link tables */ + to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, + is_sec1); + } } else { - copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0); + edesc->icv_ool = false; + } + + /* ICV data */ + if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { + to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); + to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + + areq->assoclen + cryptlen, is_sec1); } /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, - DMA_FROM_DEVICE); + if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, + DMA_FROM_DEVICE); + + if (sync_needed) + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { @@ -1233,7 +1321,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, bool encrypt) { struct talitos_edesc *edesc; - int src_nents, dst_nents, alloc_len, dma_len; + int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len; dma_addr_t iv_dma = 0; gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; @@ -1251,8 +1339,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); if (!dst || dst == src) { - src_nents = sg_nents_for_len(src, - assoclen + cryptlen + authsize); + src_len = assoclen + cryptlen + authsize; + src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); err = ERR_PTR(-EINVAL); @@ -1260,17 +1348,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, } src_nents = (src_nents == 1) ? 
0 : src_nents; dst_nents = dst ? src_nents : 0; + dst_len = 0; } else { /* dst && dst != src*/ - src_nents = sg_nents_for_len(src, assoclen + cryptlen + - (encrypt ? 0 : authsize)); + src_len = assoclen + cryptlen + (encrypt ? 0 : authsize); + src_nents = sg_nents_for_len(src, src_len); if (src_nents < 0) { dev_err(dev, "Invalid number of src SG.\n"); err = ERR_PTR(-EINVAL); goto error_sg; } src_nents = (src_nents == 1) ? 0 : src_nents; - dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + - (encrypt ? authsize : 0)); + dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); + dst_nents = sg_nents_for_len(dst, dst_len); if (dst_nents < 0) { dev_err(dev, "Invalid number of dst SG.\n"); err = ERR_PTR(-EINVAL); @@ -1287,8 +1376,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, alloc_len = sizeof(struct talitos_edesc); if (src_nents || dst_nents) { if (is_sec1) - dma_len = (src_nents ? cryptlen : 0) + - (dst_nents ? cryptlen : 0); + dma_len = (src_nents ? src_len : 0) + + (dst_nents ? dst_len : 0); else dma_len = (src_nents + dst_nents + 2) * sizeof(struct talitos_ptr) + authsize * 2; @@ -1412,40 +1501,13 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } -static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, - struct scatterlist *dst, unsigned int len, - struct talitos_edesc *edesc) -{ - struct talitos_private *priv = dev_get_drvdata(dev); - bool is_sec1 = has_ftr_sec1(priv); - - if (is_sec1) { - if (!edesc->src_nents) { - dma_unmap_sg(dev, src, 1, - dst != src ? DMA_TO_DEVICE - : DMA_BIDIRECTIONAL); - } - if (dst && edesc->dst_nents) { - dma_sync_single_for_device(dev, - edesc->dma_link_tbl + len, - len, DMA_FROM_DEVICE); - sg_copy_from_buffer(dst, edesc->dst_nents ? : 1, - edesc->buf + len, len); - } else if (dst && dst != src) { - dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); - } - } else { - talitos_sg_unmap(dev, edesc, src, dst); - } -} - static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) { unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); - unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); + talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); @@ -1470,100 +1532,6 @@ static void ablkcipher_done(struct device *dev, areq->base.complete(&areq->base, err); } -int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, - unsigned int len, struct talitos_edesc *edesc, - enum dma_data_direction dir, struct talitos_ptr *ptr) -{ - int sg_count; - struct talitos_private *priv = dev_get_drvdata(dev); - bool is_sec1 = has_ftr_sec1(priv); - - to_talitos_ptr_len(ptr, len, is_sec1); - - if (is_sec1) { - sg_count = edesc->src_nents ? : 1; - - if (sg_count == 1) { - dma_map_sg(dev, src, 1, dir); - to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); - } else { - sg_copy_to_buffer(src, sg_count, edesc->buf, len); - to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - len, DMA_TO_DEVICE); - } - } else { - to_talitos_ptr_extent_clear(ptr, is_sec1); - - sg_count = dma_map_sg(dev, src, edesc->src_nents ? 
: 1, dir); - - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); - } else { - sg_count = sg_to_link_tbl(src, sg_count, len, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, - edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed*/ - to_talitos_ptr(ptr, sg_dma_address(src), - is_sec1); - } - } - } - return sg_count; -} - -void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, - unsigned int len, struct talitos_edesc *edesc, - enum dma_data_direction dir, - struct talitos_ptr *ptr, int sg_count) -{ - struct talitos_private *priv = dev_get_drvdata(dev); - bool is_sec1 = has_ftr_sec1(priv); - - if (dir != DMA_NONE) - sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir); - - to_talitos_ptr_len(ptr, len, is_sec1); - - if (is_sec1) { - if (sg_count == 1) { - if (dir != DMA_NONE) - dma_map_sg(dev, dst, 1, dir); - to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); - } else { - to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); - dma_sync_single_for_device(dev, - edesc->dma_link_tbl + len, - len, DMA_FROM_DEVICE); - } - } else { - to_talitos_ptr_extent_clear(ptr, is_sec1); - - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); - } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(ptr, edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr), 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } - } -} - static int common_nonsnoop(struct talitos_edesc *edesc, struct ablkcipher_request *areq, void (*callback) (struct device *dev, @@ -1577,6 +1545,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, unsigned int cryptlen = areq->nbytes; unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); int sg_count, ret; + bool sync_needed = false; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); @@ -1586,25 +1555,39 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); - to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); + to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, (char *)&ctx->key, DMA_TO_DEVICE); + sg_count = edesc->src_nents ?: 1; + if (is_sec1 && sg_count > 1) + sg_copy_to_buffer(areq->src, sg_count, edesc->buf, + cryptlen); + else + sg_count = dma_map_sg(dev, areq->src, sg_count, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* * cipher in */ - sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, - (areq->src == areq->dst) ? - DMA_BIDIRECTIONAL : DMA_TO_DEVICE, - &desc->ptr[3]); + sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, + &desc->ptr[3], sg_count, 0, 0); + if (sg_count > 1) + sync_needed = true; /* cipher out */ - map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, - (areq->src == areq->dst) ? DMA_NONE - : DMA_FROM_DEVICE, - &desc->ptr[4], sg_count); + if (areq->src != areq->dst) { + sg_count = edesc->dst_nents ? 
: 1; + if (!is_sec1 || sg_count == 1) + dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); + } + + ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4], + sg_count, 0, (edesc->src_nents + 1)); + if (ret > 1) + sync_needed = true; /* iv out */ map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, @@ -1613,6 +1596,10 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* last DWORD empty */ desc->ptr[6] = zero_entry; + if (sync_needed) + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_unmap(dev, edesc, areq); @@ -1676,7 +1663,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); - unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); /* When using hashctx-in, must unmap it. */ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) @@ -1747,8 +1734,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; int ret; + bool sync_needed = false; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + int sg_count; /* first DWORD empty */ desc->ptr[0] = zero_entry; @@ -1773,11 +1762,19 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, else desc->ptr[2] = zero_entry; + sg_count = edesc->src_nents ?: 1; + if (is_sec1 && sg_count > 1) + sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); + else + sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, + DMA_TO_DEVICE); /* * data in */ - map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, - DMA_TO_DEVICE, &desc->ptr[3]); + sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, + &desc->ptr[3], sg_count, 0, 0); + if (sg_count > 1) + sync_needed = true; /* fifth DWORD empty */ desc->ptr[4] = zero_entry; @@ -1798,6 +1795,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); + if (sync_needed) + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_hash_unmap(dev, edesc, areq); @@ -2124,6 +2125,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, struct talitos_alg_template { u32 type; + u32 priority; union { struct crypto_alg crypto; struct ahash_alg hash; @@ -2155,6 +2157,27 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA1_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1)," @@ -2176,6 +2199,29 @@ static struct talitos_alg_template driver_algs[] = { 
DESC_HDR_MODE1_MDEU_PAD | DESC_HDR_MODE1_MDEU_SHA1_HMAC, }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA1_HMAC, + }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2196,6 +2242,27 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_PAD | DESC_HDR_MODE1_MDEU_SHA224_HMAC, }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA224_HMAC, + }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2219,6 +2286,29 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA224_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA224_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", @@ -2239,6 +2329,27 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA256_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256)," @@ -2261,6 +2372,29 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_SHA256_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-3des-talitos", + .cra_blocksize = 
DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_SHA256_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", @@ -2365,6 +2499,27 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_MD5_HMAC, }, { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CBC | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_MD5_HMAC, + }, + { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", @@ -2385,6 +2540,28 @@ static struct talitos_alg_template driver_algs[] = { DESC_HDR_MODE1_MDEU_PAD | DESC_HDR_MODE1_MDEU_MD5_HMAC, }, + { .type = CRYPTO_ALG_TYPE_AEAD, + .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, + .alg.aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_ASYNC, + }, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC | + DESC_HDR_MODE0_DEU_3DES | + DESC_HDR_SEL1_MDEUA | + DESC_HDR_MODE1_MDEU_INIT | + DESC_HDR_MODE1_MDEU_PAD | + DESC_HDR_MODE1_MDEU_MD5_HMAC, + }, /* ABLKCIPHER algorithms. 
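Note: most of the AEAD templates above are registered twice, once with the default IPSEC_ESP descriptor type and once as an HMAC_SNOOP_NO_AFEU (HSNA) variant at TALITOS_CRA_PRIORITY_AEAD_HSNA, one step below the default priority, so SEC2/3 hardware keeps preferring the single-pass IPSEC_ESP path while SEC1, which does not support IPSEC_ESP descriptors, can serve the same algorithms through HSNA.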
*/ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { @@ -2901,7 +3078,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, } alg->cra_module = THIS_MODULE; - alg->cra_priority = TALITOS_CRA_PRIORITY; + if (t_alg->algt.priority) + alg->cra_priority = t_alg->algt.priority; + else + alg->cra_priority = TALITOS_CRA_PRIORITY; alg->cra_alignmask = 0; alg->cra_ctxsize = sizeof(struct talitos_ctx); alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; @@ -4,9 +4,9 @@ # * License terms: GNU General Public License (GPL) version 2 */ ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG -CFLAGS_cryp_core.o := -DDEBUG -O0 -CFLAGS_cryp.o := -DDEBUG -O0 -CFLAGS_cryp_irq.o := -DDEBUG -O0 +CFLAGS_cryp_core.o := -DDEBUG +CFLAGS_cryp.o := -DDEBUG +CFLAGS_cryp_irq.o := -DDEBUG endif obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o @@ -4,7 +4,7 @@ # License terms: GNU General Public License (GPL) version 2 # ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG -CFLAGS_hash_core.o := -DDEBUG -O0 +CFLAGS_hash_core.o := -DDEBUG endif obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o @@ -781,7 +781,7 @@ static int hash_process_data(struct hash_device_data *device_data, &device_data->state); memmove(req_ctx->state.buffer, device_data->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + HASH_BLOCK_SIZE); if (ret) { dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", @@ -832,7 +832,7 @@ static int hash_process_data(struct hash_device_data *device_data, memmove(device_data->state.buffer, req_ctx->state.buffer, - HASH_BLOCK_SIZE / sizeof(u32)); + HASH_BLOCK_SIZE); if (ret) { dev_err(device_data->dev, "%s: hash_save_state() failed!\n", __func__); diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore new file mode 100644 index 000000000000..af4a7ce4738d --- /dev/null +++ b/drivers/crypto/vmx/.gitignore @@ -0,0 +1,2 @@ +aesp8-ppc.S +ghashp8-ppc.S @@ -1,5 +1,5 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o -vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o +vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) TARGET := linux-ppc64le @@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = { .cra_name = "cbc(aes)", .cra_driver_name = "p8_aes_cbc", .cra_module = THIS_MODULE, - .cra_priority = 1000, + .cra_priority = 2000, .cra_type = &crypto_blkcipher_type, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_alignmask = 0, @@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = { .cra_name = "ctr(aes)", .cra_driver_name = "p8_aes_ctr", .cra_module = THIS_MODULE, - .cra_priority = 1000, + .cra_priority = 2000, .cra_type = &crypto_blkcipher_type, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_alignmask = 0, diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c new file mode 100644 index 000000000000..cfb25413917c --- /dev/null +++ b/drivers/crypto/vmx/aes_xts.c @@ -0,0 +1,190 @@ +/** + * AES XTS routines supporting VMX In-core instructions on Power 8 + * + * Copyright (C) 2015 International Business Machines Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 only. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> + */ + +#include <linux/types.h> +#include <linux/err.h> +#include <linux/crypto.h> +#include <linux/delay.h> +#include <linux/hardirq.h> +#include <asm/switch_to.h> +#include <crypto/aes.h> +#include <crypto/scatterwalk.h> +#include <crypto/xts.h> + +#include "aesp8-ppc.h" + +struct p8_aes_xts_ctx { + struct crypto_blkcipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; + struct aes_key tweak_key; +}; + +static int p8_aes_xts_init(struct crypto_tfm *tfm) +{ + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = + crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags( + fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + ctx->fallback = fallback; + + return 0; +} + +static void p8_aes_xts_exit(struct crypto_tfm *tfm) +{ + struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } +} + +static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + int ret; + struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); + + ret = xts_check_key(tfm, key, keylen); + if (ret) + return ret; + + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key); + ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; +} + +static int p8_aes_xts_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, int enc) +{ + int ret; + u8 tweak[AES_BLOCK_SIZE]; + u8 *iv; + struct blkcipher_walk walk; + struct p8_aes_xts_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = enc ? 
crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) : + crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); + } else { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + + ret = blkcipher_walk_virt(desc, &walk); + iv = (u8 *)walk.iv; + memset(tweak, 0, AES_BLOCK_SIZE); + aes_p8_encrypt(iv, tweak, &ctx->tweak_key); + + while ((nbytes = walk.nbytes)) { + if (enc) + aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); + else + aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); + + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + } + return ret; +} + +static int p8_aes_xts_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + return p8_aes_xts_crypt(desc, dst, src, nbytes, 1); +} + +static int p8_aes_xts_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + return p8_aes_xts_crypt(desc, dst, src, nbytes, 0); +} + +struct crypto_alg p8_aes_xts_alg = { + .cra_name = "xts(aes)", + .cra_driver_name = "p8_aes_xts", + .cra_module = THIS_MODULE, + .cra_priority = 2000, + .cra_type = &crypto_blkcipher_type, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_xts_ctx), + .cra_init = p8_aes_xts_init, + .cra_exit = p8_aes_xts_exit, + .cra_blkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .setkey = p8_aes_xts_setkey, + .encrypt = p8_aes_xts_encrypt, + .decrypt = p8_aes_xts_decrypt, + } +}; @@ -19,3 +19,7 @@ void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, size_t len, const struct aes_key *key, const u8 *iv); +void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len, + const struct aes_key *key1, const struct aes_key *key2, u8 *iv); +void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len, + const struct aes_key *key1, const struct aes_key *key2, u8 *iv); @@ -1,4 +1,11 @@ -#!/usr/bin/env perl +#! /usr/bin/env perl +# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. +# +# Licensed under the OpenSSL license (the "License"). You may not use +# this file except in compliance with the License. You can obtain a copy +# in the file LICENSE in the source distribution or at +# https://www.openssl.org/source/license.html + # # ==================================================================== # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @@ -20,6 +27,19 @@ # instructions are interleaved. It's reckoned that eventual # misalignment penalties at page boundaries are in average lower # than additional overhead in pure AltiVec approach. +# +# May 2016 +# +# Add XTS subroutine; a 9x improvement on little-endian and a 12x +# improvement on big-endian systems was measured. +# +###################################################################### +# Current large-block performance in cycles per byte processed with +# 128-bit key (less is better).
+# +# CBC en-/decrypt CTR XTS +# POWER8[le] 3.96/0.72 0.74 1.1 +# POWER8[be] 3.75/0.65 0.66 1.0 $flavour = shift; @@ -1875,6 +1895,1845 @@ Lctr32_enc8x_done: ___ }} }}} +######################################################################### +{{{ # XTS procedures # +# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, # +# const AES_KEY *key1, const AES_KEY *key2, # +# [const] unsigned char iv[16]); # +# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which # +# input tweak value is assumed to be encrypted already, and last tweak # +# value, one suitable for consecutive call on same chunk of data, is # +# written back to original buffer. In addition, in "tweak chaining" # +# mode only complete input blocks are processed. # + +my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10)); +my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2)); +my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7)); +my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12)); +my $taillen = $key2; + + ($inp,$idx) = ($idx,$inp); # reassign + +$code.=<<___; +.globl .${prefix}_xts_encrypt + mr $inp,r3 # reassign + li r3,-1 + ${UCMP}i $len,16 + bltlr- + + lis r0,0xfff0 + mfspr r12,256 # save vrsave + li r11,0 + mtspr 256,r0 + + vspltisb $seven,0x07 # 0x070707..07 + le?lvsl $leperm,r11,r11 + le?vspltisb $tmp,0x0f + le?vxor $leperm,$leperm,$seven + + li $idx,15 + lvx $tweak,0,$ivp # load [unaligned] iv + lvsl $inpperm,0,$ivp + lvx $inptail,$idx,$ivp + le?vxor $inpperm,$inpperm,$tmp + vperm $tweak,$tweak,$inptail,$inpperm + + neg r11,$inp + lvsr $inpperm,0,r11 # prepare for unaligned load + lvx $inout,0,$inp + addi $inp,$inp,15 # 15 is not typo + le?vxor $inpperm,$inpperm,$tmp + + ${UCMP}i $key2,0 # key2==NULL? + beq Lxts_enc_no_key2 + + ?lvsl $keyperm,0,$key2 # prepare for unaligned key + lwz $rounds,240($key2) + srwi $rounds,$rounds,1 + subi $rounds,$rounds,1 + li $idx,16 + + lvx $rndkey0,0,$key2 + lvx $rndkey1,$idx,$key2 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $tweak,$tweak,$rndkey0 + lvx $rndkey0,$idx,$key2 + addi $idx,$idx,16 + mtctr $rounds + +Ltweak_xts_enc: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $tweak,$tweak,$rndkey1 + lvx $rndkey1,$idx,$key2 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipher $tweak,$tweak,$rndkey0 + lvx $rndkey0,$idx,$key2 + addi $idx,$idx,16 + bdnz Ltweak_xts_enc + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $tweak,$tweak,$rndkey1 + lvx $rndkey1,$idx,$key2 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipherlast $tweak,$tweak,$rndkey0 + + li $ivp,0 # don't chain the tweak + b Lxts_enc + +Lxts_enc_no_key2: + li $idx,-16 + and $len,$len,$idx # in "tweak chaining" + # mode only complete + # blocks are processed +Lxts_enc: + lvx $inptail,0,$inp + addi $inp,$inp,16 + + ?lvsl $keyperm,0,$key1 # prepare for unaligned key + lwz $rounds,240($key1) + srwi $rounds,$rounds,1 + subi $rounds,$rounds,1 + li $idx,16 + + vslb $eighty7,$seven,$seven # 0x808080..80 + vor $eighty7,$eighty7,$seven # 0x878787..87 + vspltisb $tmp,1 # 0x010101..01 + vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01 + + ${UCMP}i $len,96 + bge _aesp8_xts_encrypt6x + + andi. 
$taillen,$len,15 + subic r0,$len,32 + subi $taillen,$taillen,16 + subfe r0,r0,r0 + and r0,r0,$taillen + add $inp,$inp,r0 + + lvx $rndkey0,0,$key1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + vperm $inout,$inout,$inptail,$inpperm + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$inout,$tweak + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + mtctr $rounds + b Loop_xts_enc + +.align 5 +Loop_xts_enc: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + bdnz Loop_xts_enc + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + li $idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $rndkey0,$rndkey0,$tweak + vcipherlast $output,$inout,$rndkey0 + + le?vperm $tmp,$output,$output,$leperm + be?nop + le?stvx_u $tmp,0,$out + be?stvx_u $output,0,$out + addi $out,$out,16 + + subic. $len,$len,16 + beq Lxts_enc_done + + vmr $inout,$inptail + lvx $inptail,0,$inp + addi $inp,$inp,16 + lvx $rndkey0,0,$key1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + + subic r0,$len,32 + subfe r0,r0,r0 + and r0,r0,$taillen + add $inp,$inp,r0 + + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $tweak,$tweak,$tmp + + vperm $inout,$inout,$inptail,$inpperm + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$inout,$tweak + vxor $output,$output,$rndkey0 # just in case $len<16 + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + + mtctr $rounds + ${UCMP}i $len,16 + bge Loop_xts_enc + + vxor $output,$output,$tweak + lvsr $inpperm,0,$len # $inpperm is no longer needed + vxor $inptail,$inptail,$inptail # $inptail is no longer needed + vspltisb $tmp,-1 + vperm $inptail,$inptail,$tmp,$inpperm + vsel $inout,$inout,$output,$inptail + + subi r11,$out,17 + subi $out,$out,16 + mtctr $len + li $len,16 +Loop_xts_enc_steal: + lbzu r0,1(r11) + stb r0,16(r11) + bdnz Loop_xts_enc_steal + + mtctr $rounds + b Loop_xts_enc # one more time... + +Lxts_enc_done: + ${UCMP}i $ivp,0 + beq Lxts_enc_ret + + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $tweak,$tweak,$tmp + + le?vperm $tweak,$tweak,$tweak,$leperm + stvx_u $tweak,0,$ivp + +Lxts_enc_ret: + mtspr 256,r12 # restore vrsave + li r3,0 + blr + .long 0 + .byte 0,12,0x04,0,0x80,6,6,0 + .long 0 +.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt + +.globl .${prefix}_xts_decrypt + mr $inp,r3 # reassign + li r3,-1 + ${UCMP}i $len,16 + bltlr- + + lis r0,0xfff8 + mfspr r12,256 # save vrsave + li r11,0 + mtspr 256,r0 + + andi. r0,$len,15 + neg r0,r0 + andi. r0,r0,16 + sub $len,$len,r0 + + vspltisb $seven,0x07 # 0x070707..07 + le?lvsl $leperm,r11,r11 + le?vspltisb $tmp,0x0f + le?vxor $leperm,$leperm,$seven + + li $idx,15 + lvx $tweak,0,$ivp # load [unaligned] iv + lvsl $inpperm,0,$ivp + lvx $inptail,$idx,$ivp + le?vxor $inpperm,$inpperm,$tmp + vperm $tweak,$tweak,$inptail,$inpperm + + neg r11,$inp + lvsr $inpperm,0,r11 # prepare for unaligned load + lvx $inout,0,$inp + addi $inp,$inp,15 # 15 is not typo + le?vxor $inpperm,$inpperm,$tmp + + ${UCMP}i $key2,0 # key2==NULL? 
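# Decrypt-side tweak setup mirrors the encrypt path above: with a
# non-NULL $key2 the IV just loaded is encrypted with $key2
# (Ltweak_xts_dec below) to form the initial tweak; with a NULL $key2
# the IV is taken to be the already-encrypted tweak and, per the
# "tweak chaining" convention described earlier, only complete blocks
# are processed.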
+ beq Lxts_dec_no_key2 + + ?lvsl $keyperm,0,$key2 # prepare for unaligned key + lwz $rounds,240($key2) + srwi $rounds,$rounds,1 + subi $rounds,$rounds,1 + li $idx,16 + + lvx $rndkey0,0,$key2 + lvx $rndkey1,$idx,$key2 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $tweak,$tweak,$rndkey0 + lvx $rndkey0,$idx,$key2 + addi $idx,$idx,16 + mtctr $rounds + +Ltweak_xts_dec: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $tweak,$tweak,$rndkey1 + lvx $rndkey1,$idx,$key2 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipher $tweak,$tweak,$rndkey0 + lvx $rndkey0,$idx,$key2 + addi $idx,$idx,16 + bdnz Ltweak_xts_dec + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vcipher $tweak,$tweak,$rndkey1 + lvx $rndkey1,$idx,$key2 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vcipherlast $tweak,$tweak,$rndkey0 + + li $ivp,0 # don't chain the tweak + b Lxts_dec + +Lxts_dec_no_key2: + neg $idx,$len + andi. $idx,$idx,15 + add $len,$len,$idx # in "tweak chaining" + # mode only complete + # blocks are processed +Lxts_dec: + lvx $inptail,0,$inp + addi $inp,$inp,16 + + ?lvsl $keyperm,0,$key1 # prepare for unaligned key + lwz $rounds,240($key1) + srwi $rounds,$rounds,1 + subi $rounds,$rounds,1 + li $idx,16 + + vslb $eighty7,$seven,$seven # 0x808080..80 + vor $eighty7,$eighty7,$seven # 0x878787..87 + vspltisb $tmp,1 # 0x010101..01 + vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01 + + ${UCMP}i $len,96 + bge _aesp8_xts_decrypt6x + + lvx $rndkey0,0,$key1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + vperm $inout,$inout,$inptail,$inpperm + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$inout,$tweak + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + mtctr $rounds + + ${UCMP}i $len,16 + blt Ltail_xts_dec + be?b Loop_xts_dec + +.align 5 +Loop_xts_dec: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vncipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + bdnz Loop_xts_dec + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + li $idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $rndkey0,$rndkey0,$tweak + vncipherlast $output,$inout,$rndkey0 + + le?vperm $tmp,$output,$output,$leperm + be?nop + le?stvx_u $tmp,0,$out + be?stvx_u $output,0,$out + addi $out,$out,16 + + subic. 
$len,$len,16 + beq Lxts_dec_done + + vmr $inout,$inptail + lvx $inptail,0,$inp + addi $inp,$inp,16 + lvx $rndkey0,0,$key1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $tweak,$tweak,$tmp + + vperm $inout,$inout,$inptail,$inpperm + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $inout,$inout,$tweak + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + + mtctr $rounds + ${UCMP}i $len,16 + bge Loop_xts_dec + +Ltail_xts_dec: + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak1,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $tweak1,$tweak1,$tmp + + subi $inp,$inp,16 + add $inp,$inp,$len + + vxor $inout,$inout,$tweak # :-( + vxor $inout,$inout,$tweak1 # :-) + +Loop_xts_dec_short: + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vncipher $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + bdnz Loop_xts_dec_short + + ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm + vncipher $inout,$inout,$rndkey1 + lvx $rndkey1,$idx,$key1 + li $idx,16 + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + vxor $rndkey0,$rndkey0,$tweak1 + vncipherlast $output,$inout,$rndkey0 + + le?vperm $tmp,$output,$output,$leperm + be?nop + le?stvx_u $tmp,0,$out + be?stvx_u $output,0,$out + + vmr $inout,$inptail + lvx $inptail,0,$inp + #addi $inp,$inp,16 + lvx $rndkey0,0,$key1 + lvx $rndkey1,$idx,$key1 + addi $idx,$idx,16 + vperm $inout,$inout,$inptail,$inpperm + ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm + + lvsr $inpperm,0,$len # $inpperm is no longer needed + vxor $inptail,$inptail,$inptail # $inptail is no longer needed + vspltisb $tmp,-1 + vperm $inptail,$inptail,$tmp,$inpperm + vsel $inout,$inout,$output,$inptail + + vxor $rndkey0,$rndkey0,$tweak + vxor $inout,$inout,$rndkey0 + lvx $rndkey0,$idx,$key1 + addi $idx,$idx,16 + + subi r11,$out,1 + mtctr $len + li $len,16 +Loop_xts_dec_steal: + lbzu r0,1(r11) + stb r0,16(r11) + bdnz Loop_xts_dec_steal + + mtctr $rounds + b Loop_xts_dec # one more time... 
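# Ciphertext stealing, the decrypt-side counterpart of
# Loop_xts_enc_steal: the last complete block was processed with the
# next tweak value ($tweak1), the byte-copy loop above splices the
# stolen tail into place, and the branch re-enters Loop_xts_dec to
# handle the reassembled final block with the current tweak ($tweak).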
+ +Lxts_dec_done: + ${UCMP}i $ivp,0 + beq Lxts_dec_ret + + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $tweak,$tweak,$tmp + + le?vperm $tweak,$tweak,$tweak,$leperm + stvx_u $tweak,0,$ivp + +Lxts_dec_ret: + mtspr 256,r12 # restore vrsave + li r3,0 + blr + .long 0 + .byte 0,12,0x04,0,0x80,6,6,0 + .long 0 +.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt +___ +######################################################################### +{{ # Optimized XTS procedures # +my $key_=$key2; +my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31)); + $x00=0 if ($flavour =~ /osx/); +my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5)); +my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16)); +my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22)); +my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys + # v26-v31 last 6 round keys +my ($keyperm)=($out0); # aliases with "caller", redundant assignment +my $taillen=$x70; + +$code.=<<___; +.align 5 +_aesp8_xts_encrypt6x: + $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) + mflr r11 + li r7,`$FRAME+8*16+15` + li r3,`$FRAME+8*16+31` + $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp) + stvx v20,r7,$sp # ABI says so + addi r7,r7,32 + stvx v21,r3,$sp + addi r3,r3,32 + stvx v22,r7,$sp + addi r7,r7,32 + stvx v23,r3,$sp + addi r3,r3,32 + stvx v24,r7,$sp + addi r7,r7,32 + stvx v25,r3,$sp + addi r3,r3,32 + stvx v26,r7,$sp + addi r7,r7,32 + stvx v27,r3,$sp + addi r3,r3,32 + stvx v28,r7,$sp + addi r7,r7,32 + stvx v29,r3,$sp + addi r3,r3,32 + stvx v30,r7,$sp + stvx v31,r3,$sp + li r0,-1 + stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave + li $x10,0x10 + $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) + li $x20,0x20 + $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) + li $x30,0x30 + $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) + li $x40,0x40 + $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) + li $x50,0x50 + $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) + li $x60,0x60 + $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) + li $x70,0x70 + mtspr 256,r0 + + subi $rounds,$rounds,3 # -4 in total + + lvx $rndkey0,$x00,$key1 # load key schedule + lvx v30,$x10,$key1 + addi $key1,$key1,0x20 + lvx v31,$x00,$key1 + ?vperm $rndkey0,$rndkey0,v30,$keyperm + addi $key_,$sp,$FRAME+15 + mtctr $rounds + +Load_xts_enc_key: + ?vperm v24,v30,v31,$keyperm + lvx v30,$x10,$key1 + addi $key1,$key1,0x20 + stvx v24,$x00,$key_ # off-load round[1] + ?vperm v25,v31,v30,$keyperm + lvx v31,$x00,$key1 + stvx v25,$x10,$key_ # off-load round[2] + addi $key_,$key_,0x20 + bdnz Load_xts_enc_key + + lvx v26,$x10,$key1 + ?vperm v24,v30,v31,$keyperm + lvx v27,$x20,$key1 + stvx v24,$x00,$key_ # off-load round[3] + ?vperm v25,v31,v26,$keyperm + lvx v28,$x30,$key1 + stvx v25,$x10,$key_ # off-load round[4] + addi $key_,$sp,$FRAME+15 # rewind $key_ + ?vperm v26,v26,v27,$keyperm + lvx v29,$x40,$key1 + ?vperm v27,v27,v28,$keyperm + lvx v30,$x50,$key1 + ?vperm v28,v28,v29,$keyperm + lvx v31,$x60,$key1 + ?vperm v29,v29,v30,$keyperm + lvx $twk5,$x70,$key1 # borrow $twk5 + ?vperm v30,v30,v31,$keyperm + lvx v24,$x00,$key_ # pre-load round[1] + ?vperm v31,v31,$twk5,$keyperm + lvx v25,$x10,$key_ # pre-load round[2] + + vperm $in0,$inout,$inptail,$inpperm + subi $inp,$inp,31 # undo "caller" + vxor $twk0,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $out0,$in0,$twk0 + vxor $tweak,$tweak,$tmp + + lvx_u $in1,$x10,$inp + vxor 
$twk1,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in1,$in1,$in1,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out1,$in1,$twk1 + vxor $tweak,$tweak,$tmp + + lvx_u $in2,$x20,$inp + andi. $taillen,$len,15 + vxor $twk2,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in2,$in2,$in2,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out2,$in2,$twk2 + vxor $tweak,$tweak,$tmp + + lvx_u $in3,$x30,$inp + sub $len,$len,$taillen + vxor $twk3,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in3,$in3,$in3,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out3,$in3,$twk3 + vxor $tweak,$tweak,$tmp + + lvx_u $in4,$x40,$inp + subi $len,$len,0x60 + vxor $twk4,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in4,$in4,$in4,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out4,$in4,$twk4 + vxor $tweak,$tweak,$tmp + + lvx_u $in5,$x50,$inp + addi $inp,$inp,0x60 + vxor $twk5,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in5,$in5,$in5,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out5,$in5,$twk5 + vxor $tweak,$tweak,$tmp + + vxor v31,v31,$rndkey0 + mtctr $rounds + b Loop_xts_enc6x + +.align 5 +Loop_xts_enc6x: + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vcipher $out4,$out4,v24 + vcipher $out5,$out5,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vcipher $out4,$out4,v25 + vcipher $out5,$out5,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_xts_enc6x + + subic $len,$len,96 # $len-=96 + vxor $in0,$twk0,v31 # xor with last round key + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk0,$tweak,$rndkey0 + vaddubm $tweak,$tweak,$tweak + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vsldoi $tmp,$tmp,$tmp,15 + vcipher $out4,$out4,v24 + vcipher $out5,$out5,v24 + + subfe. 
r0,r0,r0 # borrow?-1:0 + vand $tmp,$tmp,$eighty7 + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vxor $tweak,$tweak,$tmp + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vxor $in1,$twk1,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk1,$tweak,$rndkey0 + vcipher $out4,$out4,v25 + vcipher $out5,$out5,v25 + + and r0,r0,$len + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vcipher $out0,$out0,v26 + vcipher $out1,$out1,v26 + vand $tmp,$tmp,$eighty7 + vcipher $out2,$out2,v26 + vcipher $out3,$out3,v26 + vxor $tweak,$tweak,$tmp + vcipher $out4,$out4,v26 + vcipher $out5,$out5,v26 + + add $inp,$inp,r0 # $inp is adjusted in such + # way that at exit from the + # loop inX-in5 are loaded + # with last "words" + vxor $in2,$twk2,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk2,$tweak,$rndkey0 + vaddubm $tweak,$tweak,$tweak + vcipher $out0,$out0,v27 + vcipher $out1,$out1,v27 + vsldoi $tmp,$tmp,$tmp,15 + vcipher $out2,$out2,v27 + vcipher $out3,$out3,v27 + vand $tmp,$tmp,$eighty7 + vcipher $out4,$out4,v27 + vcipher $out5,$out5,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vxor $tweak,$tweak,$tmp + vcipher $out0,$out0,v28 + vcipher $out1,$out1,v28 + vxor $in3,$twk3,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk3,$tweak,$rndkey0 + vcipher $out2,$out2,v28 + vcipher $out3,$out3,v28 + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vcipher $out4,$out4,v28 + vcipher $out5,$out5,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + vand $tmp,$tmp,$eighty7 + + vcipher $out0,$out0,v29 + vcipher $out1,$out1,v29 + vxor $tweak,$tweak,$tmp + vcipher $out2,$out2,v29 + vcipher $out3,$out3,v29 + vxor $in4,$twk4,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk4,$tweak,$rndkey0 + vcipher $out4,$out4,v29 + vcipher $out5,$out5,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + + vcipher $out0,$out0,v30 + vcipher $out1,$out1,v30 + vand $tmp,$tmp,$eighty7 + vcipher $out2,$out2,v30 + vcipher $out3,$out3,v30 + vxor $tweak,$tweak,$tmp + vcipher $out4,$out4,v30 + vcipher $out5,$out5,v30 + vxor $in5,$twk5,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk5,$tweak,$rndkey0 + + vcipherlast $out0,$out0,$in0 + lvx_u $in0,$x00,$inp # load next input block + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vcipherlast $out1,$out1,$in1 + lvx_u $in1,$x10,$inp + vcipherlast $out2,$out2,$in2 + le?vperm $in0,$in0,$in0,$leperm + lvx_u $in2,$x20,$inp + vand $tmp,$tmp,$eighty7 + vcipherlast $out3,$out3,$in3 + le?vperm $in1,$in1,$in1,$leperm + lvx_u $in3,$x30,$inp + vcipherlast $out4,$out4,$in4 + le?vperm $in2,$in2,$in2,$leperm + lvx_u $in4,$x40,$inp + vxor $tweak,$tweak,$tmp + vcipherlast $tmp,$out5,$in5 # last block might be needed + # in stealing mode + le?vperm $in3,$in3,$in3,$leperm + lvx_u $in5,$x50,$inp + addi $inp,$inp,0x60 + le?vperm $in4,$in4,$in4,$leperm + le?vperm $in5,$in5,$in5,$leperm + + le?vperm $out0,$out0,$out0,$leperm + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk0 + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + vxor $out1,$in1,$twk1 + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + vxor $out2,$in2,$twk2 + le?vperm $out4,$out4,$out4,$leperm + stvx_u $out3,$x30,$out + vxor $out3,$in3,$twk3 + le?vperm $out5,$tmp,$tmp,$leperm + stvx_u $out4,$x40,$out + vxor $out4,$in4,$twk4 + le?stvx_u $out5,$x50,$out + be?stvx_u $tmp, $x50,$out + vxor $out5,$in5,$twk5 + addi $out,$out,0x60 + + mtctr 
$rounds + beq Loop_xts_enc6x # did $len-=96 borrow? + + addic. $len,$len,0x60 + beq Lxts_enc6x_zero + cmpwi $len,0x20 + blt Lxts_enc6x_one + nop + beq Lxts_enc6x_two + cmpwi $len,0x40 + blt Lxts_enc6x_three + nop + beq Lxts_enc6x_four + +Lxts_enc6x_five: + vxor $out0,$in1,$twk0 + vxor $out1,$in2,$twk1 + vxor $out2,$in3,$twk2 + vxor $out3,$in4,$twk3 + vxor $out4,$in5,$twk4 + + bl _aesp8_xts_enc5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk5 # unused tweak + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + vxor $tmp,$out4,$twk5 # last block prep for stealing + le?vperm $out4,$out4,$out4,$leperm + stvx_u $out3,$x30,$out + stvx_u $out4,$x40,$out + addi $out,$out,0x50 + bne Lxts_enc6x_steal + b Lxts_enc6x_done + +.align 4 +Lxts_enc6x_four: + vxor $out0,$in2,$twk0 + vxor $out1,$in3,$twk1 + vxor $out2,$in4,$twk2 + vxor $out3,$in5,$twk3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_enc5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk4 # unused tweak + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + vxor $tmp,$out3,$twk4 # last block prep for stealing + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + stvx_u $out3,$x30,$out + addi $out,$out,0x40 + bne Lxts_enc6x_steal + b Lxts_enc6x_done + +.align 4 +Lxts_enc6x_three: + vxor $out0,$in3,$twk0 + vxor $out1,$in4,$twk1 + vxor $out2,$in5,$twk2 + vxor $out3,$out3,$out3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_enc5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk3 # unused tweak + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $tmp,$out2,$twk3 # last block prep for stealing + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + stvx_u $out2,$x20,$out + addi $out,$out,0x30 + bne Lxts_enc6x_steal + b Lxts_enc6x_done + +.align 4 +Lxts_enc6x_two: + vxor $out0,$in4,$twk0 + vxor $out1,$in5,$twk1 + vxor $out2,$out2,$out2 + vxor $out3,$out3,$out3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_enc5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk2 # unused tweak + vxor $tmp,$out1,$twk2 # last block prep for stealing + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + stvx_u $out1,$x10,$out + addi $out,$out,0x20 + bne Lxts_enc6x_steal + b Lxts_enc6x_done + +.align 4 +Lxts_enc6x_one: + vxor $out0,$in5,$twk0 + nop +Loop_xts_enc1x: + vcipher $out0,$out0,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vcipher $out0,$out0,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_xts_enc1x + + add $inp,$inp,$taillen + cmpwi $taillen,0 + vcipher $out0,$out0,v24 + + subi $inp,$inp,16 + vcipher $out0,$out0,v25 + + lvsr $inpperm,0,$taillen + vcipher $out0,$out0,v26 + + lvx_u $in0,0,$inp + vcipher $out0,$out0,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vcipher $out0,$out0,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + + vcipher $out0,$out0,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vxor $twk0,$twk0,v31 + + le?vperm $in0,$in0,$in0,$leperm + vcipher $out0,$out0,v30 + + vperm $in0,$in0,$in0,$inpperm + vcipherlast $out0,$out0,$twk0 + + vmr $twk0,$twk1 # unused tweak + vxor $tmp,$out0,$twk1 # last block prep for stealing + le?vperm $out0,$out0,$out0,$leperm + stvx_u $out0,$x00,$out # store output + addi $out,$out,0x10 + bne Lxts_enc6x_steal + b Lxts_enc6x_done + +.align 4 +Lxts_enc6x_zero: + cmpwi 
$taillen,0 + beq Lxts_enc6x_done + + add $inp,$inp,$taillen + subi $inp,$inp,16 + lvx_u $in0,0,$inp + lvsr $inpperm,0,$taillen # $in5 is no more + le?vperm $in0,$in0,$in0,$leperm + vperm $in0,$in0,$in0,$inpperm + vxor $tmp,$tmp,$twk0 +Lxts_enc6x_steal: + vxor $in0,$in0,$twk0 + vxor $out0,$out0,$out0 + vspltisb $out1,-1 + vperm $out0,$out0,$out1,$inpperm + vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember? + + subi r30,$out,17 + subi $out,$out,16 + mtctr $taillen +Loop_xts_enc6x_steal: + lbzu r0,1(r30) + stb r0,16(r30) + bdnz Loop_xts_enc6x_steal + + li $taillen,0 + mtctr $rounds + b Loop_xts_enc1x # one more time... + +.align 4 +Lxts_enc6x_done: + ${UCMP}i $ivp,0 + beq Lxts_enc6x_ret + + vxor $tweak,$twk0,$rndkey0 + le?vperm $tweak,$tweak,$tweak,$leperm + stvx_u $tweak,0,$ivp + +Lxts_enc6x_ret: + mtlr r11 + li r10,`$FRAME+15` + li r11,`$FRAME+31` + stvx $seven,r10,$sp # wipe copies of round keys + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + + mtspr 256,$vrsave + lvx v20,r10,$sp # ABI says so + addi r10,r10,32 + lvx v21,r11,$sp + addi r11,r11,32 + lvx v22,r10,$sp + addi r10,r10,32 + lvx v23,r11,$sp + addi r11,r11,32 + lvx v24,r10,$sp + addi r10,r10,32 + lvx v25,r11,$sp + addi r11,r11,32 + lvx v26,r10,$sp + addi r10,r10,32 + lvx v27,r11,$sp + addi r11,r11,32 + lvx v28,r10,$sp + addi r10,r10,32 + lvx v29,r11,$sp + addi r11,r11,32 + lvx v30,r10,$sp + lvx v31,r11,$sp + $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) + $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) + $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) + $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) + $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) + $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) + addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` + blr + .long 0 + .byte 0,12,0x04,1,0x80,6,6,0 + .long 0 + +.align 5 +_aesp8_xts_enc5x: + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vcipher $out4,$out4,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vcipher $out4,$out4,v25 + lvx v25,$x10,$key_ # round[4] + bdnz _aesp8_xts_enc5x + + add $inp,$inp,$taillen + cmpwi $taillen,0 + vcipher $out0,$out0,v24 + vcipher $out1,$out1,v24 + vcipher $out2,$out2,v24 + vcipher $out3,$out3,v24 + vcipher $out4,$out4,v24 + + subi $inp,$inp,16 + vcipher $out0,$out0,v25 + vcipher $out1,$out1,v25 + vcipher $out2,$out2,v25 + vcipher $out3,$out3,v25 + vcipher $out4,$out4,v25 + vxor $twk0,$twk0,v31 + + vcipher $out0,$out0,v26 + lvsr $inpperm,r0,$taillen # $in5 is no more + vcipher $out1,$out1,v26 + vcipher $out2,$out2,v26 + vcipher $out3,$out3,v26 + vcipher $out4,$out4,v26 + vxor $in1,$twk1,v31 + + vcipher $out0,$out0,v27 + lvx_u $in0,0,$inp + vcipher $out1,$out1,v27 + vcipher $out2,$out2,v27 + vcipher $out3,$out3,v27 + vcipher $out4,$out4,v27 + vxor $in2,$twk2,v31 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vcipher $out0,$out0,v28 + vcipher $out1,$out1,v28 + vcipher $out2,$out2,v28 + vcipher $out3,$out3,v28 + vcipher $out4,$out4,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + vxor $in3,$twk3,v31 + + vcipher $out0,$out0,v29 + le?vperm $in0,$in0,$in0,$leperm + vcipher $out1,$out1,v29 + vcipher $out2,$out2,v29 + vcipher $out3,$out3,v29 + vcipher $out4,$out4,v29 + lvx v25,$x10,$key_ # 
re-pre-load round[2] + vxor $in4,$twk4,v31 + + vcipher $out0,$out0,v30 + vperm $in0,$in0,$in0,$inpperm + vcipher $out1,$out1,v30 + vcipher $out2,$out2,v30 + vcipher $out3,$out3,v30 + vcipher $out4,$out4,v30 + + vcipherlast $out0,$out0,$twk0 + vcipherlast $out1,$out1,$in1 + vcipherlast $out2,$out2,$in2 + vcipherlast $out3,$out3,$in3 + vcipherlast $out4,$out4,$in4 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +.align 5 +_aesp8_xts_decrypt6x: + $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp) + mflr r11 + li r7,`$FRAME+8*16+15` + li r3,`$FRAME+8*16+31` + $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp) + stvx v20,r7,$sp # ABI says so + addi r7,r7,32 + stvx v21,r3,$sp + addi r3,r3,32 + stvx v22,r7,$sp + addi r7,r7,32 + stvx v23,r3,$sp + addi r3,r3,32 + stvx v24,r7,$sp + addi r7,r7,32 + stvx v25,r3,$sp + addi r3,r3,32 + stvx v26,r7,$sp + addi r7,r7,32 + stvx v27,r3,$sp + addi r3,r3,32 + stvx v28,r7,$sp + addi r7,r7,32 + stvx v29,r3,$sp + addi r3,r3,32 + stvx v30,r7,$sp + stvx v31,r3,$sp + li r0,-1 + stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave + li $x10,0x10 + $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp) + li $x20,0x20 + $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp) + li $x30,0x30 + $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp) + li $x40,0x40 + $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp) + li $x50,0x50 + $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp) + li $x60,0x60 + $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp) + li $x70,0x70 + mtspr 256,r0 + + subi $rounds,$rounds,3 # -4 in total + + lvx $rndkey0,$x00,$key1 # load key schedule + lvx v30,$x10,$key1 + addi $key1,$key1,0x20 + lvx v31,$x00,$key1 + ?vperm $rndkey0,$rndkey0,v30,$keyperm + addi $key_,$sp,$FRAME+15 + mtctr $rounds + +Load_xts_dec_key: + ?vperm v24,v30,v31,$keyperm + lvx v30,$x10,$key1 + addi $key1,$key1,0x20 + stvx v24,$x00,$key_ # off-load round[1] + ?vperm v25,v31,v30,$keyperm + lvx v31,$x00,$key1 + stvx v25,$x10,$key_ # off-load round[2] + addi $key_,$key_,0x20 + bdnz Load_xts_dec_key + + lvx v26,$x10,$key1 + ?vperm v24,v30,v31,$keyperm + lvx v27,$x20,$key1 + stvx v24,$x00,$key_ # off-load round[3] + ?vperm v25,v31,v26,$keyperm + lvx v28,$x30,$key1 + stvx v25,$x10,$key_ # off-load round[4] + addi $key_,$sp,$FRAME+15 # rewind $key_ + ?vperm v26,v26,v27,$keyperm + lvx v29,$x40,$key1 + ?vperm v27,v27,v28,$keyperm + lvx v30,$x50,$key1 + ?vperm v28,v28,v29,$keyperm + lvx v31,$x60,$key1 + ?vperm v29,v29,v30,$keyperm + lvx $twk5,$x70,$key1 # borrow $twk5 + ?vperm v30,v30,v31,$keyperm + lvx v24,$x00,$key_ # pre-load round[1] + ?vperm v31,v31,$twk5,$keyperm + lvx v25,$x10,$key_ # pre-load round[2] + + vperm $in0,$inout,$inptail,$inpperm + subi $inp,$inp,31 # undo "caller" + vxor $twk0,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vand $tmp,$tmp,$eighty7 + vxor $out0,$in0,$twk0 + vxor $tweak,$tweak,$tmp + + lvx_u $in1,$x10,$inp + vxor $twk1,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in1,$in1,$in1,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out1,$in1,$twk1 + vxor $tweak,$tweak,$tmp + + lvx_u $in2,$x20,$inp + andi. 
$taillen,$len,15 + vxor $twk2,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in2,$in2,$in2,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out2,$in2,$twk2 + vxor $tweak,$tweak,$tmp + + lvx_u $in3,$x30,$inp + sub $len,$len,$taillen + vxor $twk3,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in3,$in3,$in3,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out3,$in3,$twk3 + vxor $tweak,$tweak,$tmp + + lvx_u $in4,$x40,$inp + subi $len,$len,0x60 + vxor $twk4,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in4,$in4,$in4,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out4,$in4,$twk4 + vxor $tweak,$tweak,$tmp + + lvx_u $in5,$x50,$inp + addi $inp,$inp,0x60 + vxor $twk5,$tweak,$rndkey0 + vsrab $tmp,$tweak,$seven # next tweak value + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + le?vperm $in5,$in5,$in5,$leperm + vand $tmp,$tmp,$eighty7 + vxor $out5,$in5,$twk5 + vxor $tweak,$tweak,$tmp + + vxor v31,v31,$rndkey0 + mtctr $rounds + b Loop_xts_dec6x + +.align 5 +Loop_xts_dec6x: + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_xts_dec6x + + subic $len,$len,96 # $len-=96 + vxor $in0,$twk0,v31 # xor with last round key + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk0,$tweak,$rndkey0 + vaddubm $tweak,$tweak,$tweak + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vsldoi $tmp,$tmp,$tmp,15 + vncipher $out4,$out4,v24 + vncipher $out5,$out5,v24 + + subfe. 
r0,r0,r0 # borrow?-1:0 + vand $tmp,$tmp,$eighty7 + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vxor $tweak,$tweak,$tmp + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vxor $in1,$twk1,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk1,$tweak,$rndkey0 + vncipher $out4,$out4,v25 + vncipher $out5,$out5,v25 + + and r0,r0,$len + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vncipher $out0,$out0,v26 + vncipher $out1,$out1,v26 + vand $tmp,$tmp,$eighty7 + vncipher $out2,$out2,v26 + vncipher $out3,$out3,v26 + vxor $tweak,$tweak,$tmp + vncipher $out4,$out4,v26 + vncipher $out5,$out5,v26 + + add $inp,$inp,r0 # $inp is adjusted in such + # way that at exit from the + # loop inX-in5 are loaded + # with last "words" + vxor $in2,$twk2,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk2,$tweak,$rndkey0 + vaddubm $tweak,$tweak,$tweak + vncipher $out0,$out0,v27 + vncipher $out1,$out1,v27 + vsldoi $tmp,$tmp,$tmp,15 + vncipher $out2,$out2,v27 + vncipher $out3,$out3,v27 + vand $tmp,$tmp,$eighty7 + vncipher $out4,$out4,v27 + vncipher $out5,$out5,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vxor $tweak,$tweak,$tmp + vncipher $out0,$out0,v28 + vncipher $out1,$out1,v28 + vxor $in3,$twk3,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk3,$tweak,$rndkey0 + vncipher $out2,$out2,v28 + vncipher $out3,$out3,v28 + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vncipher $out4,$out4,v28 + vncipher $out5,$out5,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + vand $tmp,$tmp,$eighty7 + + vncipher $out0,$out0,v29 + vncipher $out1,$out1,v29 + vxor $tweak,$tweak,$tmp + vncipher $out2,$out2,v29 + vncipher $out3,$out3,v29 + vxor $in4,$twk4,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk4,$tweak,$rndkey0 + vncipher $out4,$out4,v29 + vncipher $out5,$out5,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + + vncipher $out0,$out0,v30 + vncipher $out1,$out1,v30 + vand $tmp,$tmp,$eighty7 + vncipher $out2,$out2,v30 + vncipher $out3,$out3,v30 + vxor $tweak,$tweak,$tmp + vncipher $out4,$out4,v30 + vncipher $out5,$out5,v30 + vxor $in5,$twk5,v31 + vsrab $tmp,$tweak,$seven # next tweak value + vxor $twk5,$tweak,$rndkey0 + + vncipherlast $out0,$out0,$in0 + lvx_u $in0,$x00,$inp # load next input block + vaddubm $tweak,$tweak,$tweak + vsldoi $tmp,$tmp,$tmp,15 + vncipherlast $out1,$out1,$in1 + lvx_u $in1,$x10,$inp + vncipherlast $out2,$out2,$in2 + le?vperm $in0,$in0,$in0,$leperm + lvx_u $in2,$x20,$inp + vand $tmp,$tmp,$eighty7 + vncipherlast $out3,$out3,$in3 + le?vperm $in1,$in1,$in1,$leperm + lvx_u $in3,$x30,$inp + vncipherlast $out4,$out4,$in4 + le?vperm $in2,$in2,$in2,$leperm + lvx_u $in4,$x40,$inp + vxor $tweak,$tweak,$tmp + vncipherlast $out5,$out5,$in5 + le?vperm $in3,$in3,$in3,$leperm + lvx_u $in5,$x50,$inp + addi $inp,$inp,0x60 + le?vperm $in4,$in4,$in4,$leperm + le?vperm $in5,$in5,$in5,$leperm + + le?vperm $out0,$out0,$out0,$leperm + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk0 + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + vxor $out1,$in1,$twk1 + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + vxor $out2,$in2,$twk2 + le?vperm $out4,$out4,$out4,$leperm + stvx_u $out3,$x30,$out + vxor $out3,$in3,$twk3 + le?vperm $out5,$out5,$out5,$leperm + stvx_u $out4,$x40,$out + vxor $out4,$in4,$twk4 + stvx_u $out5,$x50,$out + vxor $out5,$in5,$twk5 + addi $out,$out,0x60 + + mtctr $rounds + beq Loop_xts_dec6x # did 
$len-=96 borrow? + + addic. $len,$len,0x60 + beq Lxts_dec6x_zero + cmpwi $len,0x20 + blt Lxts_dec6x_one + nop + beq Lxts_dec6x_two + cmpwi $len,0x40 + blt Lxts_dec6x_three + nop + beq Lxts_dec6x_four + +Lxts_dec6x_five: + vxor $out0,$in1,$twk0 + vxor $out1,$in2,$twk1 + vxor $out2,$in3,$twk2 + vxor $out3,$in4,$twk3 + vxor $out4,$in5,$twk4 + + bl _aesp8_xts_dec5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk5 # unused tweak + vxor $twk1,$tweak,$rndkey0 + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk1 + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + le?vperm $out4,$out4,$out4,$leperm + stvx_u $out3,$x30,$out + stvx_u $out4,$x40,$out + addi $out,$out,0x50 + bne Lxts_dec6x_steal + b Lxts_dec6x_done + +.align 4 +Lxts_dec6x_four: + vxor $out0,$in2,$twk0 + vxor $out1,$in3,$twk1 + vxor $out2,$in4,$twk2 + vxor $out3,$in5,$twk3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_dec5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk4 # unused tweak + vmr $twk1,$twk5 + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk5 + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + le?vperm $out3,$out3,$out3,$leperm + stvx_u $out2,$x20,$out + stvx_u $out3,$x30,$out + addi $out,$out,0x40 + bne Lxts_dec6x_steal + b Lxts_dec6x_done + +.align 4 +Lxts_dec6x_three: + vxor $out0,$in3,$twk0 + vxor $out1,$in4,$twk1 + vxor $out2,$in5,$twk2 + vxor $out3,$out3,$out3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_dec5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk3 # unused tweak + vmr $twk1,$twk4 + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk4 + le?vperm $out2,$out2,$out2,$leperm + stvx_u $out1,$x10,$out + stvx_u $out2,$x20,$out + addi $out,$out,0x30 + bne Lxts_dec6x_steal + b Lxts_dec6x_done + +.align 4 +Lxts_dec6x_two: + vxor $out0,$in4,$twk0 + vxor $out1,$in5,$twk1 + vxor $out2,$out2,$out2 + vxor $out3,$out3,$out3 + vxor $out4,$out4,$out4 + + bl _aesp8_xts_dec5x + + le?vperm $out0,$out0,$out0,$leperm + vmr $twk0,$twk2 # unused tweak + vmr $twk1,$twk3 + le?vperm $out1,$out1,$out1,$leperm + stvx_u $out0,$x00,$out # store output + vxor $out0,$in0,$twk3 + stvx_u $out1,$x10,$out + addi $out,$out,0x20 + bne Lxts_dec6x_steal + b Lxts_dec6x_done + +.align 4 +Lxts_dec6x_one: + vxor $out0,$in5,$twk0 + nop +Loop_xts_dec1x: + vncipher $out0,$out0,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vncipher $out0,$out0,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Loop_xts_dec1x + + subi r0,$taillen,1 + vncipher $out0,$out0,v24 + + andi. 
r0,r0,16 + cmpwi $taillen,0 + vncipher $out0,$out0,v25 + + sub $inp,$inp,r0 + vncipher $out0,$out0,v26 + + lvx_u $in0,0,$inp + vncipher $out0,$out0,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vncipher $out0,$out0,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + + vncipher $out0,$out0,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vxor $twk0,$twk0,v31 + + le?vperm $in0,$in0,$in0,$leperm + vncipher $out0,$out0,v30 + + mtctr $rounds + vncipherlast $out0,$out0,$twk0 + + vmr $twk0,$twk1 # unused tweak + vmr $twk1,$twk2 + le?vperm $out0,$out0,$out0,$leperm + stvx_u $out0,$x00,$out # store output + addi $out,$out,0x10 + vxor $out0,$in0,$twk2 + bne Lxts_dec6x_steal + b Lxts_dec6x_done + +.align 4 +Lxts_dec6x_zero: + cmpwi $taillen,0 + beq Lxts_dec6x_done + + lvx_u $in0,0,$inp + le?vperm $in0,$in0,$in0,$leperm + vxor $out0,$in0,$twk1 +Lxts_dec6x_steal: + vncipher $out0,$out0,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vncipher $out0,$out0,v25 + lvx v25,$x10,$key_ # round[4] + bdnz Lxts_dec6x_steal + + add $inp,$inp,$taillen + vncipher $out0,$out0,v24 + + cmpwi $taillen,0 + vncipher $out0,$out0,v25 + + lvx_u $in0,0,$inp + vncipher $out0,$out0,v26 + + lvsr $inpperm,0,$taillen # $in5 is no more + vncipher $out0,$out0,v27 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vncipher $out0,$out0,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + + vncipher $out0,$out0,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vxor $twk1,$twk1,v31 + + le?vperm $in0,$in0,$in0,$leperm + vncipher $out0,$out0,v30 + + vperm $in0,$in0,$in0,$inpperm + vncipherlast $tmp,$out0,$twk1 + + le?vperm $out0,$tmp,$tmp,$leperm + le?stvx_u $out0,0,$out + be?stvx_u $tmp,0,$out + + vxor $out0,$out0,$out0 + vspltisb $out1,-1 + vperm $out0,$out0,$out1,$inpperm + vsel $out0,$in0,$tmp,$out0 + vxor $out0,$out0,$twk0 + + subi r30,$out,1 + mtctr $taillen +Loop_xts_dec6x_steal: + lbzu r0,1(r30) + stb r0,16(r30) + bdnz Loop_xts_dec6x_steal + + li $taillen,0 + mtctr $rounds + b Loop_xts_dec1x # one more time... 
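#
# The Lxts_enc6x_steal and Lxts_dec6x_steal paths above implement
# ciphertext stealing for inputs whose length is not a multiple of 16:
# the trailing partial block borrows bytes from the last complete block,
# so the output is exactly as long as the input.  On the decrypt side the
# tweak order is swapped -- the last full ciphertext block is processed
# with tweak n+1 and the rebuilt partial block with tweak n -- which is
# why the code shuffles $twk0/$twk1 before looping back.  A schematic C
# sketch of the encrypt-side tail (decryption mirrors it with the tweaks
# exchanged; xts_encrypt_block() is an assumed one-block helper, not a
# routine defined in this file):
#
#	/* Encrypt the final 16+tail bytes, 0 < tail < 16. */
#	static void xts_encrypt_tail(const void *key, uint8_t twk[16],
#				     const uint8_t *in, uint8_t *out,
#				     size_t tail)
#	{
#		uint8_t cc[16], pp[16];
#
#		xts_encrypt_block(key, twk, in, cc);	/* last full block */
#		xts_mul_alpha(twk);			/* tweak n -> n+1 */
#		memcpy(pp, in + 16, tail);		/* partial plaintext */
#		memcpy(pp + tail, cc + tail, 16 - tail);	/* steal */
#		memcpy(out + 16, cc, tail);	/* truncated final block */
#		xts_encrypt_block(key, twk, pp, out);	/* full block n */
#	}
#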
+ +.align 4 +Lxts_dec6x_done: + ${UCMP}i $ivp,0 + beq Lxts_dec6x_ret + + vxor $tweak,$twk0,$rndkey0 + le?vperm $tweak,$tweak,$tweak,$leperm + stvx_u $tweak,0,$ivp + +Lxts_dec6x_ret: + mtlr r11 + li r10,`$FRAME+15` + li r11,`$FRAME+31` + stvx $seven,r10,$sp # wipe copies of round keys + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + stvx $seven,r10,$sp + addi r10,r10,32 + stvx $seven,r11,$sp + addi r11,r11,32 + + mtspr 256,$vrsave + lvx v20,r10,$sp # ABI says so + addi r10,r10,32 + lvx v21,r11,$sp + addi r11,r11,32 + lvx v22,r10,$sp + addi r10,r10,32 + lvx v23,r11,$sp + addi r11,r11,32 + lvx v24,r10,$sp + addi r10,r10,32 + lvx v25,r11,$sp + addi r11,r11,32 + lvx v26,r10,$sp + addi r10,r10,32 + lvx v27,r11,$sp + addi r11,r11,32 + lvx v28,r10,$sp + addi r10,r10,32 + lvx v29,r11,$sp + addi r11,r11,32 + lvx v30,r10,$sp + lvx v31,r11,$sp + $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp) + $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp) + $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp) + $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp) + $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp) + $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp) + addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T` + blr + .long 0 + .byte 0,12,0x04,1,0x80,6,6,0 + .long 0 + +.align 5 +_aesp8_xts_dec5x: + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + lvx v24,$x20,$key_ # round[3] + addi $key_,$key_,0x20 + + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + lvx v25,$x10,$key_ # round[4] + bdnz _aesp8_xts_dec5x + + subi r0,$taillen,1 + vncipher $out0,$out0,v24 + vncipher $out1,$out1,v24 + vncipher $out2,$out2,v24 + vncipher $out3,$out3,v24 + vncipher $out4,$out4,v24 + + andi. 
r0,r0,16 + cmpwi $taillen,0 + vncipher $out0,$out0,v25 + vncipher $out1,$out1,v25 + vncipher $out2,$out2,v25 + vncipher $out3,$out3,v25 + vncipher $out4,$out4,v25 + vxor $twk0,$twk0,v31 + + sub $inp,$inp,r0 + vncipher $out0,$out0,v26 + vncipher $out1,$out1,v26 + vncipher $out2,$out2,v26 + vncipher $out3,$out3,v26 + vncipher $out4,$out4,v26 + vxor $in1,$twk1,v31 + + vncipher $out0,$out0,v27 + lvx_u $in0,0,$inp + vncipher $out1,$out1,v27 + vncipher $out2,$out2,v27 + vncipher $out3,$out3,v27 + vncipher $out4,$out4,v27 + vxor $in2,$twk2,v31 + + addi $key_,$sp,$FRAME+15 # rewind $key_ + vncipher $out0,$out0,v28 + vncipher $out1,$out1,v28 + vncipher $out2,$out2,v28 + vncipher $out3,$out3,v28 + vncipher $out4,$out4,v28 + lvx v24,$x00,$key_ # re-pre-load round[1] + vxor $in3,$twk3,v31 + + vncipher $out0,$out0,v29 + le?vperm $in0,$in0,$in0,$leperm + vncipher $out1,$out1,v29 + vncipher $out2,$out2,v29 + vncipher $out3,$out3,v29 + vncipher $out4,$out4,v29 + lvx v25,$x10,$key_ # re-pre-load round[2] + vxor $in4,$twk4,v31 + + vncipher $out0,$out0,v30 + vncipher $out1,$out1,v30 + vncipher $out2,$out2,v30 + vncipher $out3,$out3,v30 + vncipher $out4,$out4,v30 + + vncipherlast $out0,$out0,$twk0 + vncipherlast $out1,$out1,$in1 + vncipherlast $out2,$out2,$in2 + vncipherlast $out3,$out3,$in3 + vncipherlast $out4,$out4,$in4 + mtctr $rounds + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 +___ +}} }}} + my $consts=1; foreach(split("\n",$code)) { s/\`([^\`]*)\`/eval($1)/geo; @@ -1898,7 +3757,7 @@ foreach(split("\n",$code)) { if ($flavour =~ /le$/o) { SWITCH: for($conv) { /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; - /\?rev/ && do { @bytes=reverse(@bytes); last; }; + /\?rev/ && do { @bytes=reverse(@bytes); last; }; } } @@ -141,7 +141,7 @@ my $vmr = sub { # Some ABIs specify vrsave, special-purpose register #256, as reserved # for system use. 
-my $no_vrsave = ($flavour =~ /aix|linux64le/); +my $no_vrsave = ($flavour =~ /linux-ppc64le/); my $mtspr = sub { my ($f,$idx,$ra) = @_; if ($idx == 256 && $no_vrsave) { @@ -31,10 +31,12 @@ extern struct shash_alg p8_ghash_alg; extern struct crypto_alg p8_aes_alg; extern struct crypto_alg p8_aes_cbc_alg; extern struct crypto_alg p8_aes_ctr_alg; +extern struct crypto_alg p8_aes_xts_alg; static struct crypto_alg *algs[] = { &p8_aes_alg, &p8_aes_cbc_alg, &p8_aes_ctr_alg, + &p8_aes_xts_alg, NULL, }; @@ -75,7 +75,7 @@ config DEVFREQ_GOV_PASSIVE comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ - bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" + tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" depends on ARCH_EXYNOS select DEVFREQ_GOV_SIMPLE_ONDEMAND select DEVFREQ_GOV_PASSIVE @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/of.h> @@ -481,13 +481,3 @@ static int __init devfreq_event_init(void) return 0; } subsys_initcall(devfreq_event_init); - -static void __exit devfreq_event_exit(void) -{ - class_destroy(devfreq_event_class); -} -module_exit(devfreq_event_exit); - -MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>"); -MODULE_DESCRIPTION("DEVFREQ-Event class support"); -MODULE_LICENSE("GPL"); @@ -15,7 +15,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/pm_opp.h> @@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq) devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); - if (err) + if (err) { + freqs.new = cur_freq; + devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); return err; + } freqs.new = freq; devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); @@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->profile = profile; strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; + devfreq->last_status.current_frequency = profile->initial_freq; devfreq->data = data; devfreq->nb.notifier_call = devfreq_notifier_call; @@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev, mutex_lock(&devfreq->lock); } - devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * - devfreq->profile->max_state * - devfreq->profile->max_state, - GFP_KERNEL); - devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * - devfreq->profile->max_state, - GFP_KERNEL); - devfreq->last_stat_updated = jiffies; - dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { - put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); goto err_out; } + devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) * + devfreq->profile->max_state * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->last_stat_updated = jiffies; + srcu_init_notifier_head(&devfreq->transition_notifier_list); mutex_unlock(&devfreq->lock); @@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); - kfree(devfreq); err_out: return ERR_PTR(err); 
} @@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq) return -EINVAL; device_unregister(&devfreq->dev); - put_device(&devfreq->dev); return 0; } @@ -706,10 +707,12 @@ struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index) if (devfreq->dev.parent && devfreq->dev.parent->of_node == node) { mutex_unlock(&devfreq_list_lock); + of_node_put(node); return devfreq; } } mutex_unlock(&devfreq_list_lock); + of_node_put(node); return ERR_PTR(-EPROBE_DEFER); } @@ -1198,13 +1201,6 @@ static int __init devfreq_init(void) } subsys_initcall(devfreq_init); -static void __exit devfreq_exit(void) -{ - class_destroy(devfreq_class); - destroy_workqueue(devfreq_wq); -} -module_exit(devfreq_exit); - /* * The followings are helper functions for devfreq user device drivers with * OPP framework. @@ -1470,7 +1466,3 @@ void devm_devfreq_unregister_notifier(struct device *dev, devm_devfreq_dev_match, devfreq)); } EXPORT_SYMBOL(devm_devfreq_unregister_notifier); - -MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); -MODULE_DESCRIPTION("devfreq class support"); -MODULE_LICENSE("GPL"); @@ -14,7 +14,7 @@ menuconfig PM_DEVFREQ_EVENT if PM_DEVFREQ_EVENT config DEVFREQ_EVENT_EXYNOS_NOCP - bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" + tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" depends on ARCH_EXYNOS select PM_OPP help @@ -22,7 +22,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP (Network on Chip) Probe counters to measure the bandwidth of AXI bus. config DEVFREQ_EVENT_EXYNOS_PPMU - bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" + tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" depends on ARCH_EXYNOS select PM_OPP help @@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev, /* Maps the memory mapped IO to control nocp register */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -482,7 +482,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev) if (!info->edev) { dev_err(&pdev->dev, "failed to allocate memory devfreq-event devices\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } edev = info->edev; platform_set_drvdata(pdev, info); @@ -383,7 +383,7 @@ err_clk: static int exynos_bus_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; + struct device_node *np = dev->of_node, *node; struct devfreq_dev_profile *profile; struct devfreq_simple_ondemand_data *ondemand_data; struct devfreq_passive_data *passive_data; @@ -407,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev) /* Parse the device-tree to get the resource information */ ret = exynos_bus_parse_of(np, bus); if (ret < 0) - goto err; + return ret; profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); if (!profile) { @@ -415,10 +415,13 @@ static int exynos_bus_probe(struct platform_device *pdev) goto err; } - if (of_parse_phandle(dev->of_node, "devfreq", 0)) + node = of_parse_phandle(dev->of_node, "devfreq", 0); + if (node) { + of_node_put(node); goto passive; - else + } else { ret = exynos_bus_parent_parse_of(np, bus); + } if (ret < 0) goto err; @@ -33,6 +33,7 @@ #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/reservation.h> +#include <linux/mm.h> #include <uapi/linux/dma-buf.h> @@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct 
vm_area_struct *vma) dmabuf = file->private_data; /* check for overflowing the buffer's size */ - if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (vma->vm_pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; @@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* check for offset overflow */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) + if (pgoff + vma_pages(vma) < pgoff) return -EOVERFLOW; /* check for overflowing the buffer's size */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; @@ -35,6 +35,17 @@ #include <linux/reservation.h> #include <linux/export.h> +/** + * DOC: Reservation Object Overview + * + * The reservation object provides a mechanism to manage shared and + * exclusive fences associated with a buffer. A reservation object + * can have one exclusive fence attached (normally associated with + * write operations) or N shared fences (read operations). The RCU + * mechanism is used to protect read access to fences from locked + * write-side updates. + */ + DEFINE_WW_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); @@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class); const char reservation_seqcount_string[] = "reservation_seqcount"; EXPORT_SYMBOL(reservation_seqcount_string); -/* - * Reserve space to add a shared fence to a reservation_object, - * must be called with obj->lock held. + +/** + * reservation_object_reserve_shared - Reserve space to add a shared + * fence to a reservation_object. + * @obj: reservation object + * + * Should be called before reservation_object_add_shared_fence(). Must + * be called with obj->lock held. + * + * RETURNS + * Zero for success, or -errno + */ int reservation_object_reserve_shared(struct reservation_object *obj) { @@ -180,7 +199,11 @@ done: fence_put(old_fence); } -/* +/** + * reservation_object_add_shared_fence - Add a fence to a shared slot + * @obj: the reservation object + * @fence: the shared fence to add + * * Add a fence to a shared slot, obj->lock must be held, and * reservation_object_reserve_shared_fence has been called. */ @@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, } EXPORT_SYMBOL(reservation_object_add_shared_fence); +/** + * reservation_object_add_excl_fence - Add an exclusive fence. + * @obj: the reservation object + * @fence: the exclusive fence to add + * + * Add a fence to the exclusive slot. The obj->lock must be held.
+ */ void reservation_object_add_excl_fence(struct reservation_object *obj, struct fence *fence) { @@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, } EXPORT_SYMBOL(reservation_object_add_excl_fence); +/** + * reservation_object_get_fences_rcu - Get an object's shared and exclusive + * fences without update side lock held + * @obj: the reservation object + * @pfence_excl: the returned exclusive fence (or NULL) + * @pshared_count: the number of shared fences returned + * @pshared: the array of shared fence ptrs returned (array is krealloc'd to + * the required size, and must be freed by caller) + * + * RETURNS + * Zero or -errno + */ int reservation_object_get_fences_rcu(struct reservation_object *obj, struct fence **pfence_excl, unsigned *pshared_count, @@ -319,6 +361,18 @@ unlock: } EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); +/** + * reservation_object_wait_timeout_rcu - Wait on a reservation object's + * shared and/or exclusive fences. + * @obj: the reservation object + * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @intr: if true, do interruptible wait + * @timeout: timeout value in jiffies or zero to return immediately + * + * RETURNS + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or + * greater than zero on success. + */ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout) @@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence) return ret; } +/** + * reservation_object_test_signaled_rcu - Test if a reservation object's + * fences have been signaled. + * @obj: the reservation object + * @test_all: if true, test all fences, otherwise only test the exclusive + * fence + * + * RETURNS + * true if all fences signaled, else false + */ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, bool test_all) { @@ -339,6 +339,20 @@ config MV_XOR ---help--- Enable support for the Marvell XOR engine. +config MV_XOR_V2 + bool "Marvell XOR engine version 2 support" + depends on ARM64 + select DMA_ENGINE + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + select GENERIC_MSI_IRQ_DOMAIN + ---help--- + Enable support for the Marvell version 2 XOR engine. + + This engine provides acceleration for copy, XOR and RAID6 + operations, and is available on Marvell Armada 7K and 8K + platforms. + config MXS_DMA bool "MXS DMA support" depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL @@ -519,19 +533,31 @@ config XGENE_DMA help Enable support for the APM X-Gene SoC DMA engine. -config XILINX_VDMA - tristate "Xilinx AXI VDMA Engine" +config XILINX_DMA + tristate "Xilinx AXI DMAs Engine" depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) select DMA_ENGINE help Enable support for Xilinx AXI VDMA Soft IP. - This engine provides high-bandwidth direct memory access + AXI VDMA engine provides high-bandwidth direct memory access between memory and AXI4-Stream video type target peripherals including peripherals which support AXI4- Stream Video Protocol. It has two stream interfaces/ channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. + AXI CDMA engine provides high-bandwidth direct memory access + between a memory-mapped source address and a memory-mapped + destination address. + AXI DMA engine provides high-bandwidth one dimensional direct + memory access between memory and AXI4-Stream target peripherals.
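The kerneldoc added above for the reservation-object read side (reservation_object_get_fences_rcu(), reservation_object_wait_timeout_rcu(), reservation_object_test_signaled_rcu()) together describes a lock-free consumer API. The following is a hedged usage sketch of the wait helper exactly as those comments document it; the wrapper function, its name, and the one-second timeout policy are invented for illustration and are not part of the patch:

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/reservation.h>

	/* Wait up to one second for the fences on a reservation object,
	 * sleeping interruptibly.  Per the kerneldoc above, the helper
	 * returns <0 on error/interrupt, 0 on timeout, >0 on success.
	 */
	static int example_wait_for_buffer(struct reservation_object *resv)
	{
		long ret;

		ret = reservation_object_wait_timeout_rcu(resv,
							  true, /* wait_all */
							  true, /* intr */
							  msecs_to_jiffies(1000));
		if (ret < 0)
			return ret;		/* e.g. -ERESTARTSYS */
		if (!ret)
			return -ETIMEDOUT;	/* timed out */
		return 0;
	}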
+ +config XILINX_ZYNQMP_DMA + tristate "Xilinx ZynqMP DMA Engine" + depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) + select DMA_ENGINE + help + Enable support for Xilinx ZynqMP DMA controller. config ZX_DMA tristate "ZTE ZX296702 DMA support" @@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o +obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o @@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { pl08x_free_txd(pl08x, txd); - dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", - __func__); return NULL; } list_add_tail(&dsg->node, &txd->dsg_list); @@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, */ for (i = 0; i < channels; i++) { chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(&pl08x->adev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; @@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { - dev_err(&adev->dev, "%s failed to allocate " - "physical channel holders\n", - __func__); ret = -ENOMEM; goto out_no_phychans; } @@ -242,7 +242,7 @@ struct at_xdmac_lld { u32 mbr_dus; /* Destination Microblock Stride Register */ }; - +/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ struct at_xdmac_desc { struct at_xdmac_lld lld; enum dma_transfer_direction direction; @@ -253,7 +253,7 @@ struct at_xdmac_desc { unsigned int xfer_size; struct list_head descs_list; struct list_head xfer_node; -}; +} __aligned(sizeof(u64)); static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) { @@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } -void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) { memset(&desc->lld, 0, sizeof(desc->lld)); INIT_LIST_HEAD(&desc->descs_list); @@ -1195,14 +1195,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { @@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, u32 cur_nda, check_nda, cur_ubc, mask, value; u8 dwidth = 0; unsigned long flags; + bool initd; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, residue = desc->xfer_size; /* * Flush FIFO: only relevant when the transfer is source peripheral - * synchronized. + * synchronized. 
Flush is needed before reading CUBC because data in + * the FIFO are not reported by CUBC. Reporting a residue of the + * transfer length while we have data in FIFO can cause issues. + * Use case: the Atmel USART has a timeout which fires when characters + * have been received but no new character arrives for a while. On + * timeout, it requests the residue. If the data are in the DMA FIFO, + * we will return a residue equal to the transfer length, meaning no + * data was received. If an application is waiting for these data, it + * will hang since we won't have another USART timeout without receiving + * new data. */ mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; @@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, } /* - * When processing the residue, we need to read two registers but we - * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where - * we stand in the descriptor list and AT_XDMAC_CUBC is used - * to know how many data are remaining for the current descriptor. - * Since the dma channel is not paused to not loose data, between the - * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of - * descriptor. - * For that reason, after reading AT_XDMAC_CUBC, we check if we are - * still using the same descriptor by reading a second time - * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to - * read again AT_XDMAC_CUBC. + * The easiest way to compute the residue would be to pause the DMA, + * but doing so can lead to lost data, as some devices don't have a + * FIFO. + * We need to read several registers because: + * - the DMA is running, therefore a descriptor change is possible + * while these registers are read + * - When the block transfer is done, the value of the CUBC register + * is set to its initial value until the fetch of the next descriptor. + * This value will corrupt the residue calculation so we have to skip + * it. + * + * INITD -------- ------------ + * |____________________| + * _______________________ _______________ + * NDA @desc2 \/ @desc3 + * _______________________/\_______________ + * __________ ___________ _______________ + * CUBC 0 \/ MAX desc1 \/ MAX desc2 + * __________/\___________/\_______________ + * + * Since descriptors are aligned on 64 bits, we can assume that + * the update of NDA and CUBC is atomic. * Memory barriers are used to ensure the read order of the registers. - * A max number of retries is set because unlikely it can never ends if - * we are transferring a lot of data with small buffers. + * A maximum number of retries is set because, although unlikely, the + * loop could otherwise never end. */ - cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; - rmb(); - cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { - rmb(); check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; - - if (likely(cur_nda == check_nda)) - break; - - cur_nda = check_nda; + rmb(); + initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); rmb(); cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); + rmb(); + cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; + rmb(); + + if ((check_nda == cur_nda) && initd) + break; } if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { @@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, } /* + * Flush FIFO: only relevant when the transfer is source peripheral + * synchronized.
Another flush is needed here because CUBC is updated + * when the controller sends the data write command. This can lead to + * reporting data that are not yet written to the memory or the device. + * The FIFO flush ensures that data are really written. + */ + if ((desc->lld.mbr_cfg & mask) == value) { + at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); + while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) + cpu_relax(); + } + + /* * Remove size of all microblocks already transferred and the current * one. Then add the remaining size to transfer of the current * microblock. @@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg( unsigned int sg_len) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); - size_t max_len = bcm2835_dma_max_frame_length(c); - unsigned int i, len; + size_t len, max_len; + unsigned int i; dma_addr_t addr; struct scatterlist *sgent; + max_len = bcm2835_dma_max_frame_length(c); for_each_sg(sgl, sgent, sg_len, i) { for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); len > 0; @@ -613,7 +614,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&c->vc.lock, flags); } -struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) { @@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op) /* Get a clean struct */ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); if (!bcom_eng) { - printk(KERN_ERR DRIVER_NAME ": " - "Can't allocate state structure\n"); rv = -ENOMEM; goto error_sramclean; } @@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = { COH901318_CX_CTRL_DDMA_LEGACY | \ COH901318_CX_CTRL_PRDD_SOURCE) -const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { +static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { { .number = U300_DMA_MSL_TX_0, .name = "MSL TX 0", @@ -1280,6 +1280,7 @@ struct coh901318_desc { struct coh901318_base { struct device *dev; void __iomem *virtbase; + unsigned int irq; struct coh901318_pool pool; struct powersave pm; struct dma_device dma_slave; @@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, } static const struct file_operations coh901318_debugfs_status_operations = { - .owner = THIS_MODULE, .open = simple_open, .read = coh901318_debugfs_read, .llseek = default_llseek, @@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !txstate) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) return err; + base->irq = irq; + err = coh901318_pool_create(&base->pool, &pdev->dev, sizeof(struct coh901318_lli), 32); @@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev) coh901318_pool_destroy(&base->pool); return err; } +static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans) +{ + int chans_i; + int i = 0; + struct coh901318_chan *cohc; + + for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { + for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { + cohc = &base->chans[i]; + + tasklet_kill(&cohc->tasklet); + } + } + +} static int coh901318_remove(struct platform_device *pdev) { struct
coh901318_base *base = platform_get_drvdata(pdev); + devm_free_irq(&pdev->dev, base->irq, base); + + coh901318_base_remove(base, dma_slave_channels); + coh901318_base_remove(base, dma_memcpy_channels); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&base->dma_memcpy); dma_async_device_unregister(&base->dma_slave); @@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = { }, }; -int __init coh901318_init(void) +static int __init coh901318_init(void) { return platform_driver_probe(&coh901318_driver, coh901318_probe); } subsys_initcall(coh901318_init); -void __exit coh901318_exit(void) +static void __exit coh901318_exit(void) { platform_driver_unregister(&coh901318_driver); } @@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( struct cppi41_desc *d; struct scatterlist *sg; unsigned int i; - unsigned int num; - num = 0; d = c->desc; for_each_sg(sgl, sg, sg_len, i) { u32 addr; u32 len; /* We need to use more than one desc once musb supports sg */ - BUG_ON(num > 0); addr = lower_32_bits(sg_dma_address(sg)); len = sg_dma_len(sg); @@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) unsigned int pending; pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); + if (!pending) + return IRQ_NONE; + axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending); spin_lock(&dmac->chan.vchan.lock); @@ -579,7 +582,9 @@ static int axi_dmac_probe(struct platform_device *pdev) return -ENOMEM; dmac->irq = platform_get_irq(pdev, 0); - if (dmac->irq <= 0) + if (dmac->irq < 0) + return dmac->irq; + if (dmac->irq == 0) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -683,6 +688,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = { { .compatible = "adi,axi-dmac-1.00.a" }, { }, }; +MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table); static struct platform_driver axi_dmac_driver = { .driver = { @@ -573,12 +573,26 @@ err_unregister: return ret; } +static void jz4740_cleanup_vchan(struct dma_device *dmadev) +{ + struct jz4740_dmaengine_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + + static int jz4740_dma_remove(struct platform_device *pdev) { struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, dmadev); + + jz4740_cleanup_vchan(&dmadev->ddev); dma_async_device_unregister(&dmadev->ddev); clk_disable_unprepare(dmadev->clk); @@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); +static unsigned int sg_buffers = 1; +module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(sg_buffers, + "Number of scatter gather buffers (default: 1)"); + +static unsigned int dmatest = 1; +module_param(dmatest, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dmatest, + "dmatest 0-memcpy 1-slave_sg (default: 1)"); + static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(xor_sources, @@ -431,6 +441,8 @@ static int dmatest_func(void *data) dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; + else if (thread->type == DMA_SG) + src_cnt = dst_cnt = sg_buffers; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); @@ -485,6 +497,8 @@ static int 
dmatest_func(void *data) dma_addr_t *dsts; unsigned int src_off, dst_off, len; u8 align = 0; + struct scatterlist tx_sg[src_cnt]; + struct scatterlist rx_sg[src_cnt]; total_tests++; @@ -577,10 +591,22 @@ static int dmatest_func(void *data) um->bidi_cnt++; } + sg_init_table(tx_sg, src_cnt); + sg_init_table(rx_sg, src_cnt); + for (i = 0; i < src_cnt; i++) { + sg_dma_address(&rx_sg[i]) = srcs[i]; + sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off; + sg_dma_len(&tx_sg[i]) = len; + sg_dma_len(&rx_sg[i]) = len; + } + if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dsts[0] + dst_off, srcs[0], len, flags); + else if (thread->type == DMA_SG) + tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt, + rx_sg, src_cnt, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dsts[0] + dst_off, @@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info, if (type == DMA_MEMCPY) op = "copy"; + else if (type == DMA_SG) + op = "sg"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) @@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info, INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); - thread_count += cnt > 0 ? cnt : 0; + if (dmatest == 0) { + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); + thread_count += cnt > 0 ? cnt : 0; + } } + + if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) { + if (dmatest == 1) { + cnt = dmatest_add_threads(info, dtc, DMA_SG); + thread_count += cnt > 0 ? cnt : 0; + } + } + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; @@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info) request_channels(info, DMA_MEMCPY); request_channels(info, DMA_XOR); + request_channels(info, DMA_SG); request_channels(info, DMA_PQ); } @@ -239,6 +239,9 @@ struct edma_cc { bool chmap_exist; enum dma_event_q default_queue; + unsigned int ccint; + unsigned int ccerrint; + /* * The slot_inuse bit for each PaRAM slot is clear unless the slot is * in use by Linux or if it is allocated to be used by DSP. @@ -1069,10 +1072,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->pset_nr = sg_len; edesc->residue = 0; @@ -1114,14 +1115,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edesc->absync = ret; edesc->residue += sg_dma_len(sg); - /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ - if (!((i+1) % MAX_NR_SG)) - edesc->pset[i].param.opt |= TCINTEN; - - /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) + /* Enable completion interrupt */ edesc->pset[i].param.opt |= TCINTEN; + else if (!((i+1) % MAX_NR_SG)) + /* + * Enable early completion interrupt for the + * intermediate set. In this case the driver will be + * notified when the PaRAM set is submitted to TC. This + * will allow more time to set up the next set of slots.
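+ * + * An illustrative walk-through (chunk size is MAX_NR_SG, whose exact + * value is a driver constant and is assumed here, not quoted): splitting + * a 25-entry SG list into chunks of 10 raises early-completion interrupts + * (TCINTEN | TCCMODE) on psets 9 and 19, and the true completion + * interrupt (plain TCINTEN) on the final pset 24.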
+ */ + edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); } edesc->residue_stat = edesc->residue; @@ -1173,10 +1177,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_dbg(dev, "Failed to allocate a descriptor\n"); + if (!edesc) return NULL; - } edesc->pset_nr = nslots; edesc->residue = edesc->residue_stat = len; @@ -1298,10 +1300,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); - if (!edesc) { - dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); + if (!edesc) return NULL; - } edesc->cyclic = 1; edesc->pset_nr = nslots; @@ -2207,10 +2207,8 @@ static int edma_probe(struct platform_device *pdev) return ret; ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); - if (!ecc) { - dev_err(dev, "Can't allocate controller\n"); + if (!ecc) return -ENOMEM; - } ecc->dev = dev; ecc->id = pdev->id; @@ -2288,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccint = irq; } irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); @@ -2303,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev) dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); return ret; } + ecc->ccerrint = irq; } ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); @@ -2393,11 +2393,27 @@ err_reg1: return ret; } +static void edma_cleanup_vchan(struct dma_device *dmadev) +{ + struct edma_chan *echan, *_echan; + + list_for_each_entry_safe(echan, _echan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&echan->vchan.chan.device_node); + tasklet_kill(&echan->vchan.task); + } +} + static int edma_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct edma_cc *ecc = dev_get_drvdata(dev); + devm_free_irq(dev, ecc->ccint, ecc); + devm_free_irq(dev, ecc->ccerrint, ecc); + + edma_cleanup_vchan(&ecc->dma_slave); + if (dev->of_node) of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); @@ -852,6 +852,25 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma return 0; } +static void fsl_edma_irq_exit( + struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) +{ + if (fsl_edma->txirq == fsl_edma->errirq) { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + } else { + devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); + devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); + } +} + +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) +{ + int i; + + for (i = 0; i < DMAMUX_NR; i++) + clk_disable_unprepare(fsl_edma->muxclk[i]); +} + static int fsl_edma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -897,6 +916,10 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = clk_prepare_enable(fsl_edma->muxclk[i]); if (ret) { + /* disable only clks which were enabled on error */ + for (; i >= 0; i--) + clk_disable_unprepare(fsl_edma->muxclk[i]); + dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); return ret; } @@ -951,14 +974,18 @@ static int fsl_edma_probe(struct platform_device *pdev) ret = dma_async_device_register(&fsl_edma->dma_dev); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n"); + dev_err(&pdev->dev, + "Can't register Freescale eDMA engine.
(%d)\n", ret); + fsl_disable_clocks(fsl_edma); return ret; } ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma); if (ret) { - dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n"); + dev_err(&pdev->dev, + "Can't register Freescale eDMA of_dma. (%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_disable_clocks(fsl_edma); return ret; } @@ -968,17 +995,27 @@ static int fsl_edma_probe(struct platform_device *pdev) return 0; } +static void fsl_edma_cleanup_vchan(struct dma_device *dmadev) +{ + struct fsl_edma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, + &dmadev->channels, vchan.chan.device_node) { + list_del(&chan->vchan.chan.device_node); + tasklet_kill(&chan->vchan.task); + } +} + static int fsl_edma_remove(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); - int i; + fsl_edma_irq_exit(pdev, fsl_edma); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - - for (i = 0; i < DMAMUX_NR; i++) - clk_disable_unprepare(fsl_edma->muxclk[i]); + fsl_disable_clocks(fsl_edma); return 0; } @@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "genq tx length %lu, max length %d\n", + dev_err(re_chan->dev, "genq tx length %zu, max length %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy( re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) { - dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n", + dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n", len, FSL_RE_MAX_DATA_LEN); return NULL; } @@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev) static void fsl_re_remove_chan(struct fsl_re_chan *chan) { + tasklet_kill(&chan->irqtask); + dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, chan->inb_phys_addr); @@ -890,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = { static struct platform_driver fsl_re_driver = { .driver = { .name = "fsl-raideng", - .owner = THIS_MODULE, .of_match_table = fsl_re_ids, }, .probe = fsl_re_probe, @@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, /* alloc channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(fdev->dev, "no free memory for DMA channels!\n"); err = -ENOMEM; goto out_return; } @@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op) fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { - dev_err(&op->dev, "No enough memory for 'priv'\n"); err = -ENOMEM; goto out_return; } @@ -126,28 +126,33 @@ static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc) hsu_dma_start_channel(hsuc); } -static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc) -{ - unsigned long flags; - u32 sr; - - spin_lock_irqsave(&hsuc->vchan.lock, flags); - sr = hsu_chan_readl(hsuc, HSU_CH_SR); - spin_unlock_irqrestore(&hsuc->vchan.lock, flags); - - return 
sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY); -} - -irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) +/* + * hsu_dma_get_status() - get DMA channel status + * @chip: HSUART DMA chip + * @nr: DMA channel number + * @status: pointer for DMA Channel Status Register value + * + * Description: + * The function reads and clears the DMA Channel Status Register, checks + * if it was a timeout interrupt and returns a corresponding value. + * + * Caller should provide a valid pointer for the DMA Channel Status + * Register value that will be returned in @status. + * + * Return: + * 1 for DMA timeout status, 0 for other DMA status, or error code for + * invalid parameters or no interrupt pending. + */ +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status) { struct hsu_dma_chan *hsuc; - struct hsu_dma_desc *desc; unsigned long flags; u32 sr; /* Sanity check */ if (nr >= chip->hsu->nr_channels) - return IRQ_NONE; + return -EINVAL; hsuc = &chip->hsu->chan[nr]; @@ -155,22 +160,65 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) * No matter what situation, need read clear the IRQ status * There is a bug, see Errata 5, HSD 2900918 */ - sr = hsu_dma_chan_get_sr(hsuc); + spin_lock_irqsave(&hsuc->vchan.lock, flags); + sr = hsu_chan_readl(hsuc, HSU_CH_SR); + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + /* Check if any interrupt is pending */ + sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY); if (!sr) - return IRQ_NONE; + return -EIO; /* Timeout IRQ, need wait some time, see Errata 2 */ if (sr & HSU_CH_SR_DESCTO_ANY) udelay(2); + /* + * At this point, at least one of Descriptor Time Out, Channel Error + * or Descriptor Done bits must be set. Clear the Descriptor Time Out + * bits and if sr is still non-zero, it must be channel error or + * descriptor done which are higher priority than timeout and handled + * in hsu_dma_do_irq(). Else, it must be a timeout. + */ sr &= ~HSU_CH_SR_DESCTO_ANY; + + *status = sr; + + return sr ? 0 : 1; +} +EXPORT_SYMBOL_GPL(hsu_dma_get_status); + +/* + * hsu_dma_do_irq() - DMA interrupt handler + * @chip: HSUART DMA chip + * @nr: DMA channel number + * @status: Channel Status Register value + * + * Description: + * This function handles Channel Error and Descriptor Done interrupts. + * This function should be called after determining that the DMA interrupt + * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0. + * + * Return: + * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
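+ * + * A typical caller pairs the two helpers the way the PCI glue below does + * (an illustrative sketch; err, status and ret are placeholder locals): + * + * err = hsu_dma_get_status(chip, nr, &status); + * if (err > 0) + * ret |= IRQ_HANDLED; (a timeout, already fully handled) + * else if (err == 0) + * ret |= hsu_dma_do_irq(chip, nr, status);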
+ */ +irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status) +{ + struct hsu_dma_chan *hsuc; + struct hsu_dma_desc *desc; + unsigned long flags; + + /* Sanity check */ + if (nr >= chip->hsu->nr_channels) + return IRQ_NONE; + + hsuc = &chip->hsu->chan[nr]; spin_lock_irqsave(&hsuc->vchan.lock, flags); desc = hsuc->desc; if (desc) { - if (sr & HSU_CH_SR_CHE) { + if (status & HSU_CH_SR_CHE) { desc->status = DMA_ERROR; } else if (desc->active < desc->nents) { hsu_dma_start_channel(hsuc); @@ -184,7 +232,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) return IRQ_HANDLED; } -EXPORT_SYMBOL_GPL(hsu_dma_irq); +EXPORT_SYMBOL_GPL(hsu_dma_do_irq); static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents) { @@ -27,13 +27,20 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) { struct hsu_dma_chip *chip = dev; u32 dmaisr; + u32 status; unsigned short i; irqreturn_t ret = IRQ_NONE; + int err; dmaisr = readl(chip->regs + HSU_PCI_DMAISR); for (i = 0; i < chip->hsu->nr_channels; i++) { - if (dmaisr & 0x1) - ret |= hsu_dma_irq(chip, i); + if (dmaisr & 0x1) { + err = hsu_dma_get_status(chip, i, &status); + if (err > 0) + ret |= IRQ_HANDLED; + else if (err == 0) + ret |= hsu_dma_do_irq(chip, i, status); + } dmaisr >>= 1; } @@ -167,6 +167,7 @@ struct imxdma_channel { u32 ccr_to_device; bool enabled_2d; int slot_2d; + unsigned int irq; }; enum imx_dma_type { @@ -186,6 +187,9 @@ struct imxdma_engine { struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; enum imx_dma_type devtype; + unsigned int irq; + unsigned int irq_err; + }; struct imxdma_filter_data { @@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, } static int __init imxdma_probe(struct platform_device *pdev) - { +{ struct imxdma_engine *imxdma; struct resource *res; const struct of_device_id *of_id; @@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq = irq; irq_err = platform_get_irq(pdev, 1); if (irq_err < 0) { @@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev) dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); goto disable_dma_ahb_clk; } + imxdma->irq_err = irq_err; } /* enable DMA module */ @@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev) irq + i, i); goto disable_dma_ahb_clk; } + + imxdmac->irq = irq + i; init_timer(&imxdmac->watchdog); imxdmac->watchdog.function = &imxdma_watchdog; imxdmac->watchdog.data = (unsigned long)imxdmac; @@ -1217,10 +1225,31 @@ disable_dma_ipg_clk: return ret; } +static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma) +{ + int i; + + if (is_imx1_dma(imxdma)) { + disable_irq(imxdma->irq); + disable_irq(imxdma->irq_err); + } + + for (i = 0; i < IMX_DMA_CHANNELS; i++) { + struct imxdma_channel *imxdmac = &imxdma->channel[i]; + + if (!is_imx1_dma(imxdma)) + disable_irq(imxdmac->irq); + + tasklet_kill(&imxdmac->dma_tasklet); + } +} + static int imxdma_remove(struct platform_device *pdev) { struct imxdma_engine *imxdma = platform_get_drvdata(pdev); + imxdma_free_irq(pdev, imxdma); + dma_async_device_unregister(&imxdma->dma_device); if (pdev->dev.of_node) @@ -18,6 +18,7 @@ */ #include <linux/init.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/types.h> #include <linux/bitops.h> @@ -385,6 +386,7 @@ struct sdma_engine { 
const struct sdma_driver_data *drvdata; u32 spba_start_addr; u32 spba_end_addr; + unsigned int irq; }; static struct sdma_driver_data sdma_imx31 = { @@ -571,28 +573,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) static int sdma_run_channel0(struct sdma_engine *sdma) { int ret; - unsigned long timeout = 500; + u32 reg; sdma_enable_channel(sdma, 0); - while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { - if (timeout-- <= 0) - break; - udelay(1); - } - - if (ret) { - /* Clear the interrupt status */ - writel_relaxed(ret, sdma->regs + SDMA_H_INTR); - } else { + ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP, + reg, !(reg & 1), 1, 500); + if (ret) dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); - } /* Set bits of CONFIG register with dynamic context switching */ if (readl(sdma->regs + SDMA_H_CONFIG) == 0) writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); - return ret ? 0 : -ETIMEDOUT; + return ret; } static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, @@ -727,9 +721,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id) unsigned long stat; stat = readl_relaxed(sdma->regs + SDMA_H_INTR); - /* not interested in channel 0 interrupts */ - stat &= ~1; writel_relaxed(stat, sdma->regs + SDMA_H_INTR); + /* channel 0 is special and not handled here, see run_channel0() */ + stat &= ~1; while (stat) { int channel = fls(stat) - 1; @@ -758,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac, * These are needed once we start to support transfers between * two peripherals or memory-to-memory transfers */ - int per_2_per = 0, emi_2_emi = 0; + int per_2_per = 0; sdmac->pc_from_device = 0; sdmac->pc_to_device = 0; @@ -766,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac, switch (peripheral_type) { case IMX_DMATYPE_MEMORY: - emi_2_emi = sdma->script_addrs->ap_2_ap_addr; break; case IMX_DMATYPE_DSP: emi_2_per = sdma->script_addrs->bp_2_ap_addr; @@ -999,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan) } else __set_bit(sdmac->event_id0, sdmac->event_mask); - /* Watermark Level */ - sdmac->watermark_level |= sdmac->watermark_level; /* Address */ sdmac->shp_addr = sdmac->per_address; sdmac->per_addr = sdmac->per_address2; @@ -1715,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev) if (ret) return ret; + sdma->irq = irq; + sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); if (!sdma->script_addrs) return -ENOMEM; @@ -1840,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev) struct sdma_engine *sdma = platform_get_drvdata(pdev); int i; + devm_free_irq(&pdev->dev, sdma->irq, sdma); dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); /* Kill the tasklet */ @@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev) ioat_disable_interrupts(ioat_dma); } -void ioat_resume(struct ioatdma_device *ioat_dma) +static void ioat_resume(struct ioatdma_device *ioat_dma) { struct ioatdma_chan *ioat_chan; u32 chanerr; @@ -102,6 +102,7 @@ struct k3_dma_dev { struct clk *clk; u32 dma_channels; u32 dma_requests; + unsigned int irq; }; #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) @@ -425,10 +426,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( num = DIV_ROUND_UP(len, DMA_MAX_SIZE); ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + 
ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->size = len; ds->desc_num = num; @@ -481,10 +481,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( } ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); - if (!ds) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); + if (!ds) return NULL; - } + ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->desc_num = num; num = 0; @@ -705,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op) if (ret) return ret; + d->irq = irq; + /* init phy channel */ d->phy = devm_kzalloc(&op->dev, d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL); @@ -759,7 +760,7 @@ static int k3_dma_probe(struct platform_device *op) ret = dma_async_device_register(&d->slave); if (ret) - return ret; + goto dma_async_register_fail; ret = of_dma_controller_register((&op->dev)->of_node, k3_of_dma_simple_xlate, d); @@ -776,6 +777,8 @@ static int k3_dma_probe(struct platform_device *op) of_dma_register_fail: dma_async_device_unregister(&d->slave); +dma_async_register_fail: + clk_disable_unprepare(d->clk); return ret; } @@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op) dma_async_device_unregister(&d->slave); of_dma_controller_free((&op->dev)->of_node); + devm_free_irq(&op->dev, d->irq, d); + list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { list_del(&c->vc.chan.device_node); tasklet_kill(&c->vc.task); @@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data) static int mmp_pdma_remove(struct platform_device *op) { struct mmp_pdma_device *pdev = platform_get_drvdata(op); + struct mmp_pdma_phy *phy; + int i, irq = 0, irq_num = 0; + + + for (i = 0; i < pdev->dma_channels; i++) { + if (platform_get_irq(op, i) > 0) + irq_num++; + } + + if (irq_num != pdev->dma_channels) { + irq = platform_get_irq(op, 0); + devm_free_irq(&op->dev, irq, pdev); + } else { + for (i = 0; i < pdev->dma_channels; i++) { + phy = &pdev->phy[i]; + irq = platform_get_irq(op, i); + devm_free_irq(&op->dev, irq, phy); + } + } dma_async_device_unregister(&pdev->device); return 0; @@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan) return; } -struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) +static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) { struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); @@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, /* alloc channel */ tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); - if (!tdmac) { - dev_err(tdev->dev, "no free memory for DMA channels!\n"); + if (!tdmac) return -ENOMEM; - } + if (irq) tdmac->irq = irq; tdmac->dev = tdev->dev; @@ -593,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) return true; } -struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, +static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct mmp_tdma_device *tdev = ofdma->of_dma_data; @@ -148,6 +148,7 @@ struct moxart_chan { struct moxart_dmadev { struct dma_device dma_slave; struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL]; + unsigned int irq; }; struct moxart_filter_data { @@ -574,10 +575,8 @@ static int moxart_probe(struct platform_device *pdev) struct moxart_dmadev *mdc; mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); - if (!mdc) { - dev_err(dev, "can't allocate DMA container\n"); + if 
(!mdc) return -ENOMEM; - } irq = irq_of_parse_and_map(node, 0); if (irq == NO_IRQ) { @@ -617,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev) dev_err(dev, "devm_request_irq failed\n"); return ret; } + mdc->irq = irq; ret = dma_async_device_register(&mdc->dma_slave); if (ret) { @@ -640,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev) { struct moxart_dmadev *m = platform_get_drvdata(pdev); + devm_free_irq(&pdev->dev, m->irq, m); + dma_async_device_unregister(&m->dma_slave); if (pdev->dev.of_node) @@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op) } free_irq(mdma->irq, mdma); irq_dispose_mapping(mdma->irq); + tasklet_kill(&mdma->tasklet); return 0; } @@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) goto free_resources; } - src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, - PAGE_SIZE, DMA_TO_DEVICE); + src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), + (size_t)src & ~PAGE_MASK, PAGE_SIZE, + DMA_TO_DEVICE); unmap->addr[0] = src_dma; ret = dma_mapping_error(dma_chan->device->dev, src_dma); @@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) } unmap->to_cnt = 1; - dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, - PAGE_SIZE, DMA_FROM_DEVICE); + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), + (size_t)dest & ~PAGE_MASK, PAGE_SIZE, + DMA_FROM_DEVICE); unmap->addr[1] = dest_dma; ret = dma_mapping_error(dma_chan->device->dev, dest_dma); @@ -1055,7 +1057,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, err_free_irq: free_irq(mv_chan->irq, mv_chan); - err_free_dma: +err_free_dma: dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); return ERR_PTR(ret); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c new file mode 100644 index 000000000000..a28a01fcba67 --- /dev/null +++ b/drivers/dma/mv_xor_v2.c @@ -0,0 +1,878 @@ +/* + * Copyright (C) 2015-2016 Marvell International Ltd. + * + * This program is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, either version 2 of the + * License, or any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include "dmaengine.h" + +/* DMA Engine Registers */ +#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000 +#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004 +#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008 +#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF +#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0 +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF +#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16 +#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F +#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202 +#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C +#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014 +#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018 +#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF +#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0 +#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C + /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */ +#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF +#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16 +#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050 +#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054 +#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100 +#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1 +#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7 +#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800 +#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804 +#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808 + +/* XOR Global registers */ +#define MV_XOR_V2_GLOB_BW_CTRL 0x4 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8 +#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12 +#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16 +#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4 +#define MV_XOR_V2_GLOB_PAUSE 0x014 +#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8 +#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200 +#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204 +#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220 +#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224 + +#define MV_XOR_V2_MIN_DESC_SIZE 32 +#define MV_XOR_V2_EXT_DESC_SIZE 128 + +#define MV_XOR_V2_DESC_RESERVED_SIZE 12 +#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12 + +#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8 + +/* + * Descriptors queue size. With 32 bytes descriptors, up to 2^14 + * descriptors are allowed, with 128 bytes descriptors, up to 2^12 + * descriptors are allowed. This driver uses 128 bytes descriptors, + * but experimentation has shown that a set of 1024 descriptors is + * sufficient to reach a good level of performance. + */ +#define MV_XOR_V2_DESC_NUM 1024 + +/** + * struct mv_xor_v2_descriptor - DMA HW descriptor + * @desc_id: used by S/W and is not affected by H/W. 
+ * @flags: error and status flags + * @crc32_result: CRC32 calculation result + * @desc_ctrl: operation mode and control flags + * @buff_size: amount of bytes to be processed + * @fill_pattern_src_addr: Fill-Pattern or Source-Address and + * AW-Attributes + * @data_buff_addr: Source (and might be RAID6 destination) + * addresses of data buffers in RAID5 and RAID6 + * @reserved: reserved + */ +struct mv_xor_v2_descriptor { + u16 desc_id; + u16 flags; + u32 crc32_result; + u32 desc_ctrl; + + /* Definitions for desc_ctrl */ +#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22 +#define DESC_OP_MODE_SHIFT 28 +#define DESC_OP_MODE_NOP 0 /* Idle operation */ +#define DESC_OP_MODE_MEMCPY 1 /* Pure-DMA operation */ +#define DESC_OP_MODE_MEMSET 2 /* Mem-Fill operation */ +#define DESC_OP_MODE_MEMINIT 3 /* Mem-Init operation */ +#define DESC_OP_MODE_MEM_COMPARE 4 /* Mem-Compare operation */ +#define DESC_OP_MODE_CRC32 5 /* CRC32 calculation */ +#define DESC_OP_MODE_XOR 6 /* RAID5 (XOR) operation */ +#define DESC_OP_MODE_RAID6 7 /* RAID6 P&Q-generation */ +#define DESC_OP_MODE_RAID6_REC 8 /* RAID6 Recovery */ +#define DESC_Q_BUFFER_ENABLE BIT(16) +#define DESC_P_BUFFER_ENABLE BIT(17) +#define DESC_IOD BIT(27) + + u32 buff_size; + u32 fill_pattern_src_addr[4]; + u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE]; + u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE]; +}; + +/** + * struct mv_xor_v2_device - implements a xor device + * @lock: lock for the engine + * @dma_base: memory mapped DMA register base + * @glob_base: memory mapped global register base + * @clk: reference to the DMA engine clock + * @irq_tasklet: tasklet that services completed descriptors + * @free_sw_desc: linked list of free SW descriptors + * @dmadev: dma device + * @dmachan: dma channel + * @hw_desq: HW descriptors queue + * @hw_desq_virt: virtual address of DESCQ + * @sw_desq: SW descriptors queue + * @desc_size: HW descriptor size + * @npendings: number of pending descriptors (for which tx_submit has + * been called, but not yet issue_pending) + */ +struct mv_xor_v2_device { + spinlock_t lock; + void __iomem *dma_base; + void __iomem *glob_base; + struct clk *clk; + struct tasklet_struct irq_tasklet; + struct list_head free_sw_desc; + struct dma_device dmadev; + struct dma_chan dmachan; + dma_addr_t hw_desq; + struct mv_xor_v2_descriptor *hw_desq_virt; + struct mv_xor_v2_sw_desc *sw_desq; + int desc_size; + unsigned int npendings; +}; + +/** + * struct mv_xor_v2_sw_desc - implements a xor SW descriptor + * @idx: descriptor index + * @async_tx: support for the async_tx api + * @hw_desc: associated HW descriptor + * @free_list: node of the free SW descriptors list + */ +struct mv_xor_v2_sw_desc { + int idx; + struct dma_async_tx_descriptor async_tx; + struct mv_xor_v2_descriptor hw_desc; + struct list_head free_list; +}; + +/* + * Fill the data buffer addresses into a HW descriptor + */ +static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev, + struct mv_xor_v2_descriptor *desc, + dma_addr_t src, int index) +{ + int arr_index = ((index >> 1) * 3); + + /* + * Fill the buffer addresses into the descriptor.
+ * + * The format of the buffer addresses for 2 sequential buffers + * X and X + 1: + * + * First word: Buffer-DX-Address-Low[31:0] + * Second word: Buffer-DX+1-Address-Low[31:0] + * Third word: DX+1-Buffer-Address-High[47:32] [31:16] + * DX-Buffer-Address-High[47:32] [15:0] + */ + if ((index & 0x1) == 0) { + desc->data_buff_addr[arr_index] = lower_32_bits(src); + + desc->data_buff_addr[arr_index + 2] &= ~0xFFFF; + desc->data_buff_addr[arr_index + 2] |= + upper_32_bits(src) & 0xFFFF; + } else { + desc->data_buff_addr[arr_index + 1] = + lower_32_bits(src); + + desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000; + desc->data_buff_addr[arr_index + 2] |= + (upper_32_bits(src) & 0xFFFF) << 16; + } +} + +/* + * Return the next available index in the DESQ. + */ +static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) +{ + /* read the index for the next available descriptor in the DESQ */ + u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); + + return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) + & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); +} + +/* + * notify the engine of new descriptors, and update the available index. + */ +static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, + int num_of_desc) +{ + /* write the number of new descriptors in the DESQ. */ + writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF); +} + +/* + * free HW descriptors + */ +static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev, + int num_of_desc) +{ + /* write the number of descriptors to free from the DESQ. */ + writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF); +} + +/* + * Set descriptor size + * Return the HW descriptor size in bytes + */ +static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) +{ + writel(MV_XOR_V2_DMA_DESQ_CTRL_128B, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF); + + return MV_XOR_V2_EXT_DESC_SIZE; +} + +/* + * Set the IMSG threshold + */ +static inline +void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) +{ + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); + + reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); +} + +static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) +{ + struct mv_xor_v2_device *xor_dev = data; + unsigned int ndescs; + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF); + + ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK); + + /* No descriptors to process */ + if (!ndescs) + return IRQ_NONE; + + /* + * Update IMSG threshold, to disable new IMSG interrupts until + * end of the tasklet + */ + mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); + + /* schedule a tasklet to handle descriptors callbacks */ + tasklet_schedule(&xor_dev->irq_tasklet); + + return IRQ_HANDLED; +} + +/* + * submit a descriptor to the DMA engine + */ +static dma_cookie_t +mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) +{ + int desq_ptr; + void *dest_hw_desc; + dma_cookie_t cookie; + struct mv_xor_v2_sw_desc *sw_desc = + container_of(tx, struct mv_xor_v2_sw_desc, async_tx); + struct mv_xor_v2_device *xor_dev = + container_of(tx->chan, struct mv_xor_v2_device, dmachan); + + dev_dbg(xor_dev->dmadev.dev, + "%s sw_desc %p: async_tx %p\n", + __func__, sw_desc, &sw_desc->async_tx); + + /* assign cookie */
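+ /* + * Note that tx_submit only stages the transaction: under the channel + * lock it assigns the cookie, copies the prebuilt HW descriptor into + * the next free DESQ slot and bumps npendings; the engine is only + * told about the new work later, from mv_xor_v2_issue_pending(). + */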
+ spin_lock_bh(&xor_dev->lock); + cookie = dma_cookie_assign(tx); + + /* get the next available slot in the DESQ */ + desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); + + /* copy the HW descriptor from the SW descriptor to the DESQ */ + dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; + + memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); + + xor_dev->npendings++; + + spin_unlock_bh(&xor_dev->lock); + + return cookie; +} + +/* + * Prepare a SW descriptor + */ +static struct mv_xor_v2_sw_desc * +mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) +{ + struct mv_xor_v2_sw_desc *sw_desc; + + /* Lock the channel */ + spin_lock_bh(&xor_dev->lock); + + if (list_empty(&xor_dev->free_sw_desc)) { + spin_unlock_bh(&xor_dev->lock); + /* schedule tasklet to free some descriptors */ + tasklet_schedule(&xor_dev->irq_tasklet); + return NULL; + } + + /* get a free SW descriptor from the SW DESQ */ + sw_desc = list_first_entry(&xor_dev->free_sw_desc, + struct mv_xor_v2_sw_desc, free_list); + list_del(&sw_desc->free_list); + + /* Release the channel */ + spin_unlock_bh(&xor_dev->lock); + + /* set the async tx descriptor */ + dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); + sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; + async_tx_ack(&sw_desc->async_tx); + + return sw_desc; +} + +/* + * Prepare a HW descriptor for a memcpy operation + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev; + + xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan); + + dev_dbg(xor_dev->dmadev.dev, + "%s len: %zu src %pad dest %pad flags: %ld\n", + __func__, len, &src, &dest, flags); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + if (!sw_desc) + return NULL; + + sw_desc->async_tx.flags = flags; + + /* set the HW descriptor */ + hw_descriptor = &sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the MEMCPY control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT; + + if (flags & DMA_PREP_INTERRUPT) + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* Set source address */ + hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src); + hw_descriptor->fill_pattern_src_addr[1] = + upper_32_bits(src) & 0xFFFF; + + /* Set Destination address */ + hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest); + hw_descriptor->fill_pattern_src_addr[3] = + upper_32_bits(dest) & 0xFFFF; + + /* Set buffers size */ + hw_descriptor->buff_size = len; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * Prepare a HW descriptor for a XOR operation + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + int i; + + if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1) + return NULL; + + dev_dbg(xor_dev->dmadev.dev, + "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", + __func__, src_cnt, len, &dest, flags); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + if (!sw_desc) + return NULL; + + sw_desc->async_tx.flags = flags; + + /* set the HW descriptor */ + hw_descriptor =
&sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the XOR control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT; + hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE; + + if (flags & DMA_PREP_INTERRUPT) + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* Set the data buffers */ + for (i = 0; i < src_cnt; i++) + mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i); + + hw_descriptor->desc_ctrl |= + src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT; + + /* Set Destination address */ + hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest); + hw_descriptor->fill_pattern_src_addr[3] = + upper_32_bits(dest) & 0xFFFF; + + /* Set buffers size */ + hw_descriptor->buff_size = len; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * Prepare a HW descriptor for interrupt operation. + */ +static struct dma_async_tx_descriptor * +mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) +{ + struct mv_xor_v2_sw_desc *sw_desc; + struct mv_xor_v2_descriptor *hw_descriptor; + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + + sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); + if (!sw_desc) + return NULL; + + /* set the HW descriptor */ + hw_descriptor = &sw_desc->hw_desc; + + /* save the SW descriptor ID to restore when operation is done */ + hw_descriptor->desc_id = sw_desc->idx; + + /* Set the INTERRUPT control word */ + hw_descriptor->desc_ctrl = + DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT; + hw_descriptor->desc_ctrl |= DESC_IOD; + + /* return the async tx descriptor */ + return &sw_desc->async_tx; +} + +/* + * push pending transactions to hardware + */ +static void mv_xor_v2_issue_pending(struct dma_chan *chan) +{ + struct mv_xor_v2_device *xor_dev = + container_of(chan, struct mv_xor_v2_device, dmachan); + + spin_lock_bh(&xor_dev->lock); + + /* + * update the engine with the number of descriptors to + * process + */ + mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings); + xor_dev->npendings = 0; + + /* Activate the channel */ + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); + + spin_unlock_bh(&xor_dev->lock); +} + +static inline +int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev, + int *pending_ptr) +{ + u32 reg; + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF); + + /* get the next pending descriptor index */ + *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK); + + /* get the number of pending descriptors to handle */ + return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) & + MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK); +} + +/* + * handle the descriptors after HW process + */ +static void mv_xor_v2_tasklet(unsigned long data) +{ + struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; + int pending_ptr, num_of_pending, i; + struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; + + dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); + + /* get the pending descriptors parameters */ + num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); + + /* loop over the pending descriptors */ + for (i = 0; i < num_of_pending; i++) { + struct mv_xor_v2_descriptor *next_pending_hw_desc = + xor_dev->hw_desq_virt + pending_ptr; + + /* get
the SW descriptor related to the HW descriptor */ + next_pending_sw_desc = + &xor_dev->sw_desq[next_pending_hw_desc->desc_id]; + + /* call the callback */ + if (next_pending_sw_desc->async_tx.cookie > 0) { + /* + * update the channel's completed cookie - no + * lock is required; the IMSG threshold provides + * the locking + */ + dma_cookie_complete(&next_pending_sw_desc->async_tx); + + if (next_pending_sw_desc->async_tx.callback) + next_pending_sw_desc->async_tx.callback( + next_pending_sw_desc->async_tx.callback_param); + + dma_descriptor_unmap(&next_pending_sw_desc->async_tx); + } + + dma_run_dependencies(&next_pending_sw_desc->async_tx); + + /* Lock the channel */ + spin_lock_bh(&xor_dev->lock); + + /* add the SW descriptor to the free descriptors list */ + list_add(&next_pending_sw_desc->free_list, + &xor_dev->free_sw_desc); + + /* Release the channel */ + spin_unlock_bh(&xor_dev->lock); + + /* increment the next descriptor, wrapping at the end of the DESQ */ + pending_ptr++; + if (pending_ptr >= MV_XOR_V2_DESC_NUM) + pending_ptr = 0; + } + + if (num_of_pending != 0) { + /* free the descriptors */ + mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); + } + + /* Update IMSG threshold, to enable new IMSG interrupts */ + mv_xor_v2_set_imsg_thrd(xor_dev, 0); +} + +/* + * Set DMA Interrupt-message (IMSG) parameters + */ +static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev); + + writel(msg->address_lo, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF); + writel(msg->address_hi & 0xFFFF, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF); + writel(msg->data, + xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF); +} + +static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) +{ + u32 reg; + + /* write the DESQ size to the DMA engine */ + writel(MV_XOR_V2_DESC_NUM, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); + + /* write the DESQ address to the DMA engine */ + writel(xor_dev->hw_desq & 0xFFFFFFFF, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); + writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, + xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); + + /* enable the DMA engine */ + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); + + /* + * This is a temporary solution, until we activate the + * SMMU. Set the attributes for reading & writing data buffers + * & descriptors to: + * + * - OuterShareable - Snoops will be performed on CPU caches + * - Enable cacheable - Bufferable, Modifiable, Other Allocate + * and Allocate + */ + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF); + reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK; + reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE | + MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE; + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF); + + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF); + reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK; + reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE | + MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE; + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF); + + /* BW CTRL - set values to optimize the XOR performance: + * + * - Set WrBurstLen & RdBurstLen - the unit will issue + * maximum of 256B write/read transactions. + * - Limit the number of outstanding write & read data + * (OBB/IBB) requests to the maximal value.
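+ * + * With the _VAL constants defined above this programs 64 outstanding + * reads, 8 outstanding writes and 256-byte read/write bursts; this is + * a restatement of the register values written below, not an extra + * tuning step.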
+ */ + reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL << + MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL << + MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL << + MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) | + (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL << + MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT)); + writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL); + + /* Disable the AXI timer feature */ + reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); + reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; + writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); + + return 0; +} + +static int mv_xor_v2_probe(struct platform_device *pdev) +{ + struct mv_xor_v2_device *xor_dev; + struct resource *res; + int i, ret = 0; + struct dma_device *dma_dev; + struct mv_xor_v2_sw_desc *sw_desc; + struct msi_desc *msi_desc; + + BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) != + MV_XOR_V2_EXT_DESC_SIZE); + + xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL); + if (!xor_dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xor_dev->dma_base)) + return PTR_ERR(xor_dev->dma_base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xor_dev->glob_base)) + return PTR_ERR(xor_dev->glob_base); + + platform_set_drvdata(pdev, xor_dev); + + xor_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR(xor_dev->clk)) { + ret = clk_prepare_enable(xor_dev->clk); + if (ret) + return ret; + } + + ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, + mv_xor_v2_set_msi_msg); + if (ret) + goto disable_clk; + + msi_desc = first_msi_entry(&pdev->dev); + if (!msi_desc) { + ret = -ENODEV; + goto free_msi_irqs; + } + + ret = devm_request_irq(&pdev->dev, msi_desc->irq, + mv_xor_v2_interrupt_handler, 0, + dev_name(&pdev->dev), xor_dev); + if (ret) + goto free_msi_irqs; + + tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet, + (unsigned long) xor_dev); + + xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev); + + dma_cookie_init(&xor_dev->dmachan); + + /* + * allocate coherent memory for hardware descriptors + * note: writecombine gives slightly better performance, but + * requires that we explicitly flush the writes + */ + xor_dev->hw_desq_virt = + dma_alloc_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + &xor_dev->hw_desq, GFP_KERNEL); + if (!xor_dev->hw_desq_virt) { + ret = -ENOMEM; + goto free_msi_irqs; + } + + /* alloc memory for the SW descriptors */ + xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) * + MV_XOR_V2_DESC_NUM, GFP_KERNEL); + if (!xor_dev->sw_desq) { + ret = -ENOMEM; + goto free_hw_desq; + } + + spin_lock_init(&xor_dev->lock); + + /* init the free SW descriptors list */ + INIT_LIST_HEAD(&xor_dev->free_sw_desc); + + /* add all SW descriptors to the free list */ + for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { + xor_dev->sw_desq[i].idx = i; + list_add(&xor_dev->sw_desq[i].free_list, + &xor_dev->free_sw_desc); + } + + dma_dev = &xor_dev->dmadev; + + /* set DMA capabilities */ + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_cap_set(DMA_XOR, dma_dev->cap_mask); + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); + + /* init dma link list */ + INIT_LIST_HEAD(&dma_dev->channels); + + /* set base routines */ +
dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_issue_pending = mv_xor_v2_issue_pending; + dma_dev->dev = &pdev->dev; + + dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy; + dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt; + dma_dev->max_xor = 8; + dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor; + + xor_dev->dmachan.device = dma_dev; + + list_add_tail(&xor_dev->dmachan.device_node, + &dma_dev->channels); + + mv_xor_v2_descq_init(xor_dev); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto free_hw_desq; + + dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n"); + + return 0; + +free_hw_desq: + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); +free_msi_irqs: + platform_msi_domain_free_irqs(&pdev->dev); +disable_clk: + if (!IS_ERR(xor_dev->clk)) + clk_disable_unprepare(xor_dev->clk); + return ret; +} + +static int mv_xor_v2_remove(struct platform_device *pdev) +{ + struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&xor_dev->dmadev); + + dma_free_coherent(&pdev->dev, + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); + + platform_msi_domain_free_irqs(&pdev->dev); + + clk_disable_unprepare(xor_dev->clk); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id mv_xor_v2_dt_ids[] = { + { .compatible = "marvell,xor-v2", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids); +#endif + +static struct platform_driver mv_xor_v2_driver = { + .probe = mv_xor_v2_probe, + .remove = mv_xor_v2_remove, + .driver = { + .name = "mv_xor_v2", + .of_match_table = of_match_ptr(mv_xor_v2_dt_ids), + }, +}; + +module_platform_driver(mv_xor_v2_driver); + +MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine"); +MODULE_LICENSE("GPL"); @@ -227,6 +227,7 @@ struct nbpf_device { void __iomem *base; struct clk *clk; const struct nbpf_config *config; + unsigned int eirq; struct nbpf_channel chan[]; }; @@ -1300,10 +1301,9 @@ static int nbpf_probe(struct platform_device *pdev) nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * sizeof(nbpf->chan[0]), GFP_KERNEL); - if (!nbpf) { - dev_err(dev, "Memory allocation failed\n"); + if (!nbpf) return -ENOMEM; - } + dma_dev = &nbpf->dma_dev; dma_dev->dev = dev; @@ -1376,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev) IRQF_SHARED, "dma error", nbpf); if (ret < 0) return ret; + nbpf->eirq = eirq; INIT_LIST_HEAD(&dma_dev->channels); @@ -1447,6 +1448,17 @@ e_clk_off: static int nbpf_remove(struct platform_device *pdev) { struct nbpf_device *nbpf = platform_get_drvdata(pdev); + int i; + + devm_free_irq(&pdev->dev, nbpf->eirq, nbpf); + + for (i = 0; i < nbpf->config->num_channels; i++) { + struct nbpf_channel *chan = nbpf->chan + i; + + devm_free_irq(&pdev->dev, chan->irq, chan); + + tasklet_kill(&chan->tasklet); + } of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&nbpf->dma_dev); @@ -59,6 +59,8 @@ struct omap_sg { dma_addr_t addr; uint32_t en; /* number of elements (24-bit) */ uint32_t fn; /* number of frames (16-bit) */ + int32_t fi; /* for double indexing */ + int16_t ei; /* for double indexing */ }; struct omap_desc { @@ -66,7 +68,8 @@ struct omap_desc { enum dma_transfer_direction dir; dma_addr_t dev_addr; - int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */ + int16_t ei; /* for double indexing */ uint8_t es; /* CSDP_DATA_TYPE_xxx */ 
uint32_t ccr; /* CCR value */ uint16_t clnk_ctrl; /* CLNK_CTRL value */ @@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, } omap_dma_chan_write(c, cxsa, sg->addr); - omap_dma_chan_write(c, cxei, 0); - omap_dma_chan_write(c, cxfi, 0); + omap_dma_chan_write(c, cxei, sg->ei); + omap_dma_chan_write(c, cxfi, sg->fi); omap_dma_chan_write(c, CEN, sg->en); omap_dma_chan_write(c, CFN, sg->fn); @@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c) } omap_dma_chan_write(c, cxsa, d->dev_addr); - omap_dma_chan_write(c, cxei, 0); + omap_dma_chan_write(c, cxei, d->ei); omap_dma_chan_write(c, cxfi, d->fi); omap_dma_chan_write(c, CSDP, d->csdp); omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); @@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy( return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } +static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct omap_desc *d; + struct omap_sg *sg; + uint8_t data_type; + size_t src_icg, dst_icg; + + /* Slave mode is not supported */ + if (is_slave_direction(xt->dir)) + return NULL; + + if (xt->frame_size != 1 || xt->numf == 0) + return NULL; + + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size)); + if (data_type > CSDP_DATA_TYPE_32) + data_type = CSDP_DATA_TYPE_32; + + sg = &d->sg[0]; + d->dir = DMA_MEM_TO_MEM; + d->dev_addr = xt->src_start; + d->es = data_type; + sg->en = xt->sgl[0].size / BIT(data_type); + sg->fn = xt->numf; + sg->addr = xt->dst_start; + d->sglen = 1; + d->ccr = c->ccr; + + src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); + dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); + if (src_icg) { + d->ccr |= CCR_SRC_AMODE_DBLIDX; + d->ei = 1; + d->fi = src_icg; + } else if (xt->src_inc) { + d->ccr |= CCR_SRC_AMODE_POSTINC; + d->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: SRC constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + if (dst_icg) { + d->ccr |= CCR_DST_AMODE_DBLIDX; + sg->ei = 1; + sg->fi = dst_icg; + } else if (xt->dst_inc) { + d->ccr |= CCR_DST_AMODE_POSTINC; + sg->fi = 0; + } else { + dev_err(chan->device->dev, + "%s: DST constant addressing is not supported\n", + __func__); + kfree(d); + return NULL; + } + + d->cicr = CICR_DROP_IE | CICR_FRAME_IE; + + d->csdp = data_type; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF; + } else { + d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED; + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; + } + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { struct omap_chan *c = to_omap_dma_chan(chan); @@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); + dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; od->ddev.device_tx_status = omap_dma_tx_status; @@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev) 
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy; + od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved; od->ddev.device_config = omap_dma_slave_config; od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; @@ -1204,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev) static int omap_dma_remove(struct platform_device *pdev) { struct omap_dmadev *od = platform_get_drvdata(pdev); + int irq; if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); + irq = platform_get_irq(pdev, 1); + devm_free_irq(&pdev->dev, irq, od); + dma_async_device_unregister(&od->ddev); if (!od->legacy) { @@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) /* Allocate a new DMAC and its Channels */ pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(&adev->dev, "unable to allocate mem\n"); + if (!pl330) return -ENOMEM; - } pd = &pl330->ddma; pd->dev = &adev->dev; @@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); if (!pl330->peripherals) { ret = -ENOMEM; - dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); goto probe_err2; } @@ -3005,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev) { struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; + int i, irq; pm_runtime_get_noresume(pl330->ddma.dev); if (adev->dev.of_node) of_dma_controller_free(adev->dev.of_node); + for (i = 0; i < AMBA_NR_IRQS; i++) { + irq = adev->irq[i]; + devm_free_irq(&adev->dev, irq, pl330); + } + dma_async_device_unregister(&pl330->ddma); /* Idle the DMAC */ @@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a device */ adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) { - dev_err(&ofdev->dev, "failed to allocate device\n"); initcode = PPC_ADMA_INIT_ALLOC; ret = -ENOMEM; goto err_adev_alloc; @@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) /* create a channel */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { - dev_err(&ofdev->dev, "can't allocate channel structure\n"); initcode = PPC_ADMA_INIT_CHANNEL; ret = -ENOMEM; goto err_chan_alloc; @@ -21,6 +21,7 @@ #include <linux/of_device.h> #include <linux/of_dma.h> #include <linux/of.h> +#include <linux/wait.h> #include <linux/dma/pxa-dma.h> #include "dmaengine.h" @@ -118,6 +119,8 @@ struct pxad_chan { struct pxad_phy *phy; struct dma_pool *desc_pool; /* Descriptors pool */ dma_cookie_t bus_error; + + wait_queue_head_t wq_state; }; struct pxad_device { @@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \ return single_open(file, dbg_show_##name, inode->i_private); \ } \ static const struct file_operations dbg_fops_##name = { \ - .owner = THIS_MODULE, \ .open = dbg_open_##name, \ .llseek = seq_lseek, \ .read = seq_read, \ @@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan, */ phy_writel(chan->phy, desc->first, DDADR); phy_enable(chan->phy, chan->misaligned); + wake_up(&chan->wq_state); } static void set_updater_desc(struct pxad_desc_sw *sw_desc, @@ -717,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) } } spin_unlock_irqrestore(&chan->vc.lock, flags); + wake_up(&chan->wq_state); return IRQ_HANDLED; } @@ -1268,6 
+1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan, return ret; } +static void pxad_synchronize(struct dma_chan *dchan) +{ + struct pxad_chan *chan = to_pxad_chan(dchan); + + wait_event(chan->wq_state, !is_chan_running(chan)); + vchan_synchronize(&chan->vc); +} + static void pxad_free_channels(struct dma_device *dmadev) { struct pxad_chan *c, *cn; @@ -1372,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op, pdev->slave.device_tx_status = pxad_tx_status; pdev->slave.device_issue_pending = pxad_issue_pending; pdev->slave.device_config = pxad_config; + pdev->slave.device_synchronize = pxad_synchronize; pdev->slave.device_terminate_all = pxad_terminate_all; if (op->dev.coherent_dma_mask) @@ -1389,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op, return -ENOMEM; c->vc.desc_free = pxad_free_desc; vchan_init(&c->vc, &pdev->slave); + init_waitqueue_head(&c->wq_state); } return dma_async_device_register(&pdev->slave); @@ -48,6 +48,7 @@ #include <linux/of_dma.h> #include <linux/clk.h> #include <linux/dmaengine.h> +#include <linux/pm_runtime.h> #include "../dmaengine.h" #include "../virt-dma.h" @@ -58,6 +59,8 @@ struct bam_desc_hw { __le16 flags; }; +#define BAM_DMA_AUTOSUSPEND_DELAY 100 + #define DESC_FLAG_INT BIT(15) #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) @@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan) struct bam_device *bdev = bchan->bdev; u32 val; unsigned long flags; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; vchan_free_chan_resources(to_virt_chan(chan)); if (bchan->curr_txd) { dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); - return; + goto err; } spin_lock_irqsave(&bchan->vc.lock, flags); @@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan) /* disable irq */ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); + +err: + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 1; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan) struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; unsigned long flag; + int ret; + + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&bchan->vc.lock, flag); writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); bchan->paused = 0; spin_unlock_irqrestore(&bchan->vc.lock, flag); + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); return 0; } @@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) { struct bam_device *bdev = data; u32 clr_mask = 0, srcs = 0; + int ret; srcs |= process_channel_irqs(bdev); @@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data) if (srcs & P_IRQ) tasklet_schedule(&bdev->task); + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return IRQ_NONE; + if (srcs & BAM_IRQ) { clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); @@ -814,6 +845,9 @@
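The pxa change above wires up device_synchronize: dmaengine_terminate_async() may return while the channel is still draining and its completion tasklet still running, so clients must synchronize before freeing callback state. A hedged sketch of the intended caller-side ordering (all names illustrative):

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Illustrative teardown for a client whose completion callback touches
 * 'state': terminate, synchronize, then free.
 */
static void example_channel_shutdown(struct dma_chan *chan, void *state)
{
	/* Stops the hardware but may return before callbacks finish */
	dmaengine_terminate_async(chan);

	/*
	 * Blocks until the channel is quiescent: for pxa_dma this sleeps
	 * on wq_state until is_chan_running() is false, then
	 * vchan_synchronize() flushes the completion tasklet.
	 */
	dmaengine_synchronize(chan);

	kfree(state);	/* safe: no callback can still be running */
}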
writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); } + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); + return IRQ_HANDLED; } @@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan) struct bam_desc_hw *desc; struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, sizeof(struct bam_desc_hw)); + int ret; lockdep_assert_held(&bchan->vc.lock); @@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan) async_desc = container_of(vd, struct bam_async_desc, vd); bchan->curr_txd = async_desc; + ret = pm_runtime_get_sync(bdev->dev); + if (ret < 0) + return; + /* on first use, initialize the channel hardware */ if (!bchan->initialized) bam_chan_init_hw(bchan, async_desc->dir); @@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan) wmb(); writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); } /** @@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data) bam_start_dma(bchan); spin_unlock_irqrestore(&bchan->vc.lock, flags); } + } /** @@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev) if (ret) goto err_unregister_dma; + pm_runtime_irq_safe(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return 0; err_unregister_dma: @@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev) struct bam_device *bdev = platform_get_drvdata(pdev); u32 i; + pm_runtime_force_suspend(&pdev->dev); + of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&bdev->common); @@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused bam_dma_runtime_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + clk_disable(bdev->bamclk); + + return 0; +} + +static int __maybe_unused bam_dma_runtime_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(bdev->bamclk); + if (ret < 0) { + dev_err(dev, "clk_enable failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int __maybe_unused bam_dma_suspend(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + + pm_runtime_force_suspend(dev); + + clk_unprepare(bdev->bamclk); + + return 0; +} + +static int __maybe_unused bam_dma_resume(struct device *dev) +{ + struct bam_device *bdev = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare(bdev->bamclk); + if (ret) + return ret; + + pm_runtime_force_resume(dev); + + return 0; +} + +static const struct dev_pm_ops bam_dma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume) + SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume, + NULL) +}; + static struct platform_driver bam_dma_driver = { .probe = bam_dma_probe, .remove = bam_dma_remove, .driver = { .name = "bam-dma-engine", + .pm = &bam_dma_pm_ops, .of_match_table = bam_of_match, }, }; @@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev) pm_runtime_get_sync(dmadev->ddev.dev); dma_async_device_unregister(&dmadev->ddev); devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); + tasklet_kill(&dmadev->task); hidma_debug_uninit(dmadev); hidma_ll_uninit(dmadev->lldev); hidma_free(dmadev); @@ -831,6 +831,7 
@@ int hidma_ll_uninit(struct hidma_lldev *lldev) required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; tasklet_kill(&lldev->task); + tasklet_kill(&lldev->rst_task); memset(lldev->trepool, 0, required_bytes); lldev->trepool = NULL; lldev->pending_tre_count = 0; @@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) pdevinfo.size_data = 0; pdevinfo.dma_mask = DMA_BIT_MASK(64); new_pdev = platform_device_register_full(&pdevinfo); - if (!new_pdev) { - ret = -ENODEV; + if (IS_ERR(new_pdev)) { + ret = PTR_ERR(new_pdev); goto out; } of_dma_configure(&new_pdev->dev, child); @@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void) #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) struct device_node *child; - for (child = of_find_matching_node(NULL, hidma_mgmt_match); child; - child = of_find_matching_node(child, hidma_mgmt_match)) { + for_each_matching_node(child, hidma_mgmt_match) { /* device tree based firmware here */ hidma_mgmt_of_populate_channels(child); of_node_put(child); @@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&s3cchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) { - spin_unlock_irqrestore(&s3cchan->vc.lock, flags); - return ret; - } /* * There's no point calculating the residue if there's * no txstate to store the value. */ - if (!txstate) { + if (ret == DMA_COMPLETE || !txstate) { spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } @@ -1105,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, */ for (i = 0; i < channels; i++) { chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(dmadev->dev, - "%s no memory for channel\n", __func__); + if (!chan) return -ENOMEM; - } chan->id = i; chan->host = s3cdma; @@ -1143,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) struct s3c24xx_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, vc.chan.device_node) + next, &dmadev->channels, vc.chan.device_node) { list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } } /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ @@ -1366,6 +1361,18 @@ err_memcpy: return ret; } +static void s3c24xx_dma_free_irq(struct platform_device *pdev, + struct s3c24xx_dma_engine *s3cdma) +{ + int i; + + for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { + struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; + + devm_free_irq(&pdev->dev, phy->irq, phy); + } +} + static int s3c24xx_dma_remove(struct platform_device *pdev) { const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); @@ -1376,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&s3cdma->slave); dma_async_device_unregister(&s3cdma->memcpy); + s3c24xx_dma_free_irq(pdev, s3cdma); + s3c24xx_dma_free_virtual_channels(&s3cdma->slave); s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); @@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); - return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE; + return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)); } static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) @@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, spin_lock_irqsave(&chan->lock, flags); 
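The s3c24xx hunk above folds the two early returns into a single `ret == DMA_COMPLETE || !txstate` guard; the d40 and sun6i hunks further down apply the same rule. As a sketch, the canonical tx_status shape looks like this, with example_get_residue() a hypothetical stand-in for the driver's real residue computation:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma core helpers: dma_cookie_status() */

/* Hypothetical stand-in for a driver's residue calculation */
static u32 example_get_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	return 0;
}

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);

	/*
	 * A residue is pointless once the cookie has completed, and there
	 * is nowhere to store it when the caller passed txstate == NULL.
	 */
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	dma_set_residue(txstate, example_get_residue(chan, cookie));
	return ret;
}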
list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); - list_add_tail(&desc->node, &chan->desc.free); + list_add(&desc->node, &chan->desc.free); spin_unlock_irqrestore(&chan->lock, flags); } @@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) list_splice_init(&rchan->desc.done, &list); list_splice_init(&rchan->desc.wait, &list); + rchan->desc.running = NULL; + list_for_each_entry(desc, &list, node) rcar_dmac_realloc_hwdesc(rchan, desc, 0); @@ -1143,6 +1145,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc = chan->desc.running; struct rcar_dmac_xfer_chunk *running = NULL; struct rcar_dmac_xfer_chunk *chunk; + enum dma_status status; unsigned int residue = 0; unsigned int dptr = 0; @@ -1150,12 +1153,38 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, return 0; /* + * If the cookie corresponds to a descriptor that has been completed + * there is no residue. The same check has already been performed by the + * caller but without holding the channel lock, so the descriptor could + * now be complete. + */ + status = dma_cookie_status(&chan->chan, cookie, NULL); + if (status == DMA_COMPLETE) + return 0; + + /* * If the cookie doesn't correspond to the currently running transfer * then the descriptor hasn't been processed yet, and the residue is * equal to the full descriptor size. */ - if (cookie != desc->async_tx.cookie) - return desc->size; + if (cookie != desc->async_tx.cookie) { + list_for_each_entry(desc, &chan->desc.pending, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + list_for_each_entry(desc, &chan->desc.active, node) { + if (cookie == desc->async_tx.cookie) + return desc->size; + } + + /* + * No descriptor found for the cookie, there's thus no residue. + * This shouldn't happen if the calling driver passes a correct + * cookie value. 
+ */ + WARN(1, "No descriptor for cookie!"); + return 0; + } /* * In descriptor mode the descriptor running pointer is not maintained @@ -1202,6 +1231,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, residue = rcar_dmac_chan_get_residue(rchan, cookie); spin_unlock_irqrestore(&rchan->lock, flags); + /* if there's no residue, the cookie is complete */ + if (!residue) + return DMA_COMPLETE; + dma_set_residue(txstate, residue); return status; @@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), GFP_KERNEL); - if (!sh_chan) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sh_chan) return -ENOMEM; - } schan = &sh_chan->shdma_chan; schan->max_xfer_len = SH_DMA_TCR_MAX + 1; @@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev) shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), GFP_KERNEL); - if (!shdev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!shdev) return -ENOMEM; - } dma_dev = &shdev->shdma_dev.dma_dev; @@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, int err; sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); - if (!sc) { - dev_err(sdev->dma_dev.dev, - "No free memory for allocating dma channels!\n"); + if (!sc) return -ENOMEM; - } schan = &sc->shdma_chan; schan->max_xfer_len = 64 * 1024 * 1024 - 1; @@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev) err = -ENOMEM; su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), GFP_KERNEL); - if (!su_dev) { - dev_err(&pdev->dev, "Not enough memory\n"); + if (!su_dev) return err; - } dma_dev = &su_dev->shdma_dev.dma_dev; @@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) int ret, i; sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); - if (!sdma) { - dev_err(dev, "Memory exhausted!\n"); + if (!sdma) return -ENOMEM; - } + data = (struct sirfsoc_dmadata *) (of_match_device(op->dev.driver->of_match_table, &op->dev)->data); @@ -981,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) of_dma_controller_free(op->dev.of_node); dma_async_device_unregister(&sdma->dma); free_irq(sdma->irq, sdma); + tasklet_kill(&sdma->tasklet); irq_dispose_mapping(sdma->irq); pm_runtime_disable(&op->dev); if (!pm_runtime_status_suspended(&op->dev)) @@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) }; -struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { .exec = sirfsoc_dma_execute_hw_a6, .type = SIRFSOC_DMA_VER_A6, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { .exec = sirfsoc_dma_execute_hw_a7v1, .type = SIRFSOC_DMA_VER_A7V1, }; -struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { +static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { .exec = sirfsoc_dma_execute_hw_a7v2, .type = SIRFSOC_DMA_VER_A7V2, }; @@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, } ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_COMPLETE) + if (ret != DMA_COMPLETE && txstate) dma_set_residue(txstate, stedma40_residue(chan)); if (d40_is_paused(d40c)) @@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) (num_phy_chans + num_log_chans + 
num_memcpy_chans) * sizeof(struct d40_chan), GFP_KERNEL); - if (base == NULL) { - d40_err(&pdev->dev, "Out of memory\n"); + if (base == NULL) goto failure; - } base->rev = rev; base->clk = clk; @@ -10,7 +10,7 @@ #include "ste_dma40_ll.h" -u8 d40_width_to_bits(enum dma_slave_buswidth width) +static u8 d40_width_to_bits(enum dma_slave_buswidth width) { if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) return STEDMA40_ESIZE_8_BIT; @@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, state); - if (ret == DMA_COMPLETE) + if (ret == DMA_COMPLETE || !state) return ret; spin_lock_irqsave(&vchan->vc.lock, flags); @@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get( /* Allocate DMA desc */ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT); - if (!dma_desc) { - dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); + if (!dma_desc) return NULL; - } dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); dma_desc->txd.tx_submit = tegra_dma_tx_submit; @@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get( spin_unlock_irqrestore(&tdc->lock, flags); sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT); - if (!sg_req) - dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); + return sg_req; } @@ -484,7 +481,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, * load new configuration. */ tegra_dma_pause(tdc, false); - status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); /* * If interrupt is pending then do nothing as the ISR will handle @@ -822,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, /* Check on wait_ack desc status */ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } @@ -836,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { dma_desc = sg_req->dma_desc; if (dma_desc->txd.cookie == cookie) { - residual = dma_desc->bytes_requested - - (dma_desc->bytes_transferred % - dma_desc->bytes_requested); - dma_set_residue(txstate, residual); ret = dma_desc->dma_status; - spin_unlock_irqrestore(&tdc->lock, flags); - return ret; + goto found; } } - dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); + dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); + dma_desc = NULL; + +found: + if (dma_desc && txstate) { + residual = dma_desc->bytes_requested - + (dma_desc->bytes_transferred % + dma_desc->bytes_requested); + dma_set_residue(txstate, residual); + } + spin_unlock_irqrestore(&tdc->lock, flags); return ret; } @@ -905,7 +902,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, enum dma_slave_buswidth *slave_bw) { - switch (direction) { case DMA_MEM_TO_DEV: *apb_addr = tdc->dma_sconfig.dst_addr; @@ -948,8 +944,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc; - unsigned int i; - struct scatterlist *sg; + unsigned int i; + struct scatterlist *sg; unsigned long csr, ahb_seq, apb_ptr, apb_seq; struct list_head req_list; 
struct tegra_dma_sg_req *sg_req = NULL; @@ -1062,7 +1058,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc = NULL; - struct tegra_dma_sg_req *sg_req = NULL; + struct tegra_dma_sg_req *sg_req = NULL; unsigned long csr, ahb_seq, apb_ptr, apb_seq; int len; size_t remain_len; @@ -1204,7 +1200,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma *tdma = tdc->tdma; - struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; struct list_head dma_desc_list; @@ -1305,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = { static int tegra_dma_probe(struct platform_device *pdev) { - struct resource *res; + struct resource *res; struct tegra_dma *tdma; int ret; int i; @@ -1319,10 +1314,8 @@ static int tegra_dma_probe(struct platform_device *pdev) tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * sizeof(struct tegra_dma_channel), GFP_KERNEL); - if (!tdma) { - dev_err(&pdev->dev, "Error: memory allocation failed\n"); + if (!tdma) return -ENOMEM; - } tdma->dev = &pdev->dev; tdma->chip_data = cdata; @@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = { .probe = ti_dma_xbar_probe, }; -int omap_dmaxbar_init(void) +static int omap_dmaxbar_init(void) { return platform_driver_register(&ti_dma_xbar_driver); } @@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) int err; td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); - if (!td_desc) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc) goto out; - } td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); - if (!td_desc->desc_list) { - dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); + if (!td_desc->desc_list) goto err; - } dma_async_tx_descriptor_init(&td_desc->txd, chan); td_desc->txd.tx_submit = td_tx_submit; @@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev) { struct txx9dmac_chan *dc = platform_get_drvdata(pdev); + dma_async_device_unregister(&dc->dma); - if (dc->irq >= 0) + if (dc->irq >= 0) { + devm_free_irq(&pdev->dev, dc->irq, dc); tasklet_kill(&dc->tasklet); + } dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; return 0; } @@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev) struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); txx9dmac_off(ddev); - if (ddev->irq >= 0) + if (ddev->irq >= 0) { + devm_free_irq(&pdev->dev, ddev->irq, ddev); tasklet_kill(&ddev->tasklet); + } return 0; } @@ -1 +1,2 @@ -obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o +obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o +obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o @@ -45,6 +45,7 @@ #include <linux/of_irq.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "../dmaengine.h" @@ -113,7 +114,7 @@ #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -157,12 +158,25 @@ /* AXI DMA Specific Masks/Bit fields */ #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) +#define 
XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) #define XILINX_DMA_CR_COALESCE_SHIFT 16 #define XILINX_DMA_BD_SOP BIT(27) #define XILINX_DMA_BD_EOP BIT(26) #define XILINX_DMA_COALESCE_MAX 255 #define XILINX_DMA_NUM_APP_WORDS 5 +/* Multi-Channel DMA Descriptor offsets */ +#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) +#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) + +/* Multi-Channel DMA Masks/Shifts */ +#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) +#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) +#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) +#define XILINX_DMA_BD_STRIDE_SHIFT 0 +#define XILINX_DMA_BD_VSIZE_SHIFT 19 + /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -194,22 +208,22 @@ struct xilinx_vdma_desc_hw { /** * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 - * @pad2: Reserved @0x0C - * @pad3: Reserved @0x10 - * @pad4: Reserved @0x14 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @mcdma_control: Control field for mcdma @0x10 + * @vsize_stride: Vsize and Stride field @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 */ struct xilinx_axidma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 buf_addr; - u32 pad2; - u32 pad3; - u32 pad4; + u32 buf_addr_msb; + u32 mcdma_control; + u32 vsize_stride; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -218,21 +232,21 @@ struct xilinx_axidma_desc_hw { /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 - * @pad1: Reserved @0x04 + * @next_desc_msb: Next Descriptor Pointer MSB @0x04 * @src_addr: Source address @0x08 - * @pad2: Reserved @0x0C + * @src_addr_msb: Source address MSB @0x0C * @dest_addr: Destination address @0x10 - * @pad3: Reserved @0x14 + * @dest_addr_msb: Destination address MSB @0x14 * @control: Control field @0x18 * @status: Status field @0x1C */ struct xilinx_cdma_desc_hw { u32 next_desc; - u32 pad1; + u32 next_desc_msb; u32 src_addr; - u32 pad2; + u32 src_addr_msb; u32 dest_addr; - u32 pad3; + u32 dest_addr_msb; u32 control; u32 status; } __aligned(64); @@ -278,11 +292,13 @@ struct xilinx_cdma_tx_segment { * @async_tx: Async transaction descriptor * @segments: TX segments list * @node: Node in the channel descriptors list + * @cyclic: Check for cyclic transfers. */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; + bool cyclic; }; /** @@ -302,6 +318,7 @@ struct xilinx_dma_tx_descriptor { * @direction: Transfer direction * @num_frms: Number of frames * @has_sg: Support scatter transfers + * @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode * @err: Channel has errors * @tasklet: Cleanup work after irq @@ -312,6 +329,7 @@ struct xilinx_dma_tx_descriptor { * @desc_submitcount: Descriptor h/w submitted count * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @start_transfer: Differentiate b/w DMA IP's transfer */ struct xilinx_dma_chan { @@ -330,6 +348,7 @@ struct xilinx_dma_chan { enum dma_transfer_direction direction; int num_frms; bool has_sg; + bool cyclic; bool genlock; bool err; struct tasklet_struct tasklet; @@ -340,7 +359,9 @@ struct xilinx_dma_chan { u32 desc_submitcount; u32 residue; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_axidma_tx_segment *cyclic_seg_v; void (*start_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; }; struct xilinx_dma_config { @@ -357,6 +378,7 @@ struct xilinx_dma_config { * @common: DMA device structure * @chan: Driver specific DMA channel * @has_sg: Specifies whether Scatter-Gather is present or not + * @mcdma: Specifies whether Multi-Channel is present or not * @flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -366,6 +388,8 @@ struct xilinx_dma_config { * @txs_clk: DMA mm2s stream clock * @rx_clk: DMA s2mm clock * @rxs_clk: DMA s2mm stream clock + * @nr_channels: Number of channels DMA device supports + * @chan_id: DMA channel identifier */ struct xilinx_dma_device { void __iomem *regs; @@ -373,6 +397,7 @@ struct xilinx_dma_device { struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; bool has_sg; + bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -382,6 +407,8 @@ struct xilinx_dma_device { struct clk *txs_clk; struct clk *rx_clk; struct clk *rxs_clk; + u32 nr_channels; + u32 chan_id; }; /* Macros */ @@ -454,6 +481,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); } +static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) +{ + lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); +} + +static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, + dma_addr_t addr) +{ + if (chan->ext_addr) + dma_writeq(chan, reg, addr); + else + dma_ctrl_write(chan, reg, addr); +} + +static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, + struct xilinx_axidma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used, + size_t period_len) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + + period_len); + } else { + hw->buf_addr = buf_addr + sg_used + period_len; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -491,11 +546,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_cdma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -513,11 +567,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) struct xilinx_axidma_tx_segment *segment; dma_addr_t phys; - segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, 
&phys); + segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); if (!segment) return NULL; - memset(segment, 0, sizeof(*segment)); segment->phys = phys; return segment; @@ -660,13 +713,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) dev_dbg(chan->dev, "Free all channel resources.\n"); xilinx_dma_free_descriptors(chan); - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); xilinx_dma_free_tx_segment(chan, chan->seg_v); + } dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } /** + * xilinx_dma_chan_handle_cyclic - Cyclic dma callback + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * @flags: flags for spin lock + */ +static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc, + unsigned long *flags) +{ + dma_async_tx_callback callback; + void *callback_param; + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock_irqrestore(&chan->lock, *flags); + callback(callback_param); + spin_lock_irqsave(&chan->lock, *flags); + } +} + +/** * xilinx_dma_chan_desc_cleanup - Clean channel descriptors * @chan: Driver specific DMA channel */ @@ -681,6 +758,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) dma_async_tx_callback callback; void *callback_param; + if (desc->cyclic) { + xilinx_dma_chan_handle_cyclic(chan, desc, &flags); + break; + } + /* Remove from the list of running transactions */ list_del(&desc->node); @@ -757,7 +839,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { /* * For AXI DMA case after submitting a pending_list, keep * an extra segment allocated so that the "next descriptor" @@ -768,6 +850,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) */ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. 
+ */ + chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); + } + dma_cookie_init(dchan); if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { @@ -1065,12 +1156,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); /* Update tail ptr register which will start the transfer */ - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); } else { /* In simple mode */ struct xilinx_cdma_tx_segment *segment; @@ -1082,8 +1173,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) hw = &segment->hw; - dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1124,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; + if (chan->has_sg && !chan->xdev->mcdma) { + old_head = list_first_entry(&head_desc->segments, + struct xilinx_axidma_tx_segment, node); + new_head = chan->seg_v; + /* Copy Buffer Descriptor fields. */ + new_head->hw = old_head->hw; - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; + /* Swap and save new reserve */ + list_replace_init(&old_head->node, &new_head->node); + chan->seg_v = old_head; - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; + tail_segment->hw.next_desc = chan->seg_v->phys; + head_desc->async_tx.phys = new_head->phys; + } reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); @@ -1146,9 +1239,25 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg) - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); + if (chan->has_sg && !chan->xdev->mcdma) + xilinx_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + + if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + if (!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, + head_desc->async_tx.phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_CDESC(chan->tdest), + head_desc->async_tx.phys); + } + } + } xilinx_dma_start(chan); @@ -1156,9 +1265,27 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) return; /* Start the transfer */ - if (chan->has_sg) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->cyclic) + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + chan->cyclic_seg_v->phys); + else + xilinx_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else if (chan->has_sg && chan->xdev->mcdma) { + if (chan->direction == DMA_MEM_TO_DEV) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); + } else { + if 
(!chan->tdest) { + dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, + tail_segment->phys); + } else { + dma_ctrl_write(chan, + XILINX_DMA_MCRX_TDESC(chan->tdest), + tail_segment->phys); + } + } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -1168,7 +1295,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) node); hw = &segment->hw; - dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, @@ -1209,7 +1336,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) list_for_each_entry_safe(desc, next, &chan->active_list, node) { list_del(&desc->node); - dma_cookie_complete(&desc->async_tx); + if (!desc->cyclic) + dma_cookie_complete(&desc->async_tx); list_add_tail(&desc->node, &chan->done_list); } } @@ -1397,6 +1525,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) unsigned long flags; int err; + if (chan->cyclic) { + xilinx_dma_free_tx_descriptor(chan, desc); + return -EBUSY; + } + if (chan->err) { /* * If reset fails, need to hard reset the system. @@ -1414,6 +1547,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* Put this transaction onto the tail of the pending queue */ append_desc_queue(chan, desc); + if (desc->cyclic) + chan->cyclic = true; + spin_unlock_irqrestore(&chan->lock, flags); return cookie; @@ -1541,6 +1677,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, hw->control = len; hw->src_addr = dma_src; hw->dest_addr = dma_dst; + if (chan->ext_addr) { + hw->src_addr_msb = upper_32_bits(dma_src); + hw->dest_addr_msb = upper_32_bits(dma_dst); + } /* Fill the previous next descriptor with current */ prev = list_last_entry(&desc->segments, @@ -1623,7 +1763,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( hw = &segment->hw; /* Fill in the descriptor */ - hw->buf_addr = sg_dma_address(sg) + sg_used; + xilinx_axidma_buf(chan, hw, sg_dma_address(sg), + sg_used, 0); hw->control = copy; @@ -1669,12 +1810,204 @@ error: } /** + * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA_SLAVE transaction + * @dchan: DMA channel + * @buf_addr: Physical address of the buffer + * @buf_len: Total length of the cyclic buffers + * @period_len: length of individual cyclic buffer + * @direction: DMA direction + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( + struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; + size_t copy, sg_used; + unsigned int num_periods; + int i; + u32 reg; + + if (!period_len) + return NULL; + + num_periods = buf_len / period_len; + + if (!num_periods) + return NULL; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor.
*/ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = direction; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + for (i = 0; i < num_periods; ++i) { + sg_used = 0; + + while (sg_used < period_len) { + struct xilinx_axidma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, period_len - sg_used, + XILINX_DMA_MAX_TRANS_LEN); + hw = &segment->hw; + xilinx_axidma_buf(chan, hw, buf_addr, sg_used, + period_len * i); + hw->control = copy; + + if (prev) + prev->hw.next_desc = segment->phys; + + prev = segment; + sg_used += copy; + + /* + * Insert the segment into the descriptor segments + * list. + */ + list_add_tail(&segment->node, &desc->segments); + } + } + + head_segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = head_segment->phys; + + desc->cyclic = true; + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.next_desc = (u32) head_segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (direction == DMA_MEM_TO_DEV) { + head_segment->hw.control |= XILINX_DMA_BD_SOP; + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** + * xilinx_dma_prep_interleaved - prepare a descriptor for a + * DMA_SLAVE transaction + * @dchan: DMA channel + * @xt: Interleaved template pointer + * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_dma_prep_interleaved(struct dma_chan *dchan, + struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_axidma_tx_segment *segment; + struct xilinx_axidma_desc_hw *hw; + + if (!is_slave_direction(xt->dir)) + return NULL; + + if (!xt->numf || !xt->sgl[0].size) + return NULL; + + if (xt->frame_size != 1) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + chan->direction = xt->dir; + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Get a free segment */ + segment = xilinx_axidma_alloc_tx_segment(chan); + if (!segment) + goto error; + + hw = &segment->hw; + + /* Fill in the descriptor */ + if (xt->dir != DMA_MEM_TO_DEV) + hw->buf_addr = xt->dst_start; + else + hw->buf_addr = xt->src_start; + + hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; + hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & + XILINX_DMA_BD_VSIZE_MASK; + hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & + XILINX_DMA_BD_STRIDE_MASK; + hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + + /* + * Insert the segment into the descriptor segments + * list. 
+ */ + list_add_tail(&segment->node, &desc->segments); + + + segment = list_first_entry(&desc->segments, + struct xilinx_axidma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (xt->dir == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_DMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_axidma_tx_segment, + node); + segment->hw.control |= XILINX_DMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; +} + +/** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific DMA Channel pointer */ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + u32 reg; + + if (chan->cyclic) + xilinx_dma_chan_reset(chan); /* Halt the DMA engine */ xilinx_dma_halt(chan); @@ -1682,6 +2015,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); + if (chan->cyclic) { + reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); + reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; + dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); + chan->cyclic = false; + } + return 0; } @@ -1972,7 +2312,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) * Return: '0' on success and failure value on error */ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, - struct device_node *node) + struct device_node *node, int chan_id) { struct xilinx_dma_chan *chan; bool has_dre = false; @@ -2014,9 +2354,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (!has_dre) xdev->common.copy_align = fls(width - 1); - if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; - chan->id = 0; + chan->id = chan_id; + chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2027,9 +2370,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->flush_on_fsync = true; } } else if (of_device_is_compatible(node, - "xlnx,axi-vdma-s2mm-channel")) { + "xlnx,axi-vdma-s2mm-channel") || + of_device_is_compatible(node, + "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; - chan->id = 1; + chan->id = chan_id; + chan->tdest = chan_id - xdev->nr_channels; chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2084,6 +2430,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /** + * xilinx_dma_child_probe - Per child node probe + * It gets the number of dma-channels per child node from + * the device tree and initializes all the channels. + * + * @xdev: Driver specific device structure + * @node: Device node + * + * Return: 0 always.
+ */ +static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, + struct device_node *node) { + int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if ((ret < 0) && xdev->mcdma) + dev_warn(xdev->dev, "missing dma-channels property\n"); + + for (i = 0; i < nr_channels; i++) + xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); + + xdev->nr_channels += nr_channels; + + return 0; +} + +/** * of_dma_xilinx_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data @@ -2096,7 +2468,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct xilinx_dma_device *xdev = ofdma->of_dma_data; int chan_id = dma_spec->args[0]; - if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) + if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) return NULL; return dma_get_slave_channel(&xdev->chan[chan_id]->common); @@ -2172,6 +2544,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Retrieve the DMA engine properties from the device tree */ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { err = of_property_read_u32(node, "xlnx,num-fstores", @@ -2218,7 +2592,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; + xdev->common.device_prep_dma_cyclic = + xilinx_dma_prep_dma_cyclic; + xdev->common.device_prep_interleaved_dma = + xilinx_dma_prep_interleaved; /* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; @@ -2234,13 +2613,13 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Initialize the channels */ for_each_child_of_node(node, child) { - err = xilinx_dma_chan_probe(xdev, child); + err = xilinx_dma_child_probe(xdev, child); if (err < 0) goto disable_clks; } if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xdev->chan[i]->num_frms = num_frames; } @@ -2263,7 +2642,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) disable_clks: xdma_disable_allclks(xdev); error: - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); @@ -2285,7 +2664,7 @@ static int xilinx_dma_remove(struct platform_device *pdev) dma_async_device_unregister(&xdev->common); - for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) + for (i = 0; i < xdev->nr_channels; i++) if (xdev->chan[i]) xilinx_dma_chan_remove(xdev->chan[i]); diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c new file mode 100644 index 000000000000..6d221e5c72ee --- /dev/null +++ b/ drivers/dma/xilinx/zynqmp_dma.c@@ -0,0 +1,1151 @@ +/* + * DMA driver for Xilinx ZynqMP DMA Engine + * + * Copyright (C) 2016 Xilinx, Inc. All rights reserved. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dma/xilinx_dma.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_dma.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include "../dmaengine.h" + +/* Register Offsets */ +#define ZYNQMP_DMA_ISR 0x100 +#define ZYNQMP_DMA_IMR 0x104 +#define ZYNQMP_DMA_IER 0x108 +#define ZYNQMP_DMA_IDS 0x10C +#define ZYNQMP_DMA_CTRL0 0x110 +#define ZYNQMP_DMA_CTRL1 0x114 +#define ZYNQMP_DMA_DATA_ATTR 0x120 +#define ZYNQMP_DMA_DSCR_ATTR 0x124 +#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128 +#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C +#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130 +#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134 +#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138 +#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C +#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140 +#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144 +#define ZYNQMP_DMA_SRC_START_LSB 0x158 +#define ZYNQMP_DMA_SRC_START_MSB 0x15C +#define ZYNQMP_DMA_DST_START_LSB 0x160 +#define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_RATE_CTRL 0x18C +#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 +#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 +#define ZYNQMP_DMA_CTRL2 0x200 + +/* Interrupt registers bit field definitions */ +#define ZYNQMP_DMA_DONE BIT(10) +#define ZYNQMP_DMA_AXI_WR_DATA BIT(9) +#define ZYNQMP_DMA_AXI_RD_DATA BIT(8) +#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7) +#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6) +#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5) +#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4) +#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3) +#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2) +#define ZYNQMP_DMA_INV_APB BIT(0) + +/* Control 0 register bit field definitions */ +#define ZYNQMP_DMA_OVR_FETCH BIT(7) +#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6) +#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3) + +/* Control 1 register bit field definitions */ +#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0) + +/* Data Attribute register bit field definitions */ +#define ZYNQMP_DMA_ARBURST GENMASK(27, 26) +#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22) +#define ZYNQMP_DMA_ARCACHE_OFST 22 +#define ZYNQMP_DMA_ARQOS GENMASK(21, 18) +#define ZYNQMP_DMA_ARQOS_OFST 18 +#define ZYNQMP_DMA_ARLEN GENMASK(17, 14) +#define ZYNQMP_DMA_ARLEN_OFST 14 +#define ZYNQMP_DMA_AWBURST GENMASK(13, 12) +#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8) +#define ZYNQMP_DMA_AWCACHE_OFST 8 +#define ZYNQMP_DMA_AWQOS GENMASK(7, 4) +#define ZYNQMP_DMA_AWQOS_OFST 4 +#define ZYNQMP_DMA_AWLEN GENMASK(3, 0) +#define ZYNQMP_DMA_AWLEN_OFST 0 + +/* Descriptor Attribute register bit field definitions */ +#define ZYNQMP_DMA_AXCOHRNT BIT(8) +#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4) +#define ZYNQMP_DMA_AXCACHE_OFST 4 +#define ZYNQMP_DMA_AXQOS GENMASK(3, 0) +#define ZYNQMP_DMA_AXQOS_OFST 0 + +/* Control register 2 bit field definitions */ +#define ZYNQMP_DMA_ENABLE BIT(0) + +/* Buffer Descriptor definitions */ +#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10 +#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4 +#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2 +#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1 + +/* Interrupt Mask specific definitions */ +#define ZYNQMP_DMA_INT_ERR 
(ZYNQMP_DMA_AXI_RD_DATA | \ + ZYNQMP_DMA_AXI_WR_DATA | \ + ZYNQMP_DMA_AXI_RD_DST_DSCR | \ + ZYNQMP_DMA_AXI_RD_SRC_DSCR | \ + ZYNQMP_DMA_INV_APB) +#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \ + ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \ + ZYNQMP_DMA_IRQ_DST_ACCT_ERR) +#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE) +#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \ + ZYNQMP_DMA_INT_ERR | \ + ZYNQMP_DMA_INT_OVRFL | \ + ZYNQMP_DMA_DST_DSCR_DONE) + +/* Max number of descriptors per channel */ +#define ZYNQMP_DMA_NUM_DESCS 32 + +/* Max transfer size per descriptor */ +#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + +/* Reset values for data attributes */ +#define ZYNQMP_DMA_AXCACHE_VAL 0xF +#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF +#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF + +#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF + +/* Bus width in bits */ +#define ZYNQMP_DMA_BUS_WIDTH_64 64 +#define ZYNQMP_DMA_BUS_WIDTH_128 128 + +#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) + +#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ + common) +#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \ + async_tx) + +/** + * struct zynqmp_dma_desc_ll - Hw linked list descriptor + * @addr: Buffer address + * @size: Size of the buffer + * @ctrl: Control word + * @nxtdscraddr: Next descriptor base address + * @rsvd: Reserved field for Hw internal use. + */ +struct zynqmp_dma_desc_ll { + u64 addr; + u32 size; + u32 ctrl; + u64 nxtdscraddr; + u64 rsvd; +} __aligned(64); + +/** + * struct zynqmp_dma_desc_sw - Per Transaction structure + * @src: Source address for simple mode dma + * @dst: Destination address for simple mode dma + * @len: Transfer length for simple mode dma + * @node: Node in the channel descriptor list + * @tx_list: List head for the current transfer + * @async_tx: Async transaction descriptor + * @src_v: Virtual address of the src descriptor + * @src_p: Physical address of the src descriptor + * @dst_v: Virtual address of the dst descriptor + * @dst_p: Physical address of the dst descriptor + */ +struct zynqmp_dma_desc_sw { + u64 src; + u64 dst; + u32 len; + struct list_head node; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + struct zynqmp_dma_desc_ll *src_v; + dma_addr_t src_p; + struct zynqmp_dma_desc_ll *dst_v; + dma_addr_t dst_p; +}; + +/** + * struct zynqmp_dma_chan - Driver specific DMA channel structure + * @zdev: Driver specific device structure + * @regs: Control registers offset + * @lock: Descriptor operation lock + * @pending_list: Descriptors waiting + * @free_list: Descriptors free + * @active_list: Descriptors active + * @sw_desc_pool: SW descriptor pool + * @done_list: Complete descriptors + * @common: DMA common channel + * @desc_pool_v: Statically allocated descriptor base + * @desc_pool_p: Physical allocated descriptor base + * @desc_free_cnt: Descriptor available count + * @dev: The dma device + * @irq: Channel IRQ + * @is_dmacoherent: Tells whether dma operations are coherent or not + * @tasklet: Cleanup work after irq + * @idle: Channel idle state + * @desc_size: Size of the low level descriptor + * @err: Channel has errors + * @bus_width: Bus width + * @src_burst_len: Source burst length + * @dst_burst_len: Dest burst length + * @clk_main: Pointer to main clock + * @clk_apb: Pointer to apb clock + */ +struct zynqmp_dma_chan { + struct zynqmp_dma_device *zdev; + void __iomem *regs; + spinlock_t lock; + struct list_head pending_list; + struct
list_head free_list; + struct list_head active_list; + struct zynqmp_dma_desc_sw *sw_desc_pool; + struct list_head done_list; + struct dma_chan common; + void *desc_pool_v; + dma_addr_t desc_pool_p; + u32 desc_free_cnt; + struct device *dev; + int irq; + bool is_dmacoherent; + struct tasklet_struct tasklet; + bool idle; + u32 desc_size; + bool err; + u32 bus_width; + u32 src_burst_len; + u32 dst_burst_len; + struct clk *clk_main; + struct clk *clk_apb; +}; + +/** + * struct zynqmp_dma_device - DMA device structure + * @dev: Device Structure + * @common: DMA device structure + * @chan: Driver specific DMA channel + */ +struct zynqmp_dma_device { + struct device *dev; + struct dma_device common; + struct zynqmp_dma_chan *chan; +}; + +static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, + u64 value) +{ + lo_hi_writeq(value, chan->regs + reg); +} + +/** + * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller + * @chan: ZynqMP DMA channel pointer + * @desc: Transaction descriptor pointer + */ +static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_sw *desc) +{ + dma_addr_t addr; + + addr = desc->src_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr); + addr = desc->dst_p; + zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr); +} + +/** + * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor + * @chan: ZynqMP DMA channel pointer + * @desc: Hw descriptor pointer + */ +static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan, + void *desc) +{ + struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc; + + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP; + hw++; + hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP; +} + +/** + * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor + * @chan: ZynqMP DMA channel pointer + * @sdesc: Hw descriptor pointer + * @src: Source buffer address + * @dst: Destination buffer address + * @len: Transfer length + * @prev: Previous hw descriptor pointer + */ +static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan, + struct zynqmp_dma_desc_ll *sdesc, + dma_addr_t src, dma_addr_t dst, size_t len, + struct zynqmp_dma_desc_ll *prev) +{ + struct zynqmp_dma_desc_ll *ddesc = sdesc + 1; + + sdesc->size = ddesc->size = len; + sdesc->addr = src; + ddesc->addr = dst; + + sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256; + if (chan->is_dmacoherent) { + sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT; + } + + if (prev) { + dma_addr_t addr = chan->desc_pool_p + + ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v); + ddesc = prev + 1; + prev->nxtdscraddr = addr; + ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan); + } +} + +/** + * zynqmp_dma_init - Initialize the channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_init(struct zynqmp_dma_chan *chan) +{ + u32 val; + + writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); + val = readl(chan->regs + ZYNQMP_DMA_ISR); + writel(val, chan->regs + ZYNQMP_DMA_ISR); + + if (chan->is_dmacoherent) { + val = ZYNQMP_DMA_AXCOHRNT; + val = (val & ~ZYNQMP_DMA_AXCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR); + } + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + if (chan->is_dmacoherent) { + val = (val & ~ZYNQMP_DMA_ARCACHE) | + (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST); + val = (val & ~ZYNQMP_DMA_AWCACHE) |
+			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+	}
+	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+	/* Clear the interrupt accounting registers */
+	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+	chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+	struct zynqmp_dma_desc_sw *desc, *new;
+	dma_cookie_t cookie;
+
+	new = tx_to_desc(tx);
+	spin_lock_bh(&chan->lock);
+	cookie = dma_cookie_assign(tx);
+
+	if (!list_empty(&chan->pending_list)) {
+		desc = list_last_entry(&chan->pending_list,
+				       struct zynqmp_dma_desc_sw, node);
+		if (!list_empty(&desc->tx_list))
+			desc = list_last_entry(&desc->tx_list,
+					       struct zynqmp_dma_desc_sw, node);
+		desc->src_v->nxtdscraddr = new->src_p;
+		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+		desc->dst_v->nxtdscraddr = new->dst_p;
+		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+	}
+
+	list_add_tail(&new->node, &chan->pending_list);
+	spin_unlock_bh(&chan->lock);
+
+	return cookie;
+}
+
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc;
+
+	spin_lock_bh(&chan->lock);
+	desc = list_first_entry(&chan->free_list,
+				struct zynqmp_dma_desc_sw, node);
+	list_del(&desc->node);
+	spin_unlock_bh(&chan->lock);
+
+	INIT_LIST_HEAD(&desc->tx_list);
+	/* Clear the src and dst descriptor memory */
+	memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+	memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+	return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Free descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+				       struct zynqmp_dma_desc_sw *sdesc)
+{
+	struct zynqmp_dma_desc_sw *child, *next;
+
+	chan->desc_free_cnt++;
+	list_add_tail(&sdesc->node, &chan->free_list);
+	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+		chan->desc_free_cnt++;
+		list_move_tail(&child->node, &chan->free_list);
+	}
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List to parse and delete the descriptor
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+				      struct list_head *list)
+{
+	struct zynqmp_dma_desc_sw *desc, *next;
+
+	list_for_each_entry_safe(desc, next, list, node)
+		zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	struct zynqmp_dma_desc_sw *desc;
+	int i;
+
+	chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+				     GFP_KERNEL);
+	if (!chan->sw_desc_pool)
+		return -ENOMEM;
+
+	chan->idle = true;
+	chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+	INIT_LIST_HEAD(&chan->free_list);
+
+	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+		desc = chan->sw_desc_pool + i;
+		dma_async_tx_descriptor_init(&desc->async_tx,
&chan->common); + desc->async_tx.tx_submit = zynqmp_dma_tx_submit; + list_add_tail(&desc->node, &chan->free_list); + } + + chan->desc_pool_v = dma_zalloc_coherent(chan->dev, + (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + &chan->desc_pool_p, GFP_KERNEL); + if (!chan->desc_pool_v) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) { + desc = chan->sw_desc_pool + i; + desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2)); + desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1); + desc->src_p = chan->desc_pool_p + + (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2); + desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan); + } + + return ZYNQMP_DMA_NUM_DESCS; +} + +/** + * zynqmp_dma_start - Start DMA channel + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) +{ + writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); + chan->idle = false; + writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); +} + +/** + * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt + * @chan: ZynqMP DMA channel pointer + * @status: Interrupt status value + */ +static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) +{ + u32 val; + + if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) + val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); +} + +static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) +{ + u32 val; + + val = readl(chan->regs + ZYNQMP_DMA_CTRL0); + val |= ZYNQMP_DMA_POINT_TYPE_SG; + writel(val, chan->regs + ZYNQMP_DMA_CTRL0); + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + val = (val & ~ZYNQMP_DMA_ARLEN) | + (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + val = (val & ~ZYNQMP_DMA_AWLEN) | + (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); +} + +/** + * zynqmp_dma_device_config - Zynqmp dma device configuration + * @dchan: DMA channel + * @config: DMA device config + */ +static int zynqmp_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct zynqmp_dma_chan *chan = to_chan(dchan); + + chan->src_burst_len = config->src_maxburst; + chan->dst_burst_len = config->dst_maxburst; + + return 0; +} + +/** + * zynqmp_dma_start_transfer - Initiate the new transfer + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc; + + if (!chan->idle) + return; + + zynqmp_dma_config(chan); + + desc = list_first_entry_or_null(&chan->pending_list, + struct zynqmp_dma_desc_sw, node); + if (!desc) + return; + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + zynqmp_dma_update_desc_to_ctrlr(chan, desc); + zynqmp_dma_start(chan); +} + + +/** + * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors + * @chan: ZynqMP DMA channel + */ +static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan) +{ + struct zynqmp_dma_desc_sw *desc, *next; + + list_for_each_entry_safe(desc, next, &chan->done_list, node) { + dma_async_tx_callback callback; + void *callback_param; + + list_del(&desc->node); + + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + if (callback) { + spin_unlock(&chan->lock); + callback(callback_param); + spin_lock(&chan->lock); + } + + /* Run any dependencies, then free the descriptor */ + 
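+		/*
+		 * Note: chan->lock is dropped around the client callback
+		 * above because a completion callback is allowed to prepare
+		 * and submit a new transaction on this channel, which would
+		 * otherwise retake chan->lock and deadlock.
+		 */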
zynqmp_dma_free_descriptor(chan, desc);
+	}
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+	struct zynqmp_dma_desc_sw *desc;
+
+	desc = list_first_entry_or_null(&chan->active_list,
+					struct zynqmp_dma_desc_sw, node);
+	if (!desc)
+		return;
+	list_del(&desc->node);
+	dma_cookie_complete(&desc->async_tx);
+	list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	spin_lock_bh(&chan->lock);
+	zynqmp_dma_start_transfer(chan);
+	spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+	zynqmp_dma_free_desc_list(chan, &chan->active_list);
+	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+	zynqmp_dma_free_desc_list(chan, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	spin_lock_bh(&chan->lock);
+	zynqmp_dma_free_descriptors(chan);
+	spin_unlock_bh(&chan->lock);
+	dma_free_coherent(chan->dev,
+		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
+		chan->desc_pool_v, chan->desc_pool_p);
+	kfree(chan->sw_desc_pool);
+}
+
+/**
+ * zynqmp_dma_reset - Reset the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
+{
+	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+
+	zynqmp_dma_complete_descriptor(chan);
+	zynqmp_dma_chan_desc_cleanup(chan);
+	zynqmp_dma_free_descriptors(chan);
+	zynqmp_dma_init(chan);
+}
+
+/**
+ * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the ZynqMP DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
+{
+	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+	u32 isr, imr, status;
+	irqreturn_t ret = IRQ_NONE;
+
+	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
+	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
+	status = isr & ~imr;
+
+	writel(isr, chan->regs + ZYNQMP_DMA_ISR);
+	if (status & ZYNQMP_DMA_INT_DONE) {
+		tasklet_schedule(&chan->tasklet);
+		ret = IRQ_HANDLED;
+	}
+
+	if (status & ZYNQMP_DMA_DONE)
+		chan->idle = true;
+
+	if (status & ZYNQMP_DMA_INT_ERR) {
+		chan->err = true;
+		tasklet_schedule(&chan->tasklet);
+		dev_err(chan->dev, "Channel %p has errors\n", chan);
+		ret = IRQ_HANDLED;
+	}
+
+	if (status & ZYNQMP_DMA_INT_OVRFL) {
+		zynqmp_dma_handle_ovfl_int(chan, status);
+		dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/**
+ * zynqmp_dma_do_tasklet - Process completed descriptors in tasklet context
+ * @data: Pointer to the ZynqMP DMA channel structure
+ */
+static void zynqmp_dma_do_tasklet(unsigned long data)
+{
+	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+	u32 count;
+
+	spin_lock(&chan->lock);
+
+	if (chan->err) {
+		zynqmp_dma_reset(chan);
+		chan->err = false;
+		goto unlock;
+	}
+
+	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+	while (count) {
zynqmp_dma_complete_descriptor(chan);
+		zynqmp_dma_chan_desc_cleanup(chan);
+		count--;
+	}
+
+	if (chan->idle)
+		zynqmp_dma_start_transfer(chan);
+
+unlock:
+	spin_unlock(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
+ * @dchan: DMA channel pointer
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
+{
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+	spin_lock_bh(&chan->lock);
+	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+	zynqmp_dma_free_descriptors(chan);
+	spin_unlock_bh(&chan->lock);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
+				struct dma_chan *dchan, dma_addr_t dma_dst,
+				dma_addr_t dma_src, size_t len, ulong flags)
+{
+	struct zynqmp_dma_chan *chan;
+	struct zynqmp_dma_desc_sw *new, *first = NULL;
+	void *desc = NULL, *prev = NULL;
+	size_t copy;
+	u32 desc_cnt;
+
+	chan = to_chan(dchan);
+
+	if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
+		return NULL;
+
+	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
+
+	spin_lock_bh(&chan->lock);
+	if (desc_cnt > chan->desc_free_cnt) {
+		spin_unlock_bh(&chan->lock);
+		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+		return NULL;
+	}
+	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+	spin_unlock_bh(&chan->lock);
+
+	do {
+		/* Allocate and populate the descriptor */
+		new = zynqmp_dma_get_descriptor(chan);
+
+		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+					     dma_dst, copy, prev);
+		prev = desc;
+		len -= copy;
+		dma_src += copy;
+		dma_dst += copy;
+		if (!first)
+			first = new;
+		else
+			list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	zynqmp_dma_desc_config_eod(chan, desc);
+	async_tx_ack(&first->async_tx);
+	first->async_tx.flags = flags;
+	return &first->async_tx;
+}
+
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+			struct dma_chan *dchan, struct scatterlist *dst_sg,
+			unsigned int dst_sg_len, struct scatterlist *src_sg,
+			unsigned int src_sg_len, unsigned long flags)
+{
+	struct zynqmp_dma_desc_sw *new, *first = NULL;
+	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	void *desc = NULL, *prev = NULL;
+	size_t len, dst_avail, src_avail;
+	dma_addr_t dma_dst, dma_src;
+	u32 desc_cnt = 0, i;
+	struct scatterlist *sg;
+
+	for_each_sg(src_sg, sg, src_sg_len, i)
+		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+					 ZYNQMP_DMA_MAX_TRANS_LEN);
+
+	spin_lock_bh(&chan->lock);
+	if (desc_cnt > chan->desc_free_cnt) {
+		spin_unlock_bh(&chan->lock);
+		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+		return NULL;
+	}
+	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+	spin_unlock_bh(&chan->lock);
+
+	dst_avail = 
sg_dma_len(dst_sg); + src_avail = sg_dma_len(src_sg); + + /* Run until we are out of scatterlist entries */ + while (true) { + /* Allocate and populate the descriptor */ + new = zynqmp_dma_get_descriptor(chan); + desc = (struct zynqmp_dma_desc_ll *)new->src_v; + len = min_t(size_t, src_avail, dst_avail); + len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN); + if (len == 0) + goto fetch; + dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - + dst_avail; + dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - + src_avail; + + zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst, + len, prev); + prev = desc; + dst_avail -= len; + src_avail -= len; + + if (!first) + first = new; + else + list_add_tail(&new->node, &first->tx_list); +fetch: + /* Fetch the next dst scatterlist entry */ + if (dst_avail == 0) { + if (dst_sg_len == 0) + break; + dst_sg = sg_next(dst_sg); + if (dst_sg == NULL) + break; + dst_sg_len--; + dst_avail = sg_dma_len(dst_sg); + } + /* Fetch the next src scatterlist entry */ + if (src_avail == 0) { + if (src_sg_len == 0) + break; + src_sg = sg_next(src_sg); + if (src_sg == NULL) + break; + src_sg_len--; + src_avail = sg_dma_len(src_sg); + } + } + + zynqmp_dma_desc_config_eod(chan, desc); + first->async_tx.flags = flags; + return &first->async_tx; +} + +/** + * zynqmp_dma_chan_remove - Channel remove function + * @chan: ZynqMP DMA channel pointer + */ +static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) +{ + if (!chan) + return; + + devm_free_irq(chan->zdev->dev, chan->irq, chan); + tasklet_kill(&chan->tasklet); + list_del(&chan->common.device_node); + clk_disable_unprepare(chan->clk_apb); + clk_disable_unprepare(chan->clk_main); +} + +/** + * zynqmp_dma_chan_probe - Per Channel Probing + * @zdev: Driver specific device structure + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, + struct platform_device *pdev) +{ + struct zynqmp_dma_chan *chan; + struct resource *res; + struct device_node *node = pdev->dev.of_node; + int err; + + chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = zdev->dev; + chan->zdev = zdev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chan->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(chan->regs)) + return PTR_ERR(chan->regs); + + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; + chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; + chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); + if (err < 0) { + dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); + return err; + } + + if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 && + chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) { + dev_err(zdev->dev, "invalid bus-width value"); + return -EINVAL; + } + + chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent"); + zdev->chan = chan; + tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan); + spin_lock_init(&chan->lock); + INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->pending_list); + INIT_LIST_HEAD(&chan->done_list); + INIT_LIST_HEAD(&chan->free_list); + + dma_cookie_init(&chan->common); + chan->common.device = &zdev->common; + list_add_tail(&chan->common.device_node, &zdev->common.channels); + + zynqmp_dma_init(chan); + chan->irq = platform_get_irq(pdev, 0); + if (chan->irq < 0) + return -ENXIO; + err = devm_request_irq(&pdev->dev, 
chan->irq, zynqmp_dma_irq_handler, 0, + "zynqmp-dma", chan); + if (err) + return err; + chan->clk_main = devm_clk_get(&pdev->dev, "clk_main"); + if (IS_ERR(chan->clk_main)) { + dev_err(&pdev->dev, "main clock not found.\n"); + return PTR_ERR(chan->clk_main); + } + + chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); + if (IS_ERR(chan->clk_apb)) { + dev_err(&pdev->dev, "apb clock not found.\n"); + return PTR_ERR(chan->clk_apb); + } + + err = clk_prepare_enable(chan->clk_main); + if (err) { + dev_err(&pdev->dev, "Unable to enable main clock.\n"); + return err; + } + + err = clk_prepare_enable(chan->clk_apb); + if (err) { + clk_disable_unprepare(chan->clk_main); + dev_err(&pdev->dev, "Unable to enable apb clock.\n"); + return err; + } + + chan->desc_size = sizeof(struct zynqmp_dma_desc_ll); + chan->idle = true; + return 0; +} + +/** + * of_zynqmp_dma_xlate - Translation function + * @dma_spec: Pointer to DMA specifier as found in the device tree + * @ofdma: Pointer to DMA controller data + * + * Return: DMA channel pointer on success and NULL on error + */ +static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct zynqmp_dma_device *zdev = ofdma->of_dma_data; + + return dma_get_slave_channel(&zdev->chan->common); +} + +/** + * zynqmp_dma_probe - Driver probe function + * @pdev: Pointer to the platform_device structure + * + * Return: '0' on success and failure value on error + */ +static int zynqmp_dma_probe(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev; + struct dma_device *p; + int ret; + + zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL); + if (!zdev) + return -ENOMEM; + + zdev->dev = &pdev->dev; + INIT_LIST_HEAD(&zdev->common.channels); + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_cap_set(DMA_SG, zdev->common.cap_mask); + dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask); + + p = &zdev->common; + p->device_prep_dma_sg = zynqmp_dma_prep_sg; + p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy; + p->device_terminate_all = zynqmp_dma_device_terminate_all; + p->device_issue_pending = zynqmp_dma_issue_pending; + p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources; + p->device_free_chan_resources = zynqmp_dma_free_chan_resources; + p->device_tx_status = dma_cookie_status; + p->device_config = zynqmp_dma_device_config; + p->dev = &pdev->dev; + + platform_set_drvdata(pdev, zdev); + + ret = zynqmp_dma_chan_probe(zdev, pdev); + if (ret) { + dev_err(&pdev->dev, "Probing channel failed\n"); + goto free_chan_resources; + } + + p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); + p->src_addr_widths = BIT(zdev->chan->bus_width / 8); + + dma_async_device_register(&zdev->common); + + ret = of_dma_controller_register(pdev->dev.of_node, + of_zynqmp_dma_xlate, zdev); + if (ret) { + dev_err(&pdev->dev, "Unable to register DMA to DT\n"); + dma_async_device_unregister(&zdev->common); + goto free_chan_resources; + } + + dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n"); + + return 0; + +free_chan_resources: + zynqmp_dma_chan_remove(zdev->chan); + return ret; +} + +/** + * zynqmp_dma_remove - Driver remove function + * @pdev: Pointer to the platform_device structure + * + * Return: Always '0' + */ +static int zynqmp_dma_remove(struct platform_device *pdev) +{ + struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&zdev->common); + + zynqmp_dma_chan_remove(zdev->chan); + + return 0; +} + +static const struct 
of_device_id zynqmp_dma_of_match[] = { + { .compatible = "xlnx,zynqmp-dma-1.0", }, + {} +}; +MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match); + +static struct platform_driver zynqmp_dma_driver = { + .driver = { + .name = "xilinx-zynqmp-dma", + .of_match_table = zynqmp_dma_of_match, + }, + .probe = zynqmp_dma_probe, + .remove = zynqmp_dma_remove, +}; + +module_platform_driver(zynqmp_dma_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver"); @@ -391,6 +391,13 @@ config EDAC_ALTERA_OCRAM Support for error detection and correction on the Altera On-Chip RAM Memory for Altera SoCs. +config EDAC_ALTERA_ETHERNET + bool "Altera Ethernet FIFO ECC" + depends on EDAC_ALTERA=y + help + Support for error detection and correction on the + Altera Ethernet FIFO Memory for Altera SoCs. + config EDAC_SYNOPSYS tristate "Synopsys DDR Memory Controller" depends on EDAC_MM_EDAC && ARCH_ZYNQ @@ -19,12 +19,15 @@ #include <asm/cacheflush.h> #include <linux/ctype.h> +#include <linux/delay.h> #include <linux/edac.h> #include <linux/genalloc.h> #include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -548,10 +551,10 @@ module_platform_driver(altr_edac_driver); * trigger testing are different for each memory. */ -const struct edac_device_prv_data ocramecc_data; -const struct edac_device_prv_data l2ecc_data; -const struct edac_device_prv_data a10_ocramecc_data; -const struct edac_device_prv_data a10_l2ecc_data; +static const struct edac_device_prv_data ocramecc_data; +static const struct edac_device_prv_data l2ecc_data; +static const struct edac_device_prv_data a10_ocramecc_data; +static const struct edac_device_prv_data a10_l2ecc_data; static irqreturn_t altr_edac_device_handler(int irq, void *dev_id) { @@ -686,11 +689,9 @@ static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci, static const struct of_device_id altr_edac_device_of_match[] = { #ifdef CONFIG_EDAC_ALTERA_L2C { .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data }, - { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data }, #endif #ifdef CONFIG_EDAC_ALTERA_OCRAM { .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data }, - { .compatible = "altr,socfpga-a10-ocram-ecc", .data = &a10_ocramecc_data }, #endif {}, }; @@ -825,16 +826,16 @@ static struct platform_driver altr_edac_device_driver = { }; module_platform_driver(altr_edac_device_driver); -/*********************** OCRAM EDAC Device Functions *********************/ +/******************* Arria10 Device ECC Shared Functions *****************/ -#ifdef CONFIG_EDAC_ALTERA_OCRAM /* * Test for memory's ECC dependencies upon entry because platform specific * startup should have initialized the memory and enabled the ECC. * Can't turn on ECC here because accessing un-initialized memory will * cause CE/UE errors possibly causing an ABORT. 
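 * The check reads the ECC enable bits named by the per-device
 * prv->ecc_enable_mask from the block's base register and fails with
 * -ENODEV when the platform startup code left them clear.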
*/ -static int altr_check_ecc_deps(struct altr_edac_device_dev *device) +static int __maybe_unused +altr_check_ecc_deps(struct altr_edac_device_dev *device) { void __iomem *base = device->base; const struct edac_device_prv_data *prv = device->data; @@ -848,6 +849,227 @@ static int altr_check_ecc_deps(struct altr_edac_device_dev *device) return -ENODEV; } +static irqreturn_t __maybe_unused altr_edac_a10_ecc_irq(int irq, void *dev_id) +{ + struct altr_edac_device_dev *dci = dev_id; + void __iomem *base = dci->base; + + if (irq == dci->sb_irq) { + writel(ALTR_A10_ECC_SERRPENA, + base + ALTR_A10_ECC_INTSTAT_OFST); + edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); + + return IRQ_HANDLED; + } else if (irq == dci->db_irq) { + writel(ALTR_A10_ECC_DERRPENA, + base + ALTR_A10_ECC_INTSTAT_OFST); + edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name); + if (dci->data->panic) + panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n"); + + return IRQ_HANDLED; + } + + WARN_ON(1); + + return IRQ_NONE; +} + +/******************* Arria10 Memory Buffer Functions *********************/ + +static inline int a10_get_irq_mask(struct device_node *np) +{ + int irq; + const u32 *handle = of_get_property(np, "interrupts", NULL); + + if (!handle) + return -ENODEV; + irq = be32_to_cpup(handle); + return irq; +} + +static inline void ecc_set_bits(u32 bit_mask, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr); + + value |= bit_mask; + writel(value, ioaddr); +} + +static inline void ecc_clear_bits(u32 bit_mask, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr); + + value &= ~bit_mask; + writel(value, ioaddr); +} + +static inline int ecc_test_bits(u32 bit_mask, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr); + + return (value & bit_mask) ? 1 : 0; +} + +/* + * This function uses the memory initialization block in the Arria10 ECC + * controller to initialize/clear the entire memory data and ECC data. 
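+ *
+ * The sequence, visible below, is: pick the port A or port B init,
+ * status and clear masks, set the INIT bit in the ECC control register,
+ * poll the matching INITCOMPLETE bit with a udelay(1) loop bounded by
+ * ALTR_A10_ECC_INIT_WATCHDOG_10US, then clear any ECC interrupts the
+ * initialization raised.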
+ */ +static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port) +{ + int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US; + u32 init_mask, stat_mask, clear_mask; + int ret = 0; + + if (port) { + init_mask = ALTR_A10_ECC_INITB; + stat_mask = ALTR_A10_ECC_INITCOMPLETEB; + clear_mask = ALTR_A10_ECC_ERRPENB_MASK; + } else { + init_mask = ALTR_A10_ECC_INITA; + stat_mask = ALTR_A10_ECC_INITCOMPLETEA; + clear_mask = ALTR_A10_ECC_ERRPENA_MASK; + } + + ecc_set_bits(init_mask, (ioaddr + ALTR_A10_ECC_CTRL_OFST)); + while (limit--) { + if (ecc_test_bits(stat_mask, + (ioaddr + ALTR_A10_ECC_INITSTAT_OFST))) + break; + udelay(1); + } + if (limit < 0) + ret = -EBUSY; + + /* Clear any pending ECC interrupts */ + writel(clear_mask, (ioaddr + ALTR_A10_ECC_INTSTAT_OFST)); + + return ret; +} + +static __init int __maybe_unused +altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, + u32 ecc_ctrl_en_mask, bool dual_port) +{ + int ret = 0; + void __iomem *ecc_block_base; + struct regmap *ecc_mgr_map; + char *ecc_name; + struct device_node *np_eccmgr; + + ecc_name = (char *)np->name; + + /* Get the ECC Manager - parent of the device EDACs */ + np_eccmgr = of_get_parent(np); + ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr, + "altr,sysmgr-syscon"); + of_node_put(np_eccmgr); + if (IS_ERR(ecc_mgr_map)) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "Unable to get syscon altr,sysmgr-syscon\n"); + return -ENODEV; + } + + /* Map the ECC Block */ + ecc_block_base = of_iomap(np, 0); + if (!ecc_block_base) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "Unable to map %s ECC block\n", ecc_name); + return -ENODEV; + } + + /* Disable ECC */ + regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, irq_mask); + writel(ALTR_A10_ECC_SERRINTEN, + (ecc_block_base + ALTR_A10_ECC_ERRINTENR_OFST)); + ecc_clear_bits(ecc_ctrl_en_mask, + (ecc_block_base + ALTR_A10_ECC_CTRL_OFST)); + /* Ensure all writes complete */ + wmb(); + /* Use HW initialization block to initialize memory for ECC */ + ret = altr_init_memory_port(ecc_block_base, 0); + if (ret) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "ECC: cannot init %s PORTA memory\n", ecc_name); + goto out; + } + + if (dual_port) { + ret = altr_init_memory_port(ecc_block_base, 1); + if (ret) { + edac_printk(KERN_ERR, EDAC_DEVICE, + "ECC: cannot init %s PORTB memory\n", + ecc_name); + goto out; + } + } + + /* Interrupt mode set to every SBERR */ + regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, + ALTR_A10_ECC_INTMODE); + /* Enable ECC */ + ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + + ALTR_A10_ECC_CTRL_OFST)); + writel(ALTR_A10_ECC_SERRINTEN, + (ecc_block_base + ALTR_A10_ECC_ERRINTENS_OFST)); + regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST, irq_mask); + /* Ensure all writes complete */ + wmb(); +out: + iounmap(ecc_block_base); + return ret; +} + +static int validate_parent_available(struct device_node *np); +static const struct of_device_id altr_edac_a10_device_of_match[]; +static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat) +{ + int irq; + struct device_node *child, *np = of_find_compatible_node(NULL, NULL, + "altr,socfpga-a10-ecc-manager"); + if (!np) { + edac_printk(KERN_ERR, EDAC_DEVICE, "ECC Manager not found\n"); + return -ENODEV; + } + + for_each_child_of_node(np, child) { + const struct of_device_id *pdev_id; + const struct edac_device_prv_data *prv; + + if (!of_device_is_available(child)) + continue; + if (!of_device_is_compatible(child, compat)) + continue; + + if (validate_parent_available(child)) + continue; + + irq 
= a10_get_irq_mask(child); + if (irq < 0) + continue; + + /* Get matching node and check for valid result */ + pdev_id = of_match_node(altr_edac_a10_device_of_match, child); + if (IS_ERR_OR_NULL(pdev_id)) + continue; + + /* Validate private data pointer before dereferencing */ + prv = pdev_id->data; + if (!prv) + continue; + + altr_init_a10_ecc_block(child, BIT(irq), + prv->ecc_enable_mask, 0); + } + + of_node_put(np); + return 0; +} + +/*********************** OCRAM EDAC Device Functions *********************/ + +#ifdef CONFIG_EDAC_ALTERA_OCRAM + static void *ocram_alloc_mem(size_t size, void **other) { struct device_node *np; @@ -882,25 +1104,7 @@ static void ocram_free_mem(void *p, size_t size, void *other) gen_pool_free((struct gen_pool *)other, (u32)p, size); } -static irqreturn_t altr_edac_a10_ecc_irq(struct altr_edac_device_dev *dci, - bool sberr) -{ - void __iomem *base = dci->base; - - if (sberr) { - writel(ALTR_A10_ECC_SERRPENA, - base + ALTR_A10_ECC_INTSTAT_OFST); - edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); - } else { - writel(ALTR_A10_ECC_DERRPENA, - base + ALTR_A10_ECC_INTSTAT_OFST); - edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name); - panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n"); - } - return IRQ_HANDLED; -} - -const struct edac_device_prv_data ocramecc_data = { +static const struct edac_device_prv_data ocramecc_data = { .setup = altr_check_ecc_deps, .ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR), .ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR), @@ -916,7 +1120,7 @@ const struct edac_device_prv_data ocramecc_data = { .inject_fops = &altr_edac_device_inject_fops, }; -const struct edac_device_prv_data a10_ocramecc_data = { +static const struct edac_device_prv_data a10_ocramecc_data = { .setup = altr_check_ecc_deps, .ce_clear_mask = ALTR_A10_ECC_SERRPENA, .ue_clear_mask = ALTR_A10_ECC_DERRPENA, @@ -929,6 +1133,12 @@ const struct edac_device_prv_data a10_ocramecc_data = { .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST, .ecc_irq_handler = altr_edac_a10_ecc_irq, .inject_fops = &altr_edac_a10_device_inject_fops, + /* + * OCRAM panic on uncorrectable error because sleep/resume + * functions and FPGA contents are stored in OCRAM. Prefer + * a kernel panic over executing/loading corrupted data. 
+ */ + .panic = true, }; #endif /* CONFIG_EDAC_ALTERA_OCRAM */ @@ -988,25 +1198,33 @@ static int altr_l2_check_deps(struct altr_edac_device_dev *device) return -ENODEV; } -static irqreturn_t altr_edac_a10_l2_irq(struct altr_edac_device_dev *dci, - bool sberr) +static irqreturn_t altr_edac_a10_l2_irq(int irq, void *dev_id) { - if (sberr) { + struct altr_edac_device_dev *dci = dev_id; + + if (irq == dci->sb_irq) { regmap_write(dci->edac->ecc_mgr_map, A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST, A10_SYSGMR_MPU_CLEAR_L2_ECC_SB); edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name); - } else { + + return IRQ_HANDLED; + } else if (irq == dci->db_irq) { regmap_write(dci->edac->ecc_mgr_map, A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST, A10_SYSGMR_MPU_CLEAR_L2_ECC_MB); edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name); panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n"); + + return IRQ_HANDLED; } - return IRQ_HANDLED; + + WARN_ON(1); + + return IRQ_NONE; } -const struct edac_device_prv_data l2ecc_data = { +static const struct edac_device_prv_data l2ecc_data = { .setup = altr_l2_check_deps, .ce_clear_mask = 0, .ue_clear_mask = 0, @@ -1021,7 +1239,7 @@ const struct edac_device_prv_data l2ecc_data = { .inject_fops = &altr_edac_device_inject_fops, }; -const struct edac_device_prv_data a10_l2ecc_data = { +static const struct edac_device_prv_data a10_l2ecc_data = { .setup = altr_l2_check_deps, .ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR, .ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR, @@ -1040,7 +1258,49 @@ const struct edac_device_prv_data a10_l2ecc_data = { #endif /* CONFIG_EDAC_ALTERA_L2C */ +/********************* Ethernet Device Functions ********************/ + +#ifdef CONFIG_EDAC_ALTERA_ETHERNET + +static const struct edac_device_prv_data a10_enetecc_data = { + .setup = altr_check_ecc_deps, + .ce_clear_mask = ALTR_A10_ECC_SERRPENA, + .ue_clear_mask = ALTR_A10_ECC_DERRPENA, + .dbgfs_name = "altr_trigger", + .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL, + .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST, + .ce_set_mask = ALTR_A10_ECC_TSERRA, + .ue_set_mask = ALTR_A10_ECC_TDERRA, + .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST, + .ecc_irq_handler = altr_edac_a10_ecc_irq, + .inject_fops = &altr_edac_a10_device_inject_fops, +}; + +static int __init socfpga_init_ethernet_ecc(void) +{ + return altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc"); +} + +early_initcall(socfpga_init_ethernet_ecc); + +#endif /* CONFIG_EDAC_ALTERA_ETHERNET */ + /********************* Arria10 EDAC Device Functions *************************/ +static const struct of_device_id altr_edac_a10_device_of_match[] = { +#ifdef CONFIG_EDAC_ALTERA_L2C + { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data }, +#endif +#ifdef CONFIG_EDAC_ALTERA_OCRAM + { .compatible = "altr,socfpga-a10-ocram-ecc", + .data = &a10_ocramecc_data }, +#endif +#ifdef CONFIG_EDAC_ALTERA_ETHERNET + { .compatible = "altr,socfpga-eth-mac-ecc", + .data = &a10_enetecc_data }, +#endif + {}, +}; +MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match); /* * The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5 @@ -1075,28 +1335,42 @@ static ssize_t altr_edac_a10_device_trig(struct file *file, return count; } -static irqreturn_t altr_edac_a10_irq_handler(int irq, void *dev_id) +static void altr_edac_a10_irq_handler(struct irq_desc *desc) { - irqreturn_t rc = IRQ_NONE; - struct altr_arria10_edac *edac = dev_id; - struct altr_edac_device_dev *dci; - int irq_status; - bool sberr = (irq == edac->sb_irq) ? 1 : 0; - int sm_offset = sberr ? 
A10_SYSMGR_ECC_INTSTAT_SERR_OFST :
-			A10_SYSMGR_ECC_INTSTAT_DERR_OFST;
+	int dberr, bit, sm_offset, irq_status;
+	struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int irq = irq_desc_get_irq(desc);
+
+	dberr = (irq == edac->db_irq) ? 1 : 0;
+	sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
+			    A10_SYSMGR_ECC_INTSTAT_SERR_OFST;
+
+	chained_irq_enter(chip, desc);
 
 	regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
 
-	if ((irq != edac->sb_irq) && (irq != edac->db_irq)) {
-		WARN_ON(1);
-	} else {
-		list_for_each_entry(dci, &edac->a10_ecc_devices, next) {
-			if (irq_status & dci->data->irq_status_mask)
-				rc = dci->data->ecc_irq_handler(dci, sberr);
-		}
+	for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+		irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
+		if (irq)
+			generic_handle_irq(irq);
 	}
 
-	return rc;
+	chained_irq_exit(chip, desc);
+}
+
+static int validate_parent_available(struct device_node *np)
+{
+	struct device_node *parent;
+	int ret = 0;
+
+	/* Ensure parent device is enabled if parent node exists */
+	parent = of_parse_phandle(np, "altr,ecc-parent", 0);
+	if (parent && !of_device_is_available(parent))
+		ret = -ENODEV;
+
+	of_node_put(parent);
+	return ret;
 }
 
 static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
@@ -1111,7 +1385,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 	const struct edac_device_prv_data *prv;
 	/* Get matching node and check for valid result */
 	const struct of_device_id *pdev_id =
-		of_match_node(altr_edac_device_of_match, np);
+		of_match_node(altr_edac_a10_device_of_match, np);
 	if (IS_ERR_OR_NULL(pdev_id))
 		return -ENODEV;
@@ -1120,6 +1394,9 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 	if (IS_ERR_OR_NULL(prv))
 		return -ENODEV;
 
+	if (validate_parent_available(np))
+		return -ENODEV;
+
 	if (!devres_open_group(edac->dev, altr_edac_a10_device_add,
 			       GFP_KERNEL))
 		return -ENOMEM;
@@ -1168,6 +1445,34 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 		goto err_release_group1;
 	}
 
+	altdev->sb_irq = irq_of_parse_and_map(np, 0);
+	if (!altdev->sb_irq) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating SBIRQ\n");
+		rc = -ENODEV;
+		goto err_release_group1;
+	}
+	rc = devm_request_irq(edac->dev, altdev->sb_irq,
+			      prv->ecc_irq_handler,
+			      IRQF_SHARED, ecc_name, altdev);
+	if (rc) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
+		goto err_release_group1;
+	}
+
+	altdev->db_irq = irq_of_parse_and_map(np, 1);
+	if (!altdev->db_irq) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
+		rc = -ENODEV;
+		goto err_release_group1;
+	}
+	rc = devm_request_irq(edac->dev, altdev->db_irq,
+			      prv->ecc_irq_handler,
+			      IRQF_SHARED, ecc_name, altdev);
+	if (rc) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+		goto err_release_group1;
+	}
+
 	rc = edac_device_add_device(dci);
 	if (rc) {
 		dev_err(edac->dev, "edac_device_add_device failed\n");
@@ -1186,7 +1491,6 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 err_release_group1:
 	edac_device_free_ctl_info(dci);
 err_release_group:
-	edac_printk(KERN_ALERT, EDAC_DEVICE, "%s: %d\n", __func__, __LINE__);
 	devres_release_group(edac->dev, NULL);
 	edac_printk(KERN_ERR, EDAC_DEVICE,
 		    "%s:Error setting up EDAC device: %d\n", ecc_name, rc);
@@ -1194,11 +1498,43 @@ err_release_group:
 	return rc;
 }
 
+static void a10_eccmgr_irq_mask(struct irq_data *d)
+{
+	struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d);
+
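+	/*
+	 * Masking sets the per-block bit (d->hwirq) in the System
+	 * Manager's INTMASK_SET register; a10_eccmgr_irq_unmask() below
+	 * clears the same bit through INTMASK_CLR.
+	 */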
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, + BIT(d->hwirq)); +} + +static void a10_eccmgr_irq_unmask(struct irq_data *d) +{ + struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d); + + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST, + BIT(d->hwirq)); +} + +static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct altr_arria10_edac *edac = d->host_data; + + irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq); + irq_set_chip_data(irq, edac); + irq_set_noprobe(irq); + + return 0; +} + +struct irq_domain_ops a10_eccmgr_ic_ops = { + .map = a10_eccmgr_irqdomain_map, + .xlate = irq_domain_xlate_twocell, +}; + static int altr_edac_a10_probe(struct platform_device *pdev) { struct altr_arria10_edac *edac; struct device_node *child; - int rc; edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL); if (!edac) @@ -1216,32 +1552,50 @@ static int altr_edac_a10_probe(struct platform_device *pdev) return PTR_ERR(edac->ecc_mgr_map); } + edac->irq_chip.name = pdev->dev.of_node->name; + edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; + edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; + edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64, + &a10_eccmgr_ic_ops, edac); + if (!edac->domain) { + dev_err(&pdev->dev, "Error adding IRQ domain\n"); + return -ENOMEM; + } + edac->sb_irq = platform_get_irq(pdev, 0); - rc = devm_request_irq(&pdev->dev, edac->sb_irq, - altr_edac_a10_irq_handler, - IRQF_SHARED, dev_name(&pdev->dev), edac); - if (rc) { - edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n"); - return rc; + if (edac->sb_irq < 0) { + dev_err(&pdev->dev, "No SBERR IRQ resource\n"); + return edac->sb_irq; } + irq_set_chained_handler_and_data(edac->sb_irq, + altr_edac_a10_irq_handler, + edac); + edac->db_irq = platform_get_irq(pdev, 1); - rc = devm_request_irq(&pdev->dev, edac->db_irq, - altr_edac_a10_irq_handler, - IRQF_SHARED, dev_name(&pdev->dev), edac); - if (rc) { - edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n"); - return rc; + if (edac->db_irq < 0) { + dev_err(&pdev->dev, "No DBERR IRQ resource\n"); + return edac->db_irq; } + irq_set_chained_handler_and_data(edac->db_irq, + altr_edac_a10_irq_handler, + edac); for_each_child_of_node(pdev->dev.of_node, child) { if (!of_device_is_available(child)) continue; if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc")) altr_edac_a10_device_add(edac, child); - else if (of_device_is_compatible(child, - "altr,socfpga-a10-ocram-ecc")) + else if ((of_device_is_compatible(child, + "altr,socfpga-a10-ocram-ecc")) || + (of_device_is_compatible(child, + "altr,socfpga-eth-mac-ecc"))) altr_edac_a10_device_add(edac, child); + else if (of_device_is_compatible(child, + "altr,sdram-edac-a10")) + of_platform_populate(pdev->dev.of_node, + altr_sdram_ctrl_of_match, + NULL, &pdev->dev); } return 0; @@ -230,8 +230,13 @@ struct altr_sdram_mc_data { #define ALTR_A10_ECC_INITCOMPLETEB BIT(8) #define ALTR_A10_ECC_ERRINTEN_OFST 0x10 +#define ALTR_A10_ECC_ERRINTENS_OFST 0x14 +#define ALTR_A10_ECC_ERRINTENR_OFST 0x18 #define ALTR_A10_ECC_SERRINTEN BIT(0) +#define ALTR_A10_ECC_INTMODE_OFST 0x1C +#define ALTR_A10_ECC_INTMODE BIT(0) + #define ALTR_A10_ECC_INTSTAT_OFST 0x20 #define ALTR_A10_ECC_SERRPENA BIT(0) #define ALTR_A10_ECC_DERRPENA BIT(8) @@ -280,6 +285,12 @@ struct altr_sdram_mc_data { /* Arria 10 OCRAM ECC Management Group Defines */ #define ALTR_A10_OCRAM_ECC_EN_CTL (BIT(1) | BIT(0)) +/* Arria 10 Ethernet ECC Management Group 
Defines */ +#define ALTR_A10_COMMON_ECC_EN_CTL BIT(0) + +/* A10 ECC Controller memory initialization timeout */ +#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000 + struct altr_edac_device_dev; struct edac_device_prv_data { @@ -295,10 +306,10 @@ struct edac_device_prv_data { int ce_set_mask; int ue_set_mask; int set_err_ofst; - irqreturn_t (*ecc_irq_handler)(struct altr_edac_device_dev *dci, - bool sb); + irqreturn_t (*ecc_irq_handler)(int irq, void *dev_id); int trig_alloc_sz; const struct file_operations *inject_fops; + bool panic; }; struct altr_edac_device_dev { @@ -320,6 +331,8 @@ struct altr_arria10_edac { struct regmap *ecc_mgr_map; int sb_irq; int db_irq; + struct irq_domain *domain; + struct irq_chip irq_chip; struct list_head a10_ecc_devices; }; @@ -2966,11 +2966,11 @@ static int __init amd64_edac_init(void) int err = -ENODEV; int i; - opstate_init(); - if (amd_cache_northbridges() < 0) goto err_ret; + opstate_init(); + err = -ENOMEM; ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) @@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value) list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - edac_mod_work(&mci->work, value); + if (mci->op_state == OP_RUNNING_POLL) + edac_mod_work(&mci->work, value); } mutex_unlock(&mem_ctls_mutex); } @@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = { * possible dynamic channel DIMM Label attribute files * */ - DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 0); DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, @@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 4); DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 5); +DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 6); +DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 7); /* Total possible dynamic DIMM Label attribute file table */ static struct attribute *dynamic_csrow_dimm_attr[] = { @@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { &dev_attr_legacy_ch3_dimm_label.attr.attr, &dev_attr_legacy_ch4_dimm_label.attr.attr, &dev_attr_legacy_ch5_dimm_label.attr.attr, + &dev_attr_legacy_ch6_dimm_label.attr.attr, + &dev_attr_legacy_ch7_dimm_label.attr.attr, NULL }; @@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, channel_ce_count_show, NULL, 4); DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, channel_ce_count_show, NULL, 5); +DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 6); +DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 7); /* Total possible dynamic ce_count attribute file table */ static struct attribute *dynamic_csrow_ce_count_attr[] = { @@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { &dev_attr_legacy_ch3_ce_count.attr.attr, &dev_attr_legacy_ch4_ce_count.attr.attr, &dev_attr_legacy_ch5_ce_count.attr.attr, + &dev_attr_legacy_ch6_ce_count.attr.attr, + &dev_attr_legacy_ch7_ce_count.attr.attr, NULL }; @@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj, if (idx >= csrow->nr_channels) return 0; + + if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) { + WARN_ONCE(1, "idx: %d\n", idx); + return 0; + } + /* Only expose populated DIMMs */ if (!csrow->channels[idx]->dimm->nr_pages) return 0; + return 
attr->mode; } @@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, }; -#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) -#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) +#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \ + GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19)) + +#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \ + GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14)) /* Device 16, functions 2-7 */ @@ -326,6 +329,7 @@ struct pci_id_descr { struct pci_id_table { const struct pci_id_descr *descr; int n_devs; + enum type type; }; struct sbridge_dev { @@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, }; -#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } +#define PCI_ID_TABLE_ENTRY(A, T) { \ + .descr = A, \ + .n_devs = ARRAY_SIZE(A), \ + .type = T \ +} + static const struct pci_id_table pci_dev_descr_sbridge_table[] = { - PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), + PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE), {0,} /* 0 terminated list. */ }; @@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { }; static const struct pci_id_table pci_dev_descr_ibridge_table[] = { - PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), + PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE), {0,} /* 0 terminated list. */ }; @@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = { }; static const struct pci_id_table pci_dev_descr_haswell_table[] = { - PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell), + PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL), {0,} /* 0 terminated list. */ }; @@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = { }; static const struct pci_id_table pci_dev_descr_knl_table[] = { - PCI_ID_TABLE_ENTRY(pci_dev_descr_knl), + PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING), {0,} }; @@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = { }; static const struct pci_id_table pci_dev_descr_broadwell_table[] = { - PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell), + PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL), {0,} /* 0 terminated list. */ }; @@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_tad[i], rir_offset[j][k], ®); - tmp_mb = RIR_OFFSET(reg) << 6; + tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6; gb = div_u64_rem(tmp_mb, 1024, &mb); edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", i, j, k, gb, (mb*1000)/1024, ((u64)tmp_mb) << 20L, - (u32)RIR_RNK_TGT(reg), + (u32)RIR_RNK_TGT(pvt->info.type, reg), reg); } } @@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], rir_offset[n_rir][idx], ®); - *rank = RIR_RNK_TGT(reg); + *rank = RIR_RNK_TGT(pvt->info.type, reg); edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", n_rir, @@ -2369,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev, * @num_mc: pointer to the memory controllers count, to be incremented in case * of success. * @table: model specific table - * @allow_dups: allow for multiple devices to exist with the same device id - * (as implemented, this isn't expected to work correctly in the - * multi-socket case). 
- * @multi_bus: don't assume devices on different buses belong to different - * memory controllers. * * returns 0 in case of success or error code */ -static int sbridge_get_all_devices_full(u8 *num_mc, - const struct pci_id_table *table, - int allow_dups, - int multi_bus) +static int sbridge_get_all_devices(u8 *num_mc, + const struct pci_id_table *table) { int i, rc; struct pci_dev *pdev = NULL; + int allow_dups = 0; + int multi_bus = 0; + if (table->type == KNIGHTS_LANDING) + allow_dups = multi_bus = 1; while (table && table->descr) { for (i = 0; i < table->n_devs; i++) { if (!allow_dups || i == 0 || @@ -2411,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc, return 0; } -#define sbridge_get_all_devices(num_mc, table) \ - sbridge_get_all_devices_full(num_mc, table, 0, 0) -#define sbridge_get_all_devices_knl(num_mc, table) \ - sbridge_get_all_devices_full(num_mc, table, 1, 1) - static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, struct sbridge_dev *sbridge_dev) { @@ -3357,12 +3358,12 @@ fail0: #define ICPU(model, table) \ { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } -/* Order here must match "enum type" */ static const struct x86_cpu_id sbridge_cpuids[] = { ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */ ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */ ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */ ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */ + ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */ ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ { } }; @@ -3398,7 +3399,7 @@ static int sbridge_probe(const struct x86_cpu_id *id) mc, mc + 1, num_mc); sbridge_dev->mc = mc++; - rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids); + rc = sbridge_register_mci(sbridge_dev, ptable->type); if (unlikely(rc < 0)) goto fail1; } @@ -2,7 +2,8 @@ # Makefile for external connector class (extcon) devices # -obj-$(CONFIG_EXTCON) += extcon.o +obj-$(CONFIG_EXTCON) += extcon-core.o +extcon-core-objs += extcon.o devres.o obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c new file mode 100644 index 000000000000..e686acd1c459 --- /dev/null +++ b/ drivers/extcon/devres.c@@ -0,0 +1,216 @@ +/* + * drivers/extcon/devres.c - EXTCON device's resource management + * + * Copyright (C) 2016 Samsung Electronics + * Author: Chanwoo Choi <cw00.choi@samsung.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/extcon.h>
+
+static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
+{
+	struct extcon_dev **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+static void devm_extcon_dev_release(struct device *dev, void *res)
+{
+	extcon_dev_free(*(struct extcon_dev **)res);
+}
+
+static void devm_extcon_dev_unreg(struct device *dev, void *res)
+{
+	extcon_dev_unregister(*(struct extcon_dev **)res);
+}
+
+struct extcon_dev_notifier_devres {
+	struct extcon_dev *edev;
+	unsigned int id;
+	struct notifier_block *nb;
+};
+
+static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
+{
+	struct extcon_dev_notifier_devres *this = res;
+
+	extcon_unregister_notifier(this->edev, this->id, this->nb);
+}
+
+/**
+ * devm_extcon_dev_allocate - Allocate managed extcon device
+ * @dev: device owning the extcon device being created
+ * @supported_cable: Array of supported cable ids ending with EXTCON_NONE.
+ *		     If supported_cable is NULL, cable name related APIs
+ *		     are disabled.
+ *
+ * This function automatically manages the memory of the extcon device
+ * using device resource management, which simplifies freeing the memory
+ * of the extcon device.
+ *
+ * Returns the pointer to the allocated extcon_dev on success or
+ * ERR_PTR(err) on failure.
+ */
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+					    const unsigned int *supported_cable)
+{
+	struct extcon_dev **ptr, *edev;
+
+	ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	edev = extcon_dev_allocate(supported_cable);
+	if (IS_ERR(edev)) {
+		devres_free(ptr);
+		return edev;
+	}
+
+	edev->dev.parent = dev;
+
+	*ptr = edev;
+	devres_add(dev, ptr);
+
+	return edev;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
+
+/**
+ * devm_extcon_dev_free() - Resource-managed free of an extcon device
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to free
+ *
+ * Free the memory that was allocated with the devm_extcon_dev_allocate()
+ * function.
+ */
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
+{
+	WARN_ON(devres_release(dev, devm_extcon_dev_release,
+			       devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
+
+/**
+ * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
+ * @dev: device the extcon belongs to
+ * @edev: the new extcon device to register
+ *
+ * Managed extcon_dev_register() function. If an extcon device is registered
+ * with this function, it is automatically unregistered on driver detach.
+ * Internally this function calls extcon_dev_register(); see that function
+ * for more information.
+ *
+ * If an extcon device registered with this function needs to be unregistered
+ * separately, devm_extcon_dev_unregister() should be used.
+ *
+ * Returns 0 on success or a negative error number on failure.
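+ *
+ * A minimal probe-time sketch (the cable table name is illustrative):
+ *
+ *	static const unsigned int my_cables[] = { EXTCON_USB, EXTCON_NONE };
+ *
+ *	edev = devm_extcon_dev_allocate(&pdev->dev, my_cables);
+ *	if (IS_ERR(edev))
+ *		return PTR_ERR(edev);
+ *	return devm_extcon_dev_register(&pdev->dev, edev);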
+ */
+int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
+{
+	struct extcon_dev **ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = extcon_dev_register(edev);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	*ptr = edev;
+	devres_add(dev, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
+
+/**
+ * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to unregister
+ *
+ * Unregister an extcon device that was registered with the
+ * devm_extcon_dev_register() function.
+ */
+void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
+{
+	WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
+			       devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
+
+/**
+ * devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
+ * @dev: the device that registers this notifier
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: a notifier block to be registered.
+ *
+ * This function automatically manages the notifier of the extcon device
+ * using device resource management, which simplifies unregistering the
+ * notifier of the extcon device.
+ *
+ * Note that the second parameter given to the callback of nb (val) is
+ * "old_state", not the current state. The current state can be retrieved
+ * by looking at the third parameter (edev pointer)'s state value.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
+				  unsigned int id, struct notifier_block *nb)
+{
+	struct extcon_dev_notifier_devres *ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = extcon_register_notifier(edev, id, nb);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	ptr->edev = edev;
+	ptr->id = id;
+	ptr->nb = nb;
+	devres_add(dev, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_extcon_register_notifier);
+
+/**
+ * devm_extcon_unregister_notifier()
+ *			- Resource-managed extcon_unregister_notifier()
+ * @dev: the device that registered this notifier
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: a notifier block to be unregistered.
+ */
+void devm_extcon_unregister_notifier(struct device *dev,
+				     struct extcon_dev *edev, unsigned int id,
+				     struct notifier_block *nb)
+{
+	WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
+			       devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL(devm_extcon_unregister_notifier);
@@ -38,6 +38,7 @@
 * @chan: iio channel being queried.
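+ * @dev: The device to which the adc-jack belongs; used by the
+ *	suspend/resume callbacks for the wakeup handling below.
+ * @wakeup_source: whether the parent device can act as a wakeup source,
+ *	taken from the platform data.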
@@ -38,6 +38,7 @@
  * @chan:	iio channel being queried.
  */
 struct adc_jack_data {
+	struct device *dev;
 	struct extcon_dev *edev;
 
 	const unsigned int **cable_names;
@@ -49,6 +50,7 @@ struct adc_jack_data {
 	struct delayed_work handler;
 	struct iio_channel *chan;
+	bool wakeup_source;
 };
 
 static void adc_jack_handler(struct work_struct *work)
@@ -105,6 +107,7 @@ static int adc_jack_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	data->dev = &pdev->dev;
 	data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
 	if (IS_ERR(data->edev)) {
 		dev_err(&pdev->dev, "failed to allocate extcon device\n");
@@ -128,6 +131,7 @@ static int adc_jack_probe(struct platform_device *pdev)
 		return PTR_ERR(data->chan);
 
 	data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
+	data->wakeup_source = pdata->wakeup_source;
 
 	INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
@@ -151,6 +155,9 @@ static int adc_jack_probe(struct platform_device *pdev)
 		return err;
 	}
 
+	if (data->wakeup_source)
+		device_init_wakeup(&pdev->dev, 1);
+
 	return 0;
 }
@@ -165,11 +172,38 @@ static int adc_jack_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int adc_jack_suspend(struct device *dev)
+{
+	struct adc_jack_data *data = dev_get_drvdata(dev);
+
+	cancel_delayed_work_sync(&data->handler);
+	if (device_may_wakeup(data->dev))
+		enable_irq_wake(data->irq);
+
+	return 0;
+}
+
+static int adc_jack_resume(struct device *dev)
+{
+	struct adc_jack_data *data = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(data->dev))
+		disable_irq_wake(data->irq);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
+		adc_jack_suspend, adc_jack_resume);
+
 static struct platform_driver adc_jack_driver = {
 	.probe = adc_jack_probe,
 	.remove = adc_jack_remove,
 	.driver = {
 		.name = "adc-jack",
+		.pm = &adc_jack_pm_ops,
 	},
 };
 
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	palmas_enable_irq(palmas_usb);
 	/* perform initial detection */
+	if (palmas_usb->enable_gpio_vbus_detection)
+		palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
 	palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
 	device_set_wakeup_capable(&pdev->dev, true);
 	return 0;
@@ -24,8 +24,10 @@
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/acpi.h>
 
 #define USB_GPIO_DEBOUNCE_MS	20	/* ms */
@@ -91,7 +93,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
 	struct usb_extcon_info *info;
 	int ret;
 
-	if (!np)
+	if (!np && !ACPI_HANDLE(dev))
 		return -EINVAL;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -141,7 +143,8 @@ static int usb_extcon_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, info);
-	device_init_wakeup(dev, 1);
+	device_init_wakeup(dev, true);
+	dev_pm_set_wake_irq(dev, info->id_irq);
 
 	/* Perform initial detection */
 	usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -155,6 +158,9 @@ static int usb_extcon_remove(struct platform_device *pdev)
 
 	cancel_delayed_work_sync(&info->wq_detcable);
 
+	dev_pm_clear_wake_irq(&pdev->dev);
+	device_init_wakeup(&pdev->dev, false);
+
 	return 0;
 }
 
@@ -164,12 +170,6 @@ static int usb_extcon_suspend(struct device *dev)
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;
 
-	if (device_may_wakeup(dev)) {
-		ret = enable_irq_wake(info->id_irq);
-		if (ret)
-			return ret;
-	}
-
 	/*
	 * We don't want to process any IRQs after this point
	 * as GPIOs used behind I2C subsystem might not be
@@ -185,13
+185,10 @@ static int usb_extcon_resume(struct device *dev) struct usb_extcon_info *info = dev_get_drvdata(dev); int ret = 0; - if (device_may_wakeup(dev)) { - ret = disable_irq_wake(info->id_irq); - if (ret) - return ret; - } - enable_irq(info->id_irq); + if (!device_may_wakeup(dev)) + queue_delayed_work(system_power_efficient_wq, + &info->wq_detcable, 0); return ret; } @@ -206,6 +203,12 @@ static const struct of_device_id usb_extcon_dt_match[] = { }; MODULE_DEVICE_TABLE(of, usb_extcon_dt_match); +static const struct platform_device_id usb_extcon_platform_ids[] = { + { .name = "extcon-usb-gpio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids); + static struct platform_driver usb_extcon_driver = { .probe = usb_extcon_probe, .remove = usb_extcon_remove, @@ -214,6 +217,7 @@ static struct platform_driver usb_extcon_driver = { .pm = &usb_extcon_pm_ops, .of_match_table = usb_extcon_dt_match, }, + .id_table = usb_extcon_platform_ids, }; module_platform_driver(usb_extcon_driver); @@ -77,6 +77,26 @@ static const char *extcon_name[] = { NULL, }; +/** + * struct extcon_cable - An internal data for each cable of extcon device. + * @edev: The extcon device + * @cable_index: Index of this cable in the edev + * @attr_g: Attribute group for the cable + * @attr_name: "name" sysfs entry + * @attr_state: "state" sysfs entry + * @attrs: Array pointing to attr_name and attr_state for attr_g + */ +struct extcon_cable { + struct extcon_dev *edev; + int cable_index; + + struct attribute_group attr_g; + struct device_attribute attr_name; + struct device_attribute attr_state; + + struct attribute *attrs[3]; /* to be fed to attr_g.attrs */ +}; + static struct class *extcon_class; #if defined(CONFIG_ANDROID) static struct class_compat *switch_class; @@ -127,38 +147,6 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id return -EINVAL; } -static int find_cable_id_by_name(struct extcon_dev *edev, const char *name) -{ - int id = -EINVAL; - int i = 0; - - /* Find the id of extcon cable */ - while (extcon_name[i]) { - if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { - id = i; - break; - } - i++; - } - - return id; -} - -static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) -{ - int id; - - if (edev->max_supported == 0) - return -EINVAL; - - /* Find the the number of extcon cable */ - id = find_cable_id_by_name(edev, name); - if (id < 0) - return id; - - return find_cable_index_by_id(edev, id); -} - static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) { if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { @@ -374,25 +362,6 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id) EXPORT_SYMBOL_GPL(extcon_get_cable_state_); /** - * extcon_get_cable_state() - Get the status of a specific cable. - * @edev: the extcon device that has the cable. - * @cable_name: cable name. - * - * Note that this is slower than extcon_get_cable_state_. - */ -int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) -{ - int id; - - id = find_cable_id_by_name(edev, cable_name); - if (id < 0) - return id; - - return extcon_get_cable_state_(edev, id); -} -EXPORT_SYMBOL_GPL(extcon_get_cable_state); - -/** * extcon_set_cable_state_() - Set the status of a specific cable. * @edev: the extcon device that has the cable. 
* @id: the unique id of each external connector @@ -422,28 +391,6 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id, EXPORT_SYMBOL_GPL(extcon_set_cable_state_); /** - * extcon_set_cable_state() - Set the status of a specific cable. - * @edev: the extcon device that has the cable. - * @cable_name: cable name. - * @cable_state: the new cable status. The default semantics is - * true: attached / false: detached. - * - * Note that this is slower than extcon_set_cable_state_. - */ -int extcon_set_cable_state(struct extcon_dev *edev, - const char *cable_name, bool cable_state) -{ - int id; - - id = find_cable_id_by_name(edev, cable_name); - if (id < 0) - return id; - - return extcon_set_cable_state_(edev, id, cable_state); -} -EXPORT_SYMBOL_GPL(extcon_set_cable_state); - -/** * extcon_get_extcon_dev() - Get the extcon device instance from the name * @extcon_name: The extcon name provided with extcon_dev_register() */ @@ -467,105 +414,6 @@ out: EXPORT_SYMBOL_GPL(extcon_get_extcon_dev); /** - * extcon_register_interest() - Register a notifier for a state change of a - * specific cable, not an entier set of cables of a - * extcon device. - * @obj: an empty extcon_specific_cable_nb object to be returned. - * @extcon_name: the name of extcon device. - * if NULL, extcon_register_interest will register - * every cable with the target cable_name given. - * @cable_name: the target cable name. - * @nb: the notifier block to get notified. - * - * Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets - * the struct for you. - * - * extcon_register_interest is a helper function for those who want to get - * notification for a single specific cable's status change. If a user wants - * to get notification for any changes of all cables of a extcon device, - * he/she should use the general extcon_register_notifier(). - * - * Note that the second parameter given to the callback of nb (val) is - * "old_state", not the current state. The current state can be retrieved - * by looking at the third pameter (edev pointer)'s state value. - */ -int extcon_register_interest(struct extcon_specific_cable_nb *obj, - const char *extcon_name, const char *cable_name, - struct notifier_block *nb) -{ - unsigned long flags; - int ret; - - if (!obj || !cable_name || !nb) - return -EINVAL; - - if (extcon_name) { - obj->edev = extcon_get_extcon_dev(extcon_name); - if (!obj->edev) - return -ENODEV; - - obj->cable_index = find_cable_index_by_name(obj->edev, - cable_name); - if (obj->cable_index < 0) - return obj->cable_index; - - obj->user_nb = nb; - - spin_lock_irqsave(&obj->edev->lock, flags); - ret = raw_notifier_chain_register( - &obj->edev->nh[obj->cable_index], - obj->user_nb); - spin_unlock_irqrestore(&obj->edev->lock, flags); - } else { - struct class_dev_iter iter; - struct extcon_dev *extd; - struct device *dev; - - if (!extcon_class) - return -ENODEV; - class_dev_iter_init(&iter, extcon_class, NULL, NULL); - while ((dev = class_dev_iter_next(&iter))) { - extd = dev_get_drvdata(dev); - - if (find_cable_index_by_name(extd, cable_name) < 0) - continue; - - class_dev_iter_exit(&iter); - return extcon_register_interest(obj, extd->name, - cable_name, nb); - } - - ret = -ENODEV; - } - - return ret; -} -EXPORT_SYMBOL_GPL(extcon_register_interest); - -/** - * extcon_unregister_interest() - Unregister the notifier registered by - * extcon_register_interest(). - * @obj: the extcon_specific_cable_nb object returned by - * extcon_register_interest(). 
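Callers of the string-based entry point removed here (and of extcon_get_cable_state() above) migrate to the id-based variants; for example, with EXTCON_USB standing in for whatever cable a driver handles:

	/* before this patch */
	extcon_set_cable_state(edev, "USB", true);

	/* after: the cable is identified by its extcon id, not by name */
	extcon_set_cable_state_(edev, EXTCON_USB, true);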
- */ -int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) -{ - unsigned long flags; - int ret; - - if (!obj) - return -EINVAL; - - spin_lock_irqsave(&obj->edev->lock, flags); - ret = raw_notifier_chain_unregister( - &obj->edev->nh[obj->cable_index], obj->user_nb); - spin_unlock_irqrestore(&obj->edev->lock, flags); - - return ret; -} -EXPORT_SYMBOL_GPL(extcon_unregister_interest); - -/** * extcon_register_notifier() - Register a notifiee to get notified by * any attach status changes from the extcon. * @edev: the extcon device that has the external connecotr. @@ -582,14 +430,35 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id, unsigned long flags; int ret, idx; - if (!edev || !nb) + if (!nb) return -EINVAL; - idx = find_cable_index_by_id(edev, id); + if (edev) { + idx = find_cable_index_by_id(edev, id); + if (idx < 0) + return idx; - spin_lock_irqsave(&edev->lock, flags); - ret = raw_notifier_chain_register(&edev->nh[idx], nb); - spin_unlock_irqrestore(&edev->lock, flags); + spin_lock_irqsave(&edev->lock, flags); + ret = raw_notifier_chain_register(&edev->nh[idx], nb); + spin_unlock_irqrestore(&edev->lock, flags); + } else { + struct extcon_dev *extd; + + mutex_lock(&extcon_dev_list_lock); + list_for_each_entry(extd, &extcon_dev_list, entry) { + idx = find_cable_index_by_id(extd, id); + if (idx >= 0) + break; + } + mutex_unlock(&extcon_dev_list_lock); + + if (idx >= 0) { + edev = extd; + return extcon_register_notifier(extd, id, nb); + } else { + ret = -ENODEV; + } + } return ret; } @@ -611,6 +480,8 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id, return -EINVAL; idx = find_cable_index_by_id(edev, id); + if (idx < 0) + return idx; spin_lock_irqsave(&edev->lock, flags); ret = raw_notifier_chain_unregister(&edev->nh[idx], nb); @@ -693,66 +564,6 @@ void extcon_dev_free(struct extcon_dev *edev) } EXPORT_SYMBOL_GPL(extcon_dev_free); -static int devm_extcon_dev_match(struct device *dev, void *res, void *data) -{ - struct extcon_dev **r = res; - - if (WARN_ON(!r || !*r)) - return 0; - - return *r == data; -} - -static void devm_extcon_dev_release(struct device *dev, void *res) -{ - extcon_dev_free(*(struct extcon_dev **)res); -} - -/** - * devm_extcon_dev_allocate - Allocate managed extcon device - * @dev: device owning the extcon device being created - * @supported_cable: Array of supported extcon ending with EXTCON_NONE. - * If supported_cable is NULL, cable name related APIs - * are disabled. - * - * This function manages automatically the memory of extcon device using device - * resource management and simplify the control of freeing the memory of extcon - * device. 
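The edev == NULL branch added above also absorbs the use case that the just-removed extcon_register_interest(obj, NULL, ...) served. A minimal sketch, reusing the hypothetical my_nb notifier block from the earlier example:

	int ret;

	/*
	 * A NULL edev asks the core to walk extcon_dev_list and bind the
	 * notifier to the first device supporting the given cable id.
	 */
	ret = extcon_register_notifier(NULL, EXTCON_USB, &my_nb);
	if (ret == -ENODEV)
		pr_warn("no registered extcon device supports EXTCON_USB\n");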
- * - * Returns the pointer memory of allocated extcon_dev if success - * or ERR_PTR(err) if fail - */ -struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, - const unsigned int *supported_cable) -{ - struct extcon_dev **ptr, *edev; - - ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - edev = extcon_dev_allocate(supported_cable); - if (IS_ERR(edev)) { - devres_free(ptr); - return edev; - } - - edev->dev.parent = dev; - - *ptr = edev; - devres_add(dev, ptr); - - return edev; -} -EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate); - -void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev) -{ - WARN_ON(devres_release(dev, devm_extcon_dev_release, - devm_extcon_dev_match, edev)); -} -EXPORT_SYMBOL_GPL(devm_extcon_dev_free); - /** * extcon_dev_register() - Register a new extcon device * @edev : the new extcon device (should be allocated before calling) @@ -1018,63 +829,6 @@ void extcon_dev_unregister(struct extcon_dev *edev) } EXPORT_SYMBOL_GPL(extcon_dev_unregister); -static void devm_extcon_dev_unreg(struct device *dev, void *res) -{ - extcon_dev_unregister(*(struct extcon_dev **)res); -} - -/** - * devm_extcon_dev_register() - Resource-managed extcon_dev_register() - * @dev: device to allocate extcon device - * @edev: the new extcon device to register - * - * Managed extcon_dev_register() function. If extcon device is attached with - * this function, that extcon device is automatically unregistered on driver - * detach. Internally this function calls extcon_dev_register() function. - * To get more information, refer that function. - * - * If extcon device is registered with this function and the device needs to be - * unregistered separately, devm_extcon_dev_unregister() should be used. - * - * Returns 0 if success or negaive error number if failure. - */ -int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev) -{ - struct extcon_dev **ptr; - int ret; - - ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - ret = extcon_dev_register(edev); - if (ret) { - devres_free(ptr); - return ret; - } - - *ptr = edev; - devres_add(dev, ptr); - - return 0; -} -EXPORT_SYMBOL_GPL(devm_extcon_dev_register); - -/** - * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister() - * @dev: device the extcon belongs to - * @edev: the extcon device to unregister - * - * Unregister extcon device that is registered with devm_extcon_dev_register() - * function. 
- */ -void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev) -{ - WARN_ON(devres_release(dev, devm_extcon_dev_unreg, - devm_extcon_dev_match, edev)); -} -EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister); - #ifdef CONFIG_OF /* * extcon_get_edev_by_phandle - Get the extcon device from devicetree @@ -1107,10 +861,12 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index) list_for_each_entry(edev, &extcon_dev_list, entry) { if (edev->dev.parent && edev->dev.parent->of_node == node) { mutex_unlock(&extcon_dev_list_lock); + of_node_put(node); return edev; } } mutex_unlock(&extcon_dev_list_lock); + of_node_put(node); return ERR_PTR(-EPROBE_DEFER); } @@ -174,6 +174,7 @@ static __init void reserve_regions(void) { efi_memory_desc_t *md; u64 paddr, npages, size; + int resv; if (efi_enabled(EFI_DBG)) pr_info("Processing EFI memory map:\n"); @@ -190,12 +191,14 @@ static __init void reserve_regions(void) paddr = md->phys_addr; npages = md->num_pages; + resv = is_reserve_region(md); if (efi_enabled(EFI_DBG)) { char buf[64]; - pr_info(" 0x%012llx-0x%012llx %s", + pr_info(" 0x%012llx-0x%012llx %s%s\n", paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, - efi_md_typeattr_format(buf, sizeof(buf), md)); + efi_md_typeattr_format(buf, sizeof(buf), md), + resv ? "*" : ""); } memrange_efi_to_native(&paddr, &npages); @@ -204,14 +207,9 @@ static __init void reserve_regions(void) if (is_normal_ram(md)) early_init_dt_add_memory_arch(paddr, size); - if (is_reserve_region(md)) { + if (resv) memblock_mark_nomap(paddr, size); - if (efi_enabled(EFI_DBG)) - pr_cont("*"); - } - if (efi_enabled(EFI_DBG)) - pr_cont("\n"); } set_bit(EFI_MEMMAP, &efi.flags); @@ -107,6 +107,11 @@ static int __init arm_enable_runtime_services(void) return 0; } + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("EFI runtime services access via paravirt.\n"); + return 0; + } + pr_info("Remapping and enabling EFI services.\n"); mapsize = efi.memmap.map_end - efi.memmap.map; @@ -34,6 +34,7 @@ struct pstore_read_data { int *count; struct timespec *timespec; bool *compressed; + ssize_t *ecc_notice_size; char **buf; }; @@ -69,6 +70,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) *cb_data->compressed = true; else *cb_data->compressed = false; + *cb_data->ecc_notice_size = 0; } else if (sscanf(name, "dump-type%u-%u-%d-%lu", cb_data->type, &part, &cnt, &time) == 4) { *cb_data->id = generic_id(time, part, cnt); @@ -76,6 +78,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) cb_data->timespec->tv_sec = time; cb_data->timespec->tv_nsec = 0; *cb_data->compressed = false; + *cb_data->ecc_notice_size = 0; } else if (sscanf(name, "dump-type%u-%u-%lu", cb_data->type, &part, &time) == 3) { /* @@ -88,6 +91,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data) cb_data->timespec->tv_sec = time; cb_data->timespec->tv_nsec = 0; *cb_data->compressed = false; + *cb_data->ecc_notice_size = 0; } else return 0; @@ -210,6 +214,7 @@ static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos) static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count, struct timespec *timespec, char **buf, bool *compressed, + ssize_t *ecc_notice_size, struct pstore_info *psi) { struct pstore_read_data data; @@ -220,6 +225,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, data.count = count; data.timespec = timespec; data.compressed = compressed; + data.ecc_notice_size = ecc_notice_size; data.buf = buf; *data.buf = 
kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); @@ -393,6 +399,13 @@ static __init int efivars_pstore_init(void) static __exit void efivars_pstore_exit(void) { + if (!efi_pstore_info.bufsize) + return; + + pstore_unregister(&efi_pstore_info); + kfree(efi_pstore_info.buf); + efi_pstore_info.buf = NULL; + efi_pstore_info.bufsize = 0; } module_init(efivars_pstore_init); @@ -24,6 +24,9 @@ #include <linux/of_fdt.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/acpi.h> +#include <linux/ucs2_string.h> #include <asm/early_ioremap.h> @@ -195,6 +198,96 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } +#if IS_ENABLED(CONFIG_ACPI) +#define EFIVAR_SSDT_NAME_MAX 16 +static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; +static int __init efivar_ssdt_setup(char *str) +{ + if (strlen(str) < sizeof(efivar_ssdt)) + memcpy(efivar_ssdt, str, strlen(str)); + else + pr_warn("efivar_ssdt: name too long: %s\n", str); + return 0; +} +__setup("efivar_ssdt=", efivar_ssdt_setup); + +static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor, + unsigned long name_size, void *data) +{ + struct efivar_entry *entry; + struct list_head *list = data; + char utf8_name[EFIVAR_SSDT_NAME_MAX]; + int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size); + + ucs2_as_utf8(utf8_name, name, limit - 1); + if (strncmp(utf8_name, efivar_ssdt, limit) != 0) + return 0; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + memcpy(entry->var.VariableName, name, name_size); + memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t)); + + efivar_entry_add(entry, list); + + return 0; +} + +static __init int efivar_ssdt_load(void) +{ + LIST_HEAD(entries); + struct efivar_entry *entry, *aux; + unsigned long size; + void *data; + int ret; + + ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries); + + list_for_each_entry_safe(entry, aux, &entries, list) { + pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, + &entry->var.VendorGuid); + + list_del(&entry->list); + + ret = efivar_entry_size(entry, &size); + if (ret) { + pr_err("failed to get var size\n"); + goto free_entry; + } + + data = kmalloc(size, GFP_KERNEL); + if (!data) + goto free_entry; + + ret = efivar_entry_get(entry, NULL, &size, data); + if (ret) { + pr_err("failed to get var data\n"); + goto free_data; + } + + ret = acpi_load_table(data); + if (ret) { + pr_err("failed to load table: %d\n", ret); + goto free_data; + } + + goto free_entry; + +free_data: + kfree(data); + +free_entry: + kfree(entry); + } + + return ret; +} +#else +static inline int efivar_ssdt_load(void) { return 0; } +#endif + /* * We register the efi subsystem with the firmware subsystem and the * efivars subsystem with the efi subsystem, if the system was booted with @@ -218,6 +311,9 @@ static int __init efisubsys_init(void) if (error) goto err_put; + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efivar_ssdt_load(); + error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); if (error) { pr_err("efi: Sysfs attribute export failed with error %d.\n", @@ -472,12 +568,14 @@ device_initcall(efi_load_efivars); FIELD_SIZEOF(struct efi_fdt_params, field) \ } -static __initdata struct { +struct params { const char name[32]; const char propname[32]; int offset; int size; -} dt_params[] = { +}; + +static __initdata struct params fdt_params[] = { UEFI_PARAM("System Table", "linux,uefi-system-table", system_table), UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap), 
UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size), @@ -485,44 +583,91 @@ static __initdata struct { UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver) }; +static __initdata struct params xen_fdt_params[] = { + UEFI_PARAM("System Table", "xen,uefi-system-table", system_table), + UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap), + UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size), + UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size), + UEFI_PARAM("MemMap Desc. Version", "xen,uefi-mmap-desc-ver", desc_ver) +}; + +#define EFI_FDT_PARAMS_SIZE ARRAY_SIZE(fdt_params) + +static __initdata struct { + const char *uname; + const char *subnode; + struct params *params; +} dt_params[] = { + { "hypervisor", "uefi", xen_fdt_params }, + { "chosen", NULL, fdt_params }, +}; + struct param_info { int found; void *params; + const char *missing; }; -static int __init fdt_find_uefi_params(unsigned long node, const char *uname, - int depth, void *data) +static int __init __find_uefi_params(unsigned long node, + struct param_info *info, + struct params *params) { - struct param_info *info = data; const void *prop; void *dest; u64 val; int i, len; - if (depth != 1 || strcmp(uname, "chosen") != 0) - return 0; - - for (i = 0; i < ARRAY_SIZE(dt_params); i++) { - prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); - if (!prop) + for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) { + prop = of_get_flat_dt_prop(node, params[i].propname, &len); + if (!prop) { + info->missing = params[i].name; return 0; - dest = info->params + dt_params[i].offset; + } + + dest = info->params + params[i].offset; info->found++; val = of_read_number(prop, len / sizeof(u32)); - if (dt_params[i].size == sizeof(u32)) + if (params[i].size == sizeof(u32)) *(u32 *)dest = val; else *(u64 *)dest = val; if (efi_enabled(EFI_DBG)) - pr_info(" %s: 0x%0*llx\n", dt_params[i].name, - dt_params[i].size * 2, val); + pr_info(" %s: 0x%0*llx\n", params[i].name, + params[i].size * 2, val); } + return 1; } +static int __init fdt_find_uefi_params(unsigned long node, const char *uname, + int depth, void *data) +{ + struct param_info *info = data; + int i; + + for (i = 0; i < ARRAY_SIZE(dt_params); i++) { + const char *subnode = dt_params[i].subnode; + + if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) { + info->missing = dt_params[i].params[0].name; + continue; + } + + if (subnode) { + node = of_get_flat_dt_subnode_by_name(node, subnode); + if (node < 0) + return 0; + } + + return __find_uefi_params(node, info, dt_params[i].params); + } + + return 0; +} + int __init efi_get_fdt_params(struct efi_fdt_params *params) { struct param_info info; @@ -538,7 +683,7 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params) pr_info("UEFI not found.\n"); else if (!ret) pr_err("Can't find '%s' in device tree!\n", - dt_params[info.found].name); + info.missing); return ret; } @@ -37,13 +37,13 @@ static int efibc_set_variable(const char *name, const char *value) size_t size = (strlen(value) + 1) * sizeof(efi_char16_t); if (size > sizeof(entry->var.Data)) { - pr_err("value is too large"); + pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name); return -EINVAL; } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { - pr_err("failed to allocate efivar entry"); + pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name); return -ENOMEM; } @@ -22,7 +22,16 @@ #include <linux/stringify.h> #include <asm/efi.h> -static void efi_call_virt_check_flags(unsigned 
long flags, const char *call) +/* + * Wrap around the new efi_call_virt_generic() macros so that the + * code doesn't get too cluttered: + */ +#define efi_call_virt(f, args...) \ + efi_call_virt_pointer(efi.systab->runtime, f, args) +#define __efi_call_virt(f, args...) \ + __efi_call_virt_pointer(efi.systab->runtime, f, args) + +void efi_call_virt_check_flags(unsigned long flags, const char *call) { unsigned long cur_flags, mismatch; @@ -39,48 +48,6 @@ static void efi_call_virt_check_flags(unsigned long flags, const char *call) } /* - * Arch code can implement the following three template macros, avoiding - * reptition for the void/non-void return cases of {__,}efi_call_virt: - * - * * arch_efi_call_virt_setup - * - * Sets up the environment for the call (e.g. switching page tables, - * allowing kernel-mode use of floating point, if required). - * - * * arch_efi_call_virt - * - * Performs the call. The last expression in the macro must be the call - * itself, allowing the logic to be shared by the void and non-void - * cases. - * - * * arch_efi_call_virt_teardown - * - * Restores the usual kernel environment once the call has returned. - */ - -#define efi_call_virt(f, args...) \ -({ \ - efi_status_t __s; \ - unsigned long flags; \ - arch_efi_call_virt_setup(); \ - local_save_flags(flags); \ - __s = arch_efi_call_virt(f, args); \ - efi_call_virt_check_flags(flags, __stringify(f)); \ - arch_efi_call_virt_teardown(); \ - __s; \ -}) - -#define __efi_call_virt(f, args...) \ -({ \ - unsigned long flags; \ - arch_efi_call_virt_setup(); \ - local_save_flags(flags); \ - arch_efi_call_virt(f, args); \ - efi_call_virt_check_flags(flags, __stringify(f)); \ - arch_efi_call_virt_teardown(); \ -}) - -/* * According to section 7.1 of the UEFI spec, Runtime Services are not fully * reentrant, and there are particular combinations of calls that need to be * serialized. 
(source: UEFI Specification v2.4A) @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "psci: " fmt +#include <linux/acpi.h> #include <linux/arm-smccc.h> #include <linux/cpuidle.h> #include <linux/errno.h> @@ -256,13 +257,6 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) u32 *psci_states; struct device_node *state_node; - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - /* Count idle states */ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", count))) { @@ -310,11 +304,69 @@ free_mem: return ret; } +#ifdef CONFIG_ACPI +#include <acpi/processor.h> + +static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + u32 *psci_states; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + kfree(psci_states); + return -EINVAL; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; +} +#else +static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) +{ + return -EINVAL; +} +#endif + int psci_cpu_init_idle(unsigned int cpu) { struct device_node *cpu_node; int ret; + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + if (!acpi_disabled) + return psci_acpi_cpu_init_idle(cpu); + cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; @@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB menuconfig GPIOLIB bool "GPIO Support" + select ANON_INODES help This enables GPIO support through the generic GPIO library. You only need to enable this, if you also want to enable @@ -48,7 +49,7 @@ config GPIO_DEVRES config OF_GPIO def_bool y - depends on OF || COMPILE_TEST + depends on OF config GPIO_ACPI def_bool y @@ -249,7 +250,7 @@ config GPIO_LOONGSON driver for GPIO functionality on Loongson-2F/3A/3B processors. config GPIO_LPC18XX - bool "NXP LPC18XX/43XX GPIO support" + tristate "NXP LPC18XX/43XX GPIO support" default y if ARCH_LPC18XX depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST) help @@ -401,9 +402,12 @@ config GPIO_TB10X select OF_GPIO config GPIO_TEGRA - bool - default y + bool "NVIDIA Tegra GPIO support" + default ARCH_TEGRA depends on ARCH_TEGRA || COMPILE_TEST + depends on OF + help + Say yes here to support GPIO pins on NVIDIA Tegra SoCs. 
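The select ANON_INODES added to GPIOLIB above backs the GPIO character-device ABI, whose line handles are handed out as anonymous file descriptors. A minimal userspace probe of a chip, assuming the chardev is exposed as /dev/gpiochip0, might read:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/gpio.h>

	int main(void)
	{
		struct gpiochip_info info;
		int fd = open("/dev/gpiochip0", O_RDWR);

		if (fd < 0)
			return 1;

		if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
			printf("%s (%s): %u lines\n",
			       info.name, info.label, info.lines);

		close(fd);
		return 0;
	}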
config GPIO_TS4800 tristate "TS-4800 DIO blocks and compatibles" @@ -530,7 +534,7 @@ menu "Port-mapped I/O GPIO drivers" config GPIO_104_DIO_48E tristate "ACCES 104-DIO-48E GPIO support" - depends on ISA + depends on ISA_BUS_API select GPIOLIB_IRQCHIP help Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, @@ -540,7 +544,7 @@ config GPIO_104_DIO_48E config GPIO_104_IDIO_16 tristate "ACCES 104-IDIO-16 GPIO support" - depends on ISA + depends on ISA_BUS_API select GPIOLIB_IRQCHIP help Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, @@ -551,7 +555,7 @@ config GPIO_104_IDIO_16 config GPIO_104_IDI_48 tristate "ACCES 104-IDI-48 GPIO support" - depends on ISA + depends on ISA_BUS_API select GPIOLIB_IRQCHIP help Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, @@ -627,7 +631,7 @@ config GPIO_TS5500 config GPIO_WS16C48 tristate "WinSystems WS16C48 GPIO support" - depends on ISA + depends on ISA_BUS_API select GPIOLIB_IRQCHIP help Enables GPIO support for the WinSystems WS16C48. The base port @@ -870,6 +874,15 @@ config GPIO_LP3943 LP3943 can be used as a GPIO expander which provides up to 16 GPIOs. Open drain outputs are required for this usage. +config GPIO_MAX77620 + tristate "GPIO support for PMIC MAX77620 and MAX20024" + depends on MFD_MAX77620 + help + GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor. + MAX77620 PMIC has 8 pins that can be configured as GPIOs. The + driver also provides interrupt support for each of the gpios. + Say yes here to enable the max77620 to be used as gpio controller. + config GPIO_MSIC bool "Intel MSIC mixed signal gpio support" depends on MFD_INTEL_MSIC @@ -1025,11 +1038,18 @@ config GPIO_BT8XX If unsure, say N. config GPIO_INTEL_MID - bool "Intel Mid GPIO support" - depends on X86 + bool "Intel MID GPIO support" + depends on X86_INTEL_MID + select GPIOLIB_IRQCHIP + help + Say Y here to support Intel MID GPIO. + +config GPIO_MERRIFIELD + tristate "Intel Merrifield GPIO support" + depends on X86_INTEL_MID select GPIOLIB_IRQCHIP help - Say Y here to support Intel Mid GPIO. + Say Y here to support Intel Merrifield GPIO. 
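Once a board exposes the MAX77620 pins through the driver enabled above, consumers use the ordinary descriptor API. A hedged sketch (the "enable" function name and my_consumer_probe are illustrative only):

	#include <linux/gpio/consumer.h>

	static int my_consumer_probe(struct device *dev)
	{
		struct gpio_desc *gd;

		gd = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(gd))
			return PTR_ERR(gd);

		/* the PMIC sits on I2C, so use the cansleep variant */
		gpiod_set_value_cansleep(gd, 1);

		return 0;
	}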
config GPIO_ML_IOH tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support" @@ -61,8 +61,10 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o +obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o +obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o @@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); const unsigned io_port = offset / 8; - const unsigned control_port = io_port / 2; + const unsigned int control_port = io_port / 3; const unsigned control_addr = dio48egpio->base + 3 + control_port*4; unsigned long flags; unsigned control; @@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, { struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); const unsigned io_port = offset / 8; - const unsigned control_port = io_port / 2; + const unsigned int control_port = io_port / 3; const unsigned mask = BIT(offset % 8); const unsigned control_addr = dio48egpio->base + 3 + control_port*4; const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; @@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id) idi48gpio->irq = irq[id]; spin_lock_init(&idi48gpio->lock); + spin_lock_init(&idi48gpio->ack_lock); dev_set_drvdata(dev, idi48gpio); @@ -35,13 +35,8 @@ struct gen_74x164_chip { static int __gen_74x164_write_config(struct gen_74x164_chip *chip) { - struct spi_transfer xfer = { - .tx_buf = chip->buffer, - .len = chip->registers, - }; - - return spi_sync_transfer(to_spi_device(chip->gpio_chip.parent), - &xfer, 1); + return spi_write(to_spi_device(chip->gpio_chip.parent), chip->buffer, + chip->registers); } static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset) @@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio) /* disable interrupts and clear status */ for (i = 0; i < kona_gpio->num_bank; i++) { /* Unlock the entire bank first */ - bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); + bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE); writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); /* Now re-lock the bank */ - bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); + bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE); } } @@ -20,8 +20,12 @@ static int clps711x_gpio_probe(struct platform_device *pdev) void __iomem *dat, *dir; struct gpio_chip *gc; struct resource *res; - int err, id = np ? 
of_alias_get_id(np, "gpio") : pdev->id; + int err, id; + if (!np) + return -ENODEV; + + id = of_alias_get_id(np, "gpio"); if ((id < 0) || (id > 4)) return -ENODEV; @@ -63,7 +67,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev) break; } - gc->base = id * 8; + gc->base = -1; gc->owner = THIS_MODULE; platform_set_drvdata(pdev, gc); @@ -71,7 +75,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev) } static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = { - { .compatible = "cirrus,clps711x-gpio" }, + { .compatible = "cirrus,ep7209-gpio" }, { } }; MODULE_DEVICE_TABLE(of, clps711x_gpio_ids); @@ -486,6 +486,7 @@ dwapb_gpio_get_pdata(struct device *dev) pp->idx >= DWAPB_MAX_PORTS) { dev_err(dev, "missing/invalid port index for port%d\n", i); + fwnode_handle_put(fwnode); return ERR_PTR(-EINVAL); } @@ -125,6 +125,7 @@ static inline void superio_exit(int base) * GPIO chip. */ +static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset); static int f7188x_gpio_direction_out(struct gpio_chip *chip, @@ -139,6 +140,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *gc, .chip = { \ .label = DRVNAME, \ .owner = THIS_MODULE, \ + .get_direction = f7188x_gpio_get_direction, \ .direction_input = f7188x_gpio_direction_in, \ .get = f7188x_gpio_get, \ .direction_output = f7188x_gpio_direction_out, \ @@ -209,6 +211,26 @@ static struct f7188x_gpio_bank f81866_gpio_bank[] = { F7188X_GPIO_BANK(80, 8, 0x88), }; +static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset) +{ + int err; + struct f7188x_gpio_bank *bank = + container_of(chip, struct f7188x_gpio_bank, chip); + struct f7188x_sio *sio = bank->data->sio; + u8 dir; + + err = superio_enter(sio->addr); + if (err) + return err; + superio_select(sio->addr, SIO_LD_GPIO); + + dir = superio_inb(sio->addr, gpio_dir(bank->regbase)); + + superio_exit(sio->addr); + + return !(dir & 1 << offset); +} + static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset) { int err; @@ -1,7 +1,7 @@ /* * Intel MID GPIO driver * - * Copyright (c) 2008-2014 Intel Corporation. + * Copyright (c) 2008-2014,2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -17,21 +17,20 @@ * Moorestown platform Langwell chip. * Medfield platform Penwell chip. * Clovertrail platform Cloverview chip. - * Merrifield platform Tangier chip. 
*/ -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/platform_device.h> -#include <linux/kernel.h> #include <linux/delay.h> -#include <linux/stddef.h> -#include <linux/interrupt.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/io.h> #include <linux/gpio/driver.h> -#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/stddef.h> #define INTEL_MID_IRQ_TYPE_EDGE (1 << 0) #define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1) @@ -64,10 +63,6 @@ enum GPIO_REG { /* intel_mid gpio driver data */ struct intel_mid_gpio_ddata { u16 ngpio; /* number of gpio pins */ - u32 gplr_offset; /* offset of first GPLR register from base */ - u32 flis_base; /* base address of FLIS registers */ - u32 flis_len; /* length of FLIS registers */ - u32 (*get_flis_offset)(int gpio); u32 chip_irq_type; /* chip interrupt type */ }; @@ -252,15 +247,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = { .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, }; -static const struct intel_mid_gpio_ddata gpio_tangier = { - .ngpio = 192, - .gplr_offset = 4, - .flis_base = 0xff0c0000, - .flis_len = 0x8000, - .get_flis_offset = NULL, - .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, -}; - static const struct pci_device_id intel_gpio_ids[] = { { /* Lincroft */ @@ -287,11 +273,6 @@ static const struct pci_device_id intel_gpio_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = (kernel_ulong_t)&gpio_cloverview_core, }, - { - /* Tangier */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199), - .driver_data = (kernel_ulong_t)&gpio_tangier, - }, { 0 } }; MODULE_DEVICE_TABLE(pci, intel_gpio_ids); @@ -401,7 +382,7 @@ static int intel_gpio_probe(struct pci_dev *pdev, spin_lock_init(&priv->lock); pci_set_drvdata(pdev, priv); - retval = gpiochip_add_data(&priv->chip, priv); + retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv); if (retval) { dev_err(&pdev->dev, "gpiochip_add error %d\n", retval); return retval; @@ -29,7 +29,6 @@ #include <mach/hardware.h> #include <mach/platform.h> -#include <mach/irqs.h> #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) @@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin) static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) { - return IRQ_LPC32XX_P0_P1_IRQ; + return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = { - IRQ_LPC32XX_GPIO_00, - IRQ_LPC32XX_GPIO_01, - IRQ_LPC32XX_GPIO_02, - IRQ_LPC32XX_GPIO_03, - IRQ_LPC32XX_GPIO_04, - IRQ_LPC32XX_GPIO_05, -}; - static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table)) - return lpc32xx_gpio_to_irq_gpio_p3_table[offset]; return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = { - IRQ_LPC32XX_GPI_00, - IRQ_LPC32XX_GPI_01, - IRQ_LPC32XX_GPI_02, - IRQ_LPC32XX_GPI_03, - IRQ_LPC32XX_GPI_04, - IRQ_LPC32XX_GPI_05, - IRQ_LPC32XX_GPI_06, - IRQ_LPC32XX_GPI_07, - IRQ_LPC32XX_GPI_08, - IRQ_LPC32XX_GPI_09, - -ENXIO, /* 10 */ - -ENXIO, /* 11 */ - -ENXIO, /* 12 */ - -ENXIO, /* 13 */ - -ENXIO, /* 14 */ - -ENXIO, /* 15 */ - -ENXIO, /* 16 */ - -ENXIO, /* 17 */ - -ENXIO, /* 18 */ - IRQ_LPC32XX_GPI_19, - -ENXIO, /* 20 */ - -ENXIO, /* 21 */ - -ENXIO, /* 22 */ - -ENXIO, /* 23 */ - -ENXIO, /* 24 */ - -ENXIO, /* 25 */ - -ENXIO, /* 26 */ - -ENXIO, /* 27 */ - 
IRQ_LPC32XX_GPI_28, -}; - static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table)) - return lpc32xx_gpio_to_irq_gpi_p3_table[offset]; return -ENXIO; } @@ -383,7 +383,6 @@ static int lp_gpio_probe(struct platform_device *pdev) handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add irqchip\n"); - gpiochip_remove(gc); return ret; } diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c new file mode 100644 index 000000000000..b46b436cb97f --- /dev/null +++ b/ drivers/gpio/gpio-max77620.c@@ -0,0 +1,315 @@ +/* + * MAXIM MAX77620 GPIO driver + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + */ + +#include <linux/gpio/driver.h> +#include <linux/interrupt.h> +#include <linux/mfd/max77620.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define GPIO_REG_ADDR(offset) (MAX77620_REG_GPIO0 + offset) + +struct max77620_gpio { + struct gpio_chip gpio_chip; + struct regmap *rmap; + struct device *dev; + int gpio_irq; + int irq_base; + int gpio_base; +}; + +static const struct regmap_irq max77620_gpio_irqs[] = { + [0] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 0, + }, + [1] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 1, + }, + [2] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 2, + }, + [3] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 3, + }, + [4] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 4, + }, + [5] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 5, + }, + [6] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 6, + }, + [7] = { + .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7, + .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING, + .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING, + .reg_offset = 0, + .type_reg_offset = 7, + }, +}; + +static struct regmap_irq_chip max77620_gpio_irq_chip = { + .name = "max77620-gpio", + .irqs = max77620_gpio_irqs, + .num_irqs = ARRAY_SIZE(max77620_gpio_irqs), + .num_regs = 1, + .num_type_reg = 8, + .irq_reg_stride = 1, + .type_reg_stride = 1, + .status_base = MAX77620_REG_IRQ_LVL2_GPIO, + .type_base = MAX77620_REG_GPIO0, +}; + +static int max77620_gpio_dir_input(struct gpio_chip *gc, unsigned int offset) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + int ret; + + ret 
= regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_DIR_MASK, + MAX77620_CNFG_GPIO_DIR_INPUT); + if (ret < 0) + dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret); + + return ret; +} + +static int max77620_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + unsigned int val; + int ret; + + ret = regmap_read(mgpio->rmap, GPIO_REG_ADDR(offset), &val); + if (ret < 0) { + dev_err(mgpio->dev, "CNFG_GPIOx read failed: %d\n", ret); + return ret; + } + + if (val & MAX77620_CNFG_GPIO_DIR_MASK) + return !!(val & MAX77620_CNFG_GPIO_INPUT_VAL_MASK); + else + return !!(val & MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK); +} + +static int max77620_gpio_dir_output(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + u8 val; + int ret; + + val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH : + MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW; + + ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val); + if (ret < 0) { + dev_err(mgpio->dev, "CNFG_GPIOx val update failed: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_DIR_MASK, + MAX77620_CNFG_GPIO_DIR_OUTPUT); + if (ret < 0) + dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret); + + return ret; +} + +static int max77620_gpio_set_debounce(struct gpio_chip *gc, + unsigned int offset, + unsigned int debounce) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + u8 val; + int ret; + + switch (debounce) { + case 0: + val = MAX77620_CNFG_GPIO_DBNC_None; + break; + case 1 ... 8: + val = MAX77620_CNFG_GPIO_DBNC_8ms; + break; + case 9 ... 16: + val = MAX77620_CNFG_GPIO_DBNC_16ms; + break; + case 17 ... 32: + val = MAX77620_CNFG_GPIO_DBNC_32ms; + break; + default: + dev_err(mgpio->dev, "Illegal value %u\n", debounce); + return -EINVAL; + } + + ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_DBNC_MASK, val); + if (ret < 0) + dev_err(mgpio->dev, "CNFG_GPIOx_DBNC update failed: %d\n", ret); + + return ret; +} + +static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + u8 val; + int ret; + + val = (value) ? 
MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH : + MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW; + + ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val); + if (ret < 0) + dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret); +} + +static int max77620_gpio_set_single_ended(struct gpio_chip *gc, + unsigned int offset, + enum single_ended_mode mode) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + + switch (mode) { + case LINE_MODE_OPEN_DRAIN: + return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_DRV_MASK, + MAX77620_CNFG_GPIO_DRV_OPENDRAIN); + case LINE_MODE_PUSH_PULL: + return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset), + MAX77620_CNFG_GPIO_DRV_MASK, + MAX77620_CNFG_GPIO_DRV_PUSHPULL); + default: + break; + } + + return -ENOTSUPP; +} + +static int max77620_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) +{ + struct max77620_gpio *mgpio = gpiochip_get_data(gc); + struct max77620_chip *chip = dev_get_drvdata(mgpio->dev->parent); + + return regmap_irq_get_virq(chip->gpio_irq_data, offset); +} + +static int max77620_gpio_probe(struct platform_device *pdev) +{ + struct max77620_chip *chip = dev_get_drvdata(pdev->dev.parent); + struct max77620_gpio *mgpio; + int gpio_irq; + int ret; + + gpio_irq = platform_get_irq(pdev, 0); + if (gpio_irq <= 0) { + dev_err(&pdev->dev, "GPIO irq not available %d\n", gpio_irq); + return -ENODEV; + } + + mgpio = devm_kzalloc(&pdev->dev, sizeof(*mgpio), GFP_KERNEL); + if (!mgpio) + return -ENOMEM; + + mgpio->rmap = chip->rmap; + mgpio->dev = &pdev->dev; + mgpio->gpio_irq = gpio_irq; + + mgpio->gpio_chip.label = pdev->name; + mgpio->gpio_chip.parent = &pdev->dev; + mgpio->gpio_chip.direction_input = max77620_gpio_dir_input; + mgpio->gpio_chip.get = max77620_gpio_get; + mgpio->gpio_chip.direction_output = max77620_gpio_dir_output; + mgpio->gpio_chip.set_debounce = max77620_gpio_set_debounce; + mgpio->gpio_chip.set = max77620_gpio_set; + mgpio->gpio_chip.set_single_ended = max77620_gpio_set_single_ended; + mgpio->gpio_chip.to_irq = max77620_gpio_to_irq; + mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR; + mgpio->gpio_chip.can_sleep = 1; + mgpio->gpio_chip.base = -1; + mgpio->irq_base = -1; +#ifdef CONFIG_OF_GPIO + mgpio->gpio_chip.of_node = pdev->dev.parent->of_node; +#endif + + platform_set_drvdata(pdev, mgpio); + + ret = devm_gpiochip_add_data(&pdev->dev, &mgpio->gpio_chip, mgpio); + if (ret < 0) { + dev_err(&pdev->dev, "gpio_init: Failed to add max77620_gpio\n"); + return ret; + } + + mgpio->gpio_base = mgpio->gpio_chip.base; + ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, mgpio->gpio_irq, + IRQF_ONESHOT, mgpio->irq_base, + &max77620_gpio_irq_chip, + &chip->gpio_irq_data); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add gpio irq_chip %d\n", ret); + return ret; + } + + return 0; +} + +static const struct platform_device_id max77620_gpio_devtype[] = { + { .name = "max77620-gpio", }, + {}, +}; +MODULE_DEVICE_TABLE(platform, max77620_gpio_devtype); + +static struct platform_driver max77620_gpio_driver = { + .driver.name = "max77620-gpio", + .probe = max77620_gpio_probe, + .id_table = max77620_gpio_devtype, +}; + +module_platform_driver(max77620_gpio_driver); + +MODULE_DESCRIPTION("GPIO interface for MAX77620 and MAX20024 PMIC"); +MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); +MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>"); +MODULE_ALIAS("platform:max77620-gpio"); +MODULE_LICENSE("GPL v2"); @@ -187,7 +187,6 @@ MODULE_DEVICE_TABLE(mcb, men_z127_ids); 
static struct mcb_driver men_z127_driver = { .driver = { .name = "z127-gpio", - .owner = THIS_MODULE, }, .probe = men_z127_probe, .remove = men_z127_remove, diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c new file mode 100644 index 000000000000..45b51278b8ee --- /dev/null +++ b/ drivers/gpio/gpio-merrifield.c@@ -0,0 +1,444 @@ +/* + * Intel Merrifield SoC GPIO driver + * + * Copyright (c) 2016 Intel Corporation. + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/bitops.h> +#include <linux/gpio/driver.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pinctrl/consumer.h> + +#define GCCR 0x000 /* controller configuration */ +#define GPLR 0x004 /* pin level r/o */ +#define GPDR 0x01c /* pin direction */ +#define GPSR 0x034 /* pin set w/o */ +#define GPCR 0x04c /* pin clear w/o */ +#define GRER 0x064 /* rising edge detect */ +#define GFER 0x07c /* falling edge detect */ +#define GFBR 0x094 /* glitch filter bypass */ +#define GIMR 0x0ac /* interrupt mask */ +#define GISR 0x0c4 /* interrupt source */ +#define GITR 0x300 /* input type */ +#define GLPR 0x318 /* level input polarity */ +#define GWMR 0x400 /* wake mask */ +#define GWSR 0x418 /* wake source */ +#define GSIR 0xc00 /* secure input */ + +/* Intel Merrifield has 192 GPIO pins */ +#define MRFLD_NGPIO 192 + +struct mrfld_gpio_pinrange { + unsigned int gpio_base; + unsigned int pin_base; + unsigned int npins; +}; + +#define GPIO_PINRANGE(gstart, gend, pstart) \ + { \ + .gpio_base = (gstart), \ + .pin_base = (pstart), \ + .npins = (gend) - (gstart) + 1, \ + } + +struct mrfld_gpio { + struct gpio_chip chip; + void __iomem *reg_base; + raw_spinlock_t lock; + struct device *dev; +}; + +static const struct mrfld_gpio_pinrange mrfld_gpio_ranges[] = { + GPIO_PINRANGE(0, 11, 146), + GPIO_PINRANGE(12, 13, 144), + GPIO_PINRANGE(14, 15, 35), + GPIO_PINRANGE(16, 16, 164), + GPIO_PINRANGE(17, 18, 105), + GPIO_PINRANGE(19, 22, 101), + GPIO_PINRANGE(23, 30, 107), + GPIO_PINRANGE(32, 43, 67), + GPIO_PINRANGE(44, 63, 195), + GPIO_PINRANGE(64, 67, 140), + GPIO_PINRANGE(68, 69, 165), + GPIO_PINRANGE(70, 71, 65), + GPIO_PINRANGE(72, 76, 228), + GPIO_PINRANGE(77, 86, 37), + GPIO_PINRANGE(87, 87, 48), + GPIO_PINRANGE(88, 88, 47), + GPIO_PINRANGE(89, 96, 49), + GPIO_PINRANGE(97, 97, 34), + GPIO_PINRANGE(102, 119, 83), + GPIO_PINRANGE(120, 123, 79), + GPIO_PINRANGE(124, 135, 115), + GPIO_PINRANGE(137, 142, 158), + GPIO_PINRANGE(154, 163, 24), + GPIO_PINRANGE(164, 176, 215), + GPIO_PINRANGE(177, 189, 127), + GPIO_PINRANGE(190, 191, 178), +}; + +static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned int offset, + unsigned int reg_type_offset) +{ + struct mrfld_gpio *priv = gpiochip_get_data(chip); + u8 reg = offset / 32; + + return priv->reg_base + reg_type_offset + reg * 4; +} + +static int mrfld_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + void __iomem *gplr = gpio_reg(chip, offset, GPLR); + + return !!(readl(gplr) & BIT(offset % 32)); +} + +static void mrfld_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct mrfld_gpio *priv = gpiochip_get_data(chip); + void __iomem *gpsr, *gpcr; + unsigned long flags; + + raw_spin_lock_irqsave(&priv->lock, flags); + + if (value) 
{ + gpsr = gpio_reg(chip, offset, GPSR); + writel(BIT(offset % 32), gpsr); + } else { + gpcr = gpio_reg(chip, offset, GPCR); + writel(BIT(offset % 32), gpcr); + } + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static int mrfld_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct mrfld_gpio *priv = gpiochip_get_data(chip); + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&priv->lock, flags); + + value = readl(gpdr); + value &= ~BIT(offset % 32); + writel(value, gpdr); + + raw_spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int mrfld_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct mrfld_gpio *priv = gpiochip_get_data(chip); + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); + unsigned long flags; + + mrfld_gpio_set(chip, offset, value); + + raw_spin_lock_irqsave(&priv->lock, flags); + + value = readl(gpdr); + value |= BIT(offset % 32); + writel(value, gpdr); + + raw_spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static void mrfld_irq_ack(struct irq_data *d) +{ + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + void __iomem *gisr = gpio_reg(&priv->chip, gpio, GISR); + unsigned long flags; + + raw_spin_lock_irqsave(&priv->lock, flags); + + writel(BIT(gpio % 32), gisr); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask) +{ + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&priv->lock, flags); + + if (unmask) + value = readl(gimr) | BIT(gpio % 32); + else + value = readl(gimr) & ~BIT(gpio % 32); + writel(value, gimr); + + raw_spin_unlock_irqrestore(&priv->lock, flags); +} + +static void mrfld_irq_mask(struct irq_data *d) +{ + mrfld_irq_unmask_mask(d, false); +} + +static void mrfld_irq_unmask(struct irq_data *d) +{ + mrfld_irq_unmask_mask(d, true); +} + +static int mrfld_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct mrfld_gpio *priv = gpiochip_get_data(gc); + u32 gpio = irqd_to_hwirq(d); + void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER); + void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER); + void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR); + void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&priv->lock, flags); + + if (type & IRQ_TYPE_EDGE_RISING) + value = readl(grer) | BIT(gpio % 32); + else + value = readl(grer) & ~BIT(gpio % 32); + writel(value, grer); + + if (type & IRQ_TYPE_EDGE_FALLING) + value = readl(gfer) | BIT(gpio % 32); + else + value = readl(gfer) & ~BIT(gpio % 32); + writel(value, gfer); + + /* + * To prevent glitches from triggering an unintended level interrupt, + * configure GLPR register first and then configure GITR. 
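+ *
+ * For a consumer none of this is visible; requesting, for instance, a
+ * falling-edge interrupt on one of these pins goes through the usual
+ * path (illustration only, my_handler and "my-dev" are not part of
+ * this driver):
+ *
+ *	irq = gpiod_to_irq(gd);
+ *	if (irq >= 0)
+ *		ret = request_irq(irq, my_handler,
+ *				  IRQF_TRIGGER_FALLING, "my-dev", NULL);
+ *
+ * and ends up in this function with type == IRQ_TYPE_EDGE_FALLING.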
+ */ + if (type & IRQ_TYPE_LEVEL_LOW) + value = readl(glpr) | BIT(gpio % 32); + else + value = readl(glpr) & ~BIT(gpio % 32); + writel(value, glpr); + + if (type & IRQ_TYPE_LEVEL_MASK) { + value = readl(gitr) | BIT(gpio % 32); + writel(value, gitr); + + irq_set_handler_locked(d, handle_level_irq); + } else if (type & IRQ_TYPE_EDGE_BOTH) { + value = readl(gitr) & ~BIT(gpio % 32); + writel(value, gitr); + + irq_set_handler_locked(d, handle_edge_irq); + } + + raw_spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct mrfld_gpio *priv = gpiochip_get_data(gc); + u32 gpio = irqd_to_hwirq(d); + void __iomem *gwmr = gpio_reg(&priv->chip, gpio, GWMR); + void __iomem *gwsr = gpio_reg(&priv->chip, gpio, GWSR); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&priv->lock, flags); + + /* Clear the existing wake status */ + writel(BIT(gpio % 32), gwsr); + + if (on) + value = readl(gwmr) | BIT(gpio % 32); + else + value = readl(gwmr) & ~BIT(gpio % 32); + writel(value, gwmr); + + raw_spin_unlock_irqrestore(&priv->lock, flags); + + dev_dbg(priv->dev, "%sable wake for gpio %u\n", on ? "en" : "dis", gpio); + return 0; +} + +static struct irq_chip mrfld_irqchip = { + .name = "gpio-merrifield", + .irq_ack = mrfld_irq_ack, + .irq_mask = mrfld_irq_mask, + .irq_unmask = mrfld_irq_unmask, + .irq_set_type = mrfld_irq_set_type, + .irq_set_wake = mrfld_irq_set_wake, +}; + +static void mrfld_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct mrfld_gpio *priv = gpiochip_get_data(gc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + unsigned long base, gpio; + + chained_irq_enter(irqchip, desc); + + /* Check GPIO controller to check which pin triggered the interrupt */ + for (base = 0; base < priv->chip.ngpio; base += 32) { + void __iomem *gisr = gpio_reg(&priv->chip, base, GISR); + void __iomem *gimr = gpio_reg(&priv->chip, base, GIMR); + unsigned long pending, enabled; + + pending = readl(gisr); + enabled = readl(gimr); + + /* Only interrupts that are enabled */ + pending &= enabled; + + for_each_set_bit(gpio, &pending, 32) { + unsigned int irq; + + irq = irq_find_mapping(gc->irqdomain, base + gpio); + generic_handle_irq(irq); + } + } + + chained_irq_exit(irqchip, desc); +} + +static void mrfld_irq_init_hw(struct mrfld_gpio *priv) +{ + void __iomem *reg; + unsigned int base; + + for (base = 0; base < priv->chip.ngpio; base += 32) { + /* Clear the rising-edge detect register */ + reg = gpio_reg(&priv->chip, base, GRER); + writel(0, reg); + /* Clear the falling-edge detect register */ + reg = gpio_reg(&priv->chip, base, GFER); + writel(0, reg); + } +} + +static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct mrfld_gpio_pinrange *range; + struct mrfld_gpio *priv; + u32 gpio_base, irq_base; + void __iomem *base; + unsigned int i; + int retval; + + retval = pcim_enable_device(pdev); + if (retval) + return retval; + + retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev)); + if (retval) { + dev_err(&pdev->dev, "I/O memory mapping error\n"); + return retval; + } + + base = pcim_iomap_table(pdev)[1]; + + irq_base = readl(base); + gpio_base = readl(sizeof(u32) + base); + + /* Release the IO mapping, since we already get the info from BAR1 */ + pcim_iounmap_regions(pdev, BIT(1)); + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { 
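The chained handler above folds the interrupt-source register (GISR) into the mask register (GIMR) before dispatching, so a pending but masked line never reaches generic_handle_irq(). The core of that demux loop, modelled in plain C with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t gisr = 0x00000141;     /* pending: bits 0, 6, 8 */
        uint32_t gimr = 0x00000140;     /* enabled: bits 6, 8 */
        uint32_t pending = gisr & gimr; /* bit 0 is dropped */

        while (pending) {
            unsigned int hwirq = __builtin_ctz(pending);

            printf("dispatch hwirq %u\n", hwirq);  /* 6, then 8 */
            pending &= pending - 1;  /* clear lowest set bit */
        }
        return 0;
    }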
+ dev_err(&pdev->dev, "can't allocate chip data\n"); + return -ENOMEM; + } + + priv->dev = &pdev->dev; + priv->reg_base = pcim_iomap_table(pdev)[0]; + + priv->chip.label = dev_name(&pdev->dev); + priv->chip.parent = &pdev->dev; + priv->chip.request = gpiochip_generic_request; + priv->chip.free = gpiochip_generic_free; + priv->chip.direction_input = mrfld_gpio_direction_input; + priv->chip.direction_output = mrfld_gpio_direction_output; + priv->chip.get = mrfld_gpio_get; + priv->chip.set = mrfld_gpio_set; + priv->chip.base = gpio_base; + priv->chip.ngpio = MRFLD_NGPIO; + priv->chip.can_sleep = false; + + raw_spin_lock_init(&priv->lock); + + pci_set_drvdata(pdev, priv); + retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv); + if (retval) { + dev_err(&pdev->dev, "gpiochip_add error %d\n", retval); + return retval; + } + + for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) { + range = &mrfld_gpio_ranges[i]; + retval = gpiochip_add_pin_range(&priv->chip, + "pinctrl-merrifield", + range->gpio_base, + range->pin_base, + range->npins); + if (retval) { + dev_err(&pdev->dev, "failed to add GPIO pin range\n"); + return retval; + } + } + + retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base, + handle_simple_irq, IRQ_TYPE_NONE); + if (retval) { + dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n"); + return retval; + } + + mrfld_irq_init_hw(priv); + + gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq, + mrfld_irq_handler); + + return 0; +} + +static const struct pci_device_id mrfld_gpio_ids[] = { + { PCI_VDEVICE(INTEL, 0x1199) }, + { } +}; +MODULE_DEVICE_TABLE(pci, mrfld_gpio_ids); + +static struct pci_driver mrfld_gpio_driver = { + .name = "gpio-merrifield", + .id_table = mrfld_gpio_ids, + .probe = mrfld_gpio_probe, +}; + +module_pci_driver(mrfld_gpio_driver); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); +MODULE_DESCRIPTION("Intel Merrifield SoC GPIO driver"); +MODULE_LICENSE("GPL v2"); @@ -61,6 +61,8 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.` #include <linux/bitops.h> #include <linux/platform_device.h> #include <linux/mod_devicetable.h> +#include <linux/of.h> +#include <linux/of_device.h> static void bgpio_write8(void __iomem *reg, unsigned long data) { @@ -569,6 +571,41 @@ static void __iomem *bgpio_map(struct platform_device *pdev, return devm_ioremap_resource(&pdev->dev, r); } +#ifdef CONFIG_OF +static const struct of_device_id bgpio_of_match[] = { + { .compatible = "wd,mbl-gpio" }, + { } +}; +MODULE_DEVICE_TABLE(of, bgpio_of_match); + +static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev, + unsigned long *flags) +{ + struct bgpio_pdata *pdata; + + if (!of_match_device(bgpio_of_match, &pdev->dev)) + return NULL; + + pdata = devm_kzalloc(&pdev->dev, sizeof(struct bgpio_pdata), + GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = -1; + + if (of_property_read_bool(pdev->dev.of_node, "no-output")) + *flags |= BGPIOF_NO_OUTPUT; + + return pdata; +} +#else +static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev, + unsigned long *flags) +{ + return NULL; +} +#endif /* CONFIG_OF */ + static int bgpio_pdev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -579,10 +616,19 @@ static int bgpio_pdev_probe(struct platform_device *pdev) void __iomem *dirout; void __iomem *dirin; unsigned long sz; - unsigned long flags = pdev->id_entry->driver_data; + unsigned long flags = 0; int err; struct gpio_chip *gc; - struct bgpio_pdata *pdata = 
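The gpio-mmio hunks here introduce a three-way return convention for the DT parser: NULL means "no OF match, fall back to platform data", an ERR_PTR is a hard failure, and anything else is parsed data. A condensed sketch of how the probe consumes it (example_probe is a made-up name; bgpio_parse_dt and the fallback fields are the ones from the patch):

    /* Sketch, kernel context assumed; not the driver's real function. */
    static int example_probe(struct platform_device *pdev)
    {
        unsigned long flags = 0;
        struct bgpio_pdata *pdata = bgpio_parse_dt(pdev, &flags);

        if (IS_ERR(pdata))
            return PTR_ERR(pdata);          /* DT present but bad */

        if (!pdata) {                       /* no DT: legacy path */
            pdata = dev_get_platdata(&pdev->dev);
            flags = pdev->id_entry->driver_data;
        }
        /* ... proceed with pdata/flags as before ... */
        return 0;
    }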
dev_get_platdata(dev); + struct bgpio_pdata *pdata; + + pdata = bgpio_parse_dt(pdev, &flags); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + + if (!pdata) { + pdata = dev_get_platdata(dev); + flags = pdev->id_entry->driver_data; + } r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); if (!r) @@ -646,6 +692,7 @@ MODULE_DEVICE_TABLE(platform, bgpio_id_table); static struct platform_driver bgpio_driver = { .driver = { .name = "basic-mmio-gpio", + .of_match_table = of_match_ptr(bgpio_of_match), }, .id_table = bgpio_id_table, .probe = bgpio_pdev_probe, @@ -208,7 +208,6 @@ static int palmas_gpio_probe(struct platform_device *pdev) static struct platform_driver palmas_gpio_driver = { .driver.name = "palmas-gpio", - .driver.owner = THIS_MODULE, .driver.of_match_table = of_palmas_gpio_match, .probe = palmas_gpio_probe, }; @@ -44,7 +44,7 @@ #define PCA_GPIO_MASK 0x00FF #define PCA_INT 0x0100 -#define PCA_PCAL 0x0200 +#define PCA_PCAL 0x0200 #define PCA953X_TYPE 0x1000 #define PCA957X_TYPE 0x2000 #define PCA_TYPE_MASK 0xF000 @@ -67,6 +67,8 @@ static const struct i2c_device_id pca953x_id[] = { { "pca9575", 16 | PCA957X_TYPE | PCA_INT, }, { "pca9698", 40 | PCA953X_TYPE, }, + { "pcal9555a", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, }, + { "max7310", 8 | PCA953X_TYPE, }, { "max7312", 16 | PCA953X_TYPE | PCA_INT, }, { "max7313", 16 | PCA953X_TYPE | PCA_INT, }, @@ -90,7 +92,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids); #define MAX_BANK 5 #define BANK_SZ 8 -#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ) +#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ) struct pca953x_chip { unsigned gpio_start; @@ -135,7 +137,7 @@ static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val, static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val, int off) { - int ret = 0; + int ret; int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); int offset = off / BANK_SZ; @@ -163,10 +165,13 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val) NBANK(chip), val); } else { switch (chip->chip_type) { - case PCA953X_TYPE: - ret = i2c_smbus_write_word_data(chip->client, - reg << 1, cpu_to_le16(get_unaligned((u16 *)val))); + case PCA953X_TYPE: { + __le16 word = cpu_to_le16(get_unaligned((u16 *)val)); + + ret = i2c_smbus_write_word_data(chip->client, reg << 1, + (__force u16)word); break; + } case PCA957X_TYPE: ret = i2c_smbus_write_byte_data(chip->client, reg << 1, val[0]); @@ -235,7 +240,6 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) goto exit; chip->reg_direction[off / BANK_SZ] = reg_val; - ret = 0; exit: mutex_unlock(&chip->i2c_lock); return ret; @@ -286,7 +290,6 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, goto exit; chip->reg_direction[off / BANK_SZ] = reg_val; - ret = 0; exit: mutex_unlock(&chip->i2c_lock); return ret; @@ -351,7 +354,6 @@ exit: mutex_unlock(&chip->i2c_lock); } - static void pca953x_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { @@ -820,7 +822,7 @@ static int pca953x_remove(struct i2c_client *client) { struct pca953x_platform_data *pdata = dev_get_platdata(&client->dev); struct pca953x_chip *chip = i2c_get_clientdata(client); - int ret = 0; + int ret; if (pdata && pdata->teardown) { ret = pdata->teardown(client, chip->gpio_chip.base, @@ -861,6 +863,7 @@ static const struct of_device_id pca953x_dt_ids[] = { { .compatible = "maxim,max7315", .data = OF_953X( 8, PCA_INT), }, { .compatible = "ti,pca6107", .data = OF_953X( 8, 
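The NBANK() change is a rounding fix: integer division truncates, so any part whose line count is not a multiple of BANK_SZ (8) used to compute zero banks and every register loop silently did nothing. That is exactly what the four-line pca9536 added just below would have hit. Stand-alone illustration:

    #include <stdio.h>

    #define BANK_SZ 8
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ngpio = 4;  /* the pca9536 case */

        printf("truncating: %u banks\n", ngpio / BANK_SZ);              /* 0 */
        printf("rounding up: %u banks\n", DIV_ROUND_UP(ngpio, BANK_SZ)); /* 1 */
        return 0;
    }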
PCA_INT), }, + { .compatible = "ti,pca9536", .data = OF_953X( 4, 0), }, { .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), }, { .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), }, { .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), }, @@ -440,6 +440,14 @@ static int pcf857x_remove(struct i2c_client *client) return status; } +static void pcf857x_shutdown(struct i2c_client *client) +{ + struct pcf857x *gpio = i2c_get_clientdata(client); + + /* Drive all the I/O lines high */ + gpio->write(gpio->client, BIT(gpio->chip.ngpio) - 1); +} + static struct i2c_driver pcf857x_driver = { .driver = { .name = "pcf857x", @@ -447,6 +455,7 @@ static struct i2c_driver pcf857x_driver = { }, .probe = pcf857x_probe, .remove = pcf857x_remove, + .shutdown = pcf857x_shutdown, .id_table = pcf857x_id, }; @@ -335,6 +335,9 @@ static const struct of_device_id gpio_rcar_of_table[] = { .compatible = "renesas,gpio-r8a7791", .data = &gpio_rcar_info_gen2, }, { + .compatible = "renesas,gpio-r8a7792", + .data = &gpio_rcar_info_gen2, + }, { .compatible = "renesas,gpio-r8a7793", .data = &gpio_rcar_info_gen2, }, { @@ -200,7 +200,6 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) static struct platform_driver rdc321x_gpio_driver = { .driver.name = "rdc321x-gpio", - .driver.owner = THIS_MODULE, .probe = rdc321x_gpio_probe, }; @@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio) return gpio % 8; } -static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg) +static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg) { - struct sch_gpio *sch = gpiochip_get_data(gc); unsigned short offset, bit; u8 reg_val; @@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg) return reg_val; } -static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg, +static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg, int val) { - struct sch_gpio *sch = gpiochip_get_data(gc); unsigned short offset, bit; u8 reg_val; @@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num) struct sch_gpio *sch = gpiochip_get_data(gc); spin_lock(&sch->lock); - sch_gpio_reg_set(gc, gpio_num, GIO, 1); + sch_gpio_reg_set(sch, gpio_num, GIO, 1); spin_unlock(&sch->lock); return 0; } static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num) { - return sch_gpio_reg_get(gc, gpio_num, GLV); + struct sch_gpio *sch = gpiochip_get_data(gc); + return sch_gpio_reg_get(sch, gpio_num, GLV); } static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val) @@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val) struct sch_gpio *sch = gpiochip_get_data(gc); spin_lock(&sch->lock); - sch_gpio_reg_set(gc, gpio_num, GLV, val); + sch_gpio_reg_set(sch, gpio_num, GLV, val); spin_unlock(&sch->lock); } @@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num, struct sch_gpio *sch = gpiochip_get_data(gc); spin_lock(&sch->lock); - sch_gpio_reg_set(gc, gpio_num, GIO, 0); + sch_gpio_reg_set(sch, gpio_num, GIO, 0); spin_unlock(&sch->lock); /* @@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev) * GPIO7 is configured by the CMC as SLPIOVR * Enable GPIO[9:8] core powered gpios explicitly */ - sch_gpio_reg_set(&sch->chip, 8, GEN, 1); - sch_gpio_reg_set(&sch->chip, 9, GEN, 1); + sch_gpio_reg_set(sch, 8, GEN, 1); + sch_gpio_reg_set(sch, 9, GEN, 1); /* * SUS_GPIO[2:0] 
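The pcf857x shutdown hook above leans on the part's quasi-bidirectional I/O: per the PCF857x datasheets, a line written high is only weakly driven and doubles as the input state, so writing all ones is the safe power-off default (that rationale is background, not text from the patch). BIT(ngpio) - 1 builds the all-ones mask:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
        unsigned int ngpio = 16;  /* e.g. a 16-line pcf8575 */

        printf("shutdown mask: 0x%04lx\n", BIT(ngpio) - 1);  /* 0xffff */
        return 0;
    }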
enabled by default * Enable SUS_GPIO3 resume powered gpio explicitly */ - sch_gpio_reg_set(&sch->chip, 13, GEN, 1); + sch_gpio_reg_set(sch, 13, GEN, 1); break; case PCI_DEVICE_ID_INTEL_ITC_LPC: @@ -296,7 +296,6 @@ static int sch311x_gpio_remove(struct platform_device *pdev) static struct platform_driver sch311x_gpio_driver = { .driver.name = DRV_NAME, - .driver.owner = THIS_MODULE, .probe = sch311x_gpio_probe, .remove = sch311x_gpio_remove, }; @@ -68,6 +68,22 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val) stmpe_reg_write(stmpe, reg, mask); } +static int stmpe_gpio_get_direction(struct gpio_chip *chip, + unsigned offset) +{ + struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip); + struct stmpe *stmpe = stmpe_gpio->stmpe; + u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8); + u8 mask = 1 << (offset % 8); + int ret; + + ret = stmpe_reg_read(stmpe, reg); + if (ret < 0) + return ret; + + return !(ret & mask); +} + static int stmpe_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int val) { @@ -106,6 +122,7 @@ static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset) static struct gpio_chip template_chip = { .label = "stmpe", .owner = THIS_MODULE, + .get_direction = stmpe_gpio_get_direction, .direction_input = stmpe_gpio_direction_input, .get = stmpe_gpio_get, .direction_output = stmpe_gpio_direction_output, @@ -416,7 +433,6 @@ static struct platform_driver stmpe_gpio_driver = { .driver = { .suppress_bind_attrs = true, .name = "stmpe-gpio", - .owner = THIS_MODULE, }, .probe = stmpe_gpio_probe, }; @@ -129,7 +129,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val) static const struct syscon_gpio_data clps711x_mctrl_gpio = { /* ARM CLPS711X SYSFLG1 Bits 8-10 */ - .compatible = "cirrus,clps711x-syscon1", + .compatible = "cirrus,ep7209-syscon1", .flags = GPIO_SYSCON_FEAT_IN, .bit_count = 3, .dat_bit_offset = 0x40 * 8 + 8, @@ -168,7 +168,7 @@ static const struct syscon_gpio_data keystone_dsp_gpio = { static const struct of_device_id syscon_gpio_ids[] = { { - .compatible = "cirrus,clps711x-mctrl-gpio", + .compatible = "cirrus,ep7209-mctrl-gpio", .data = &clps711x_mctrl_gpio, }, { @@ -343,7 +343,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) static struct platform_driver tc3589x_gpio_driver = { .driver.name = "tc3589x-gpio", - .driver.owner = THIS_MODULE, .probe = tc3589x_gpio_probe, }; @@ -98,7 +98,6 @@ struct tegra_gpio_info { const struct tegra_gpio_soc_config *soc; struct gpio_chip gc; struct irq_chip ic; - struct lock_class_key lock_class; u32 bank_count; }; @@ -547,6 +546,12 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume) }; +/* + * This lock class tells lockdep that GPIO irqs are in a different category + * than their parents, so it won't report false recursion. 
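The tegra change works because lockdep keys must live in static storage; keeping the key inside the dynamically allocated per-device structure (the field removed above) is what provoked the complaints the comment alludes to. The resulting pattern, sketched for kernel context with a made-up helper name:

    /* One static key for the whole driver; every demuxed GPIO IRQ
     * is tagged with it so lockdep puts child irq_descs in their
     * own class, separate from the parent interrupt's. */
    static struct lock_class_key gpio_lock_class;

    static void example_tag_child_irq(unsigned int irq)
    {
        irq_set_lockdep_class(irq, &gpio_lock_class);
    }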
+ */ +static struct lock_class_key gpio_lock_class; + static int tegra_gpio_probe(struct platform_device *pdev) { const struct tegra_gpio_soc_config *config; @@ -660,7 +665,7 @@ static int tegra_gpio_probe(struct platform_device *pdev) bank = &tgi->bank_info[GPIO_BANK(gpio)]; - irq_set_lockdep_class(irq, &tgi->lock_class); + irq_set_lockdep_class(irq, &gpio_lock_class); irq_set_chip_data(irq, bank); irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); } @@ -230,6 +230,12 @@ static const struct of_device_id tps65218_dt_match[] = { }; MODULE_DEVICE_TABLE(of, tps65218_dt_match); +static const struct platform_device_id tps65218_gpio_id_table[] = { + { "tps65218-gpio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, tps65218_gpio_id_table); + static struct platform_driver tps65218_gpio_driver = { .driver = { .name = "tps65218-gpio", @@ -237,6 +243,7 @@ static struct platform_driver tps65218_gpio_driver = { }, .probe = tps65218_gpio_probe, .remove = tps65218_gpio_remove, + .id_table = tps65218_gpio_id_table, }; module_platform_driver(tps65218_gpio_driver); @@ -131,7 +131,6 @@ static int tps6586x_gpio_probe(struct platform_device *pdev) static struct platform_driver tps6586x_gpio_driver = { .driver.name = "tps6586x-gpio", - .driver.owner = THIS_MODULE, .probe = tps6586x_gpio_probe, }; @@ -184,7 +184,6 @@ skip_init: static struct platform_driver tps65910_gpio_driver = { .driver.name = "tps65910-gpio", - .driver.owner = THIS_MODULE, .probe = tps65910_gpio_probe, }; @@ -440,7 +440,6 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) static struct platform_driver vprbrd_gpio_driver = { .driver.name = "viperboard-gpio", - .driver.owner = THIS_MODULE, .probe = vprbrd_gpio_probe, }; @@ -296,7 +296,6 @@ static int wm831x_gpio_probe(struct platform_device *pdev) static struct platform_driver wm831x_gpio_driver = { .driver.name = "wm831x-gpio", - .driver.owner = THIS_MODULE, .probe = wm831x_gpio_probe, }; @@ -139,7 +139,6 @@ static int wm8350_gpio_probe(struct platform_device *pdev) static struct platform_driver wm8350_gpio_driver = { .driver.name = "wm8350-gpio", - .driver.owner = THIS_MODULE, .probe = wm8350_gpio_probe, }; @@ -299,7 +299,6 @@ static int wm8994_gpio_probe(struct platform_device *pdev) static struct platform_driver wm8994_gpio_driver = { .driver.name = "wm8994-gpio", - .driver.owner = THIS_MODULE, .probe = wm8994_gpio_probe, }; @@ -133,6 +133,53 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) } /** + * xgpio_set_multiple - Write the specified signals of the GPIO device. + * @gc: Pointer to gpio_chip device structure. + * @mask: Mask of the GPIOS to modify. + * @bits: Value to be wrote on each GPIO + * + * This function writes the specified values into the specified signals of the + * GPIO devices. 
+ */ +static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + unsigned long *bits) +{ + unsigned long flags; + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct xgpio_instance *chip = gpiochip_get_data(gc); + int index = xgpio_index(chip, 0); + int offset, i; + + spin_lock_irqsave(&chip->gpio_lock[index], flags); + + /* Write to GPIO signals */ + for (i = 0; i < gc->ngpio; i++) { + if (*mask == 0) + break; + if (index != xgpio_index(chip, i)) { + xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + + xgpio_regoffset(chip, i), + chip->gpio_state[index]); + spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + index = xgpio_index(chip, i); + spin_lock_irqsave(&chip->gpio_lock[index], flags); + } + if (__test_and_clear_bit(i, mask)) { + offset = xgpio_offset(chip, i); + if (test_bit(i, bits)) + chip->gpio_state[index] |= BIT(offset); + else + chip->gpio_state[index] &= ~BIT(offset); + } + } + + xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + + xgpio_regoffset(chip, i), chip->gpio_state[index]); + + spin_unlock_irqrestore(&chip->gpio_lock[index], flags); +} + +/** * xgpio_dir_in - Set the direction of the specified GPIO signal as input. * @gc: Pointer to gpio_chip device structure. * @gpio: GPIO signal number. @@ -306,6 +353,7 @@ static int xgpio_probe(struct platform_device *pdev) chip->mmchip.gc.direction_output = xgpio_dir_out; chip->mmchip.gc.get = xgpio_get; chip->mmchip.gc.set = xgpio_set; + chip->mmchip.gc.set_multiple = xgpio_set_multiple; chip->mmchip.save_regs = xgpio_save_regs; @@ -19,6 +19,7 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/acpi.h> /* * XLP GPIO has multiple 32 bit registers for each feature where each register @@ -299,7 +300,6 @@ static int xlp_gpio_probe(struct platform_device *pdev) struct gpio_chip *gc; struct resource *iores; struct xlp_gpio_priv *priv; - const struct of_device_id *of_id; void __iomem *gpio_base; int irq_base, irq, err; int ngpio; @@ -321,13 +321,26 @@ static int xlp_gpio_probe(struct platform_device *pdev) if (irq < 0) return irq; - of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev); - if (!of_id) { - dev_err(&pdev->dev, "Failed to get soc type!\n"); - return -ENODEV; - } + if (pdev->dev.of_node) { + const struct of_device_id *of_id; - soc_type = (uintptr_t) of_id->data; + of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "Unable to match OF ID\n"); + return -ENODEV; + } + soc_type = (uintptr_t) of_id->data; + } else { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, + &pdev->dev); + if (!acpi_id || !acpi_id->driver_data) { + dev_err(&pdev->dev, "Unable to match ACPI ID\n"); + return -ENODEV; + } + soc_type = (uintptr_t) acpi_id->driver_data; + } switch (soc_type) { case XLP_GPIO_VARIANT_XLP832: @@ -388,14 +401,16 @@ static int xlp_gpio_probe(struct platform_device *pdev) gc->get = xlp_gpio_get; spin_lock_init(&priv->lock); - /* XLP has fixed IRQ range for GPIO interrupts */ - if (soc_type == GPIO_VARIANT_VULCAN) - irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0); - else + + /* XLP(MIPS) has fixed range for GPIO IRQs, Vulcan(ARM64) does not */ + if (soc_type != GPIO_VARIANT_VULCAN) { irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); - if (irq_base < 0) { - dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); - return irq_base; + if (irq_base < 0) { + dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); + return irq_base; + } 
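For the .set_multiple contract that the Xilinx function above implements, the core hands over two bitmaps: mask selects which lines change and bits carries their new values; everything outside mask must keep its level, hence the read-modify-write against the shadowed output state. A single-bank model in plain C with arbitrary values (the real function also crosses banks, dropping and retaking the per-bank lock as it goes):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t state = 0x000000f0;  /* current output latch */
        uint32_t mask  = 0x0000010f;  /* change lines 0-3 and 8 */
        uint32_t bits  = 0x00000105;  /* their new values */

        state = (state & ~mask) | (bits & mask);
        printf("new latch: 0x%08x\n", state);  /* 0x000001f5 */
        return 0;
    }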
+ } else { + irq_base = 0; } err = gpiochip_add_data(gc, priv); @@ -423,10 +438,19 @@ out_free_desc: return err; } +#ifdef CONFIG_ACPI +static const struct acpi_device_id xlp_gpio_acpi_match[] = { + { "BRCM9006", GPIO_VARIANT_VULCAN }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match); +#endif + static struct platform_driver xlp_gpio_driver = { .driver = { .name = "xlp-gpio", .of_match_table = xlp_gpio_of_ids, + .acpi_match_table = ACPI_PTR(xlp_gpio_acpi_match), }, .probe = xlp_gpio_probe, }; @@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev) dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(gpio->clk); } + ret = clk_prepare_enable(gpio->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable clock.\n"); + return ret; + } + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) @@ -747,6 +753,7 @@ err_pm_put: pm_runtime_put(&pdev->dev); err_pm_dis: pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(gpio->clk); return ret; } @@ -836,6 +836,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) } acpi_gpiochip_request_regions(acpi_gpio); + acpi_walk_dep_device_list(handle); } void acpi_gpiochip_remove(struct gpio_chip *chip) @@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (!desc && gpio_is_valid(gpio)) return -EPROBE_DEFER; + err = gpiod_request(desc, label); + if (err) + return err; + if (flags & GPIOF_OPEN_DRAIN) set_bit(FLAG_OPEN_DRAIN, &desc->flags); @@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) if (flags & GPIOF_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - err = gpiod_request(desc, label); - if (err) - return err; - if (flags & GPIOF_DIR_IN) err = gpiod_direction_input(desc); else @@ -16,6 +16,7 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/io.h> +#include <linux/io-mapping.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_address.h> @@ -26,38 +27,30 @@ #include "gpiolib.h" -/* Private data structure for of_gpiochip_find_and_xlate */ -struct gg_data { - enum of_gpio_flags *flags; - struct of_phandle_args gpiospec; +static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) +{ + return chip->gpiodev->dev.of_node == data; +} - struct gpio_desc *out_gpio; -}; +static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) +{ + return gpiochip_find(np, of_gpiochip_match_node); +} -/* Private function for resolving node pointer to gpio_chip */ -static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) +static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip, + struct of_phandle_args *gpiospec, + enum of_gpio_flags *flags) { - struct gg_data *gg_data = data; int ret; - if ((gc->of_node != gg_data->gpiospec.np) || - (gc->of_gpio_n_cells != gg_data->gpiospec.args_count) || - (!gc->of_xlate)) - return false; - - ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); - if (ret < 0) { - /* We've found a gpio chip, but the translation failed. - * Store translation error in out_gpio. - * Return false to keep looking, as more than one gpio chip - * could be registered per of-node. 
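The zynq hunk above pairs the new clk_prepare_enable() with a clk_disable_unprepare() on the existing error path, keeping the unwind ladder in strict reverse order of acquisition. A condensed sketch of that ladder (example_probe is illustrative and simplifies the driver's full unwind):

    static int example_probe(struct platform_device *pdev, struct clk *clk)
    {
        int ret;

        ret = clk_prepare_enable(clk);
        if (ret)
            return ret;

        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
            goto err_pm_dis;

        return 0;

    err_pm_dis:
        pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(clk);  /* the step this patch adds */
        return ret;
    }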
- */ - gg_data->out_gpio = ERR_PTR(ret); - return false; - } - - gg_data->out_gpio = gpiochip_get_desc(gc, ret); - return true; + if (chip->of_gpio_n_cells != gpiospec->args_count) + return ERR_PTR(-EINVAL); + + ret = chip->of_xlate(chip, gpiospec, flags); + if (ret < 0) + return ERR_PTR(ret); + + return gpiochip_get_desc(chip, ret); } /** @@ -74,34 +67,37 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, const char *propname, int index, enum of_gpio_flags *flags) { - /* Return -EPROBE_DEFER to support probe() functions to be called - * later when the GPIO actually becomes available - */ - struct gg_data gg_data = { - .flags = flags, - .out_gpio = ERR_PTR(-EPROBE_DEFER) - }; + struct of_phandle_args gpiospec; + struct gpio_chip *chip; + struct gpio_desc *desc; int ret; - /* .of_xlate might decide to not fill in the flags, so clear it. */ - if (flags) - *flags = 0; - ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index, - &gg_data.gpiospec); + &gpiospec); if (ret) { pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n", __func__, propname, np->full_name, index); return ERR_PTR(ret); } - gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); + chip = of_find_gpiochip_by_node(gpiospec.np); + if (!chip) { + desc = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, flags); + if (IS_ERR(desc)) + goto out; - of_node_put(gg_data.gpiospec.np); pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n", __func__, propname, np->full_name, index, - PTR_ERR_OR_ZERO(gg_data.out_gpio)); - return gg_data.out_gpio; + PTR_ERR_OR_ZERO(desc)); + +out: + of_node_put(gpiospec.np); + + return desc; } int of_get_named_gpio_flags(struct device_node *np, const char *list_name, @@ -121,6 +117,7 @@ EXPORT_SYMBOL(of_get_named_gpio_flags); /** * of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API * @np: device node to get GPIO from + * @chip: GPIO chip whose hog is parsed * @name: GPIO line name * @lflags: gpio_lookup_flags - returned from of_find_gpio() or * of_parse_own_gpio() @@ -130,19 +127,19 @@ EXPORT_SYMBOL(of_get_named_gpio_flags); * value on the error condition. 
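After the rewrite above, the lookup path is linear: parse the phandle into of_phandle_args, find the chip whose device of_node matches, check the cell count, then let the chip's of_xlate turn the specifier into a line offset. For the common two-cell binding, the stock of_gpio_simple_xlate (hooked up later in this file) boils down to roughly this sketch, with checks trimmed and a hypothetical name:

    /* Two-cell translation: cell 0 = line offset on this chip,
     * cell 1 = OF_GPIO_* flags. Kernel context assumed. */
    static int example_xlate(struct gpio_chip *gc,
                             const struct of_phandle_args *gpiospec,
                             u32 *flags)
    {
        if (gpiospec->args[0] >= gc->ngpio)
            return -EINVAL;
        if (flags)
            *flags = gpiospec->args[1];
        return gpiospec->args[0];
    }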
*/ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, + struct gpio_chip *chip, const char **name, enum gpio_lookup_flags *lflags, enum gpiod_flags *dflags) { struct device_node *chip_np; enum of_gpio_flags xlate_flags; - struct gg_data gg_data = { - .flags = &xlate_flags, - }; + struct of_phandle_args gpiospec; + struct gpio_desc *desc; u32 tmp; - int i, ret; + int ret; - chip_np = np->parent; + chip_np = chip->of_node; if (!chip_np) return ERR_PTR(-EINVAL); @@ -154,25 +151,16 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, if (ret) return ERR_PTR(ret); - if (tmp > MAX_PHANDLE_ARGS) - return ERR_PTR(-EINVAL); + gpiospec.np = chip_np; + gpiospec.args_count = tmp; - gg_data.gpiospec.args_count = tmp; - gg_data.gpiospec.np = chip_np; - for (i = 0; i < tmp; i++) { - ret = of_property_read_u32_index(np, "gpios", i, - &gg_data.gpiospec.args[i]); - if (ret) - return ERR_PTR(ret); - } + ret = of_property_read_u32_array(np, "gpios", gpiospec.args, tmp); + if (ret) + return ERR_PTR(ret); - gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); - if (!gg_data.out_gpio) { - if (np->parent == np) - return ERR_PTR(-ENXIO); - else - return ERR_PTR(-EINVAL); - } + desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, &xlate_flags); + if (IS_ERR(desc)) + return desc; if (xlate_flags & OF_GPIO_ACTIVE_LOW) *lflags |= GPIO_ACTIVE_LOW; @@ -185,14 +173,14 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, *dflags |= GPIOD_OUT_HIGH; else { pr_warn("GPIO line %d (%s): no hogging state specified, bailing out\n", - desc_to_gpio(gg_data.out_gpio), np->name); + desc_to_gpio(desc), np->name); return ERR_PTR(-EINVAL); } if (name && of_property_read_string(np, "line-name", name)) *name = np->name; - return gg_data.out_gpio; + return desc; } /** @@ -261,7 +249,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip) if (!of_property_read_bool(np, "gpio-hog")) continue; - desc = of_parse_own_gpio(np, &name, &lflags, &dflags); + desc = of_parse_own_gpio(np, chip, &name, &lflags, &dflags); if (IS_ERR(desc)) continue; @@ -409,6 +397,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) break; pctldev = of_pinctrl_get(pinspec.np); + of_node_put(pinspec.np); if (!pctldev) return -EPROBE_DEFER; @@ -486,6 +475,9 @@ int of_gpiochip_add(struct gpio_chip *chip) chip->of_xlate = of_gpio_simple_xlate; } + if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS) + return -EINVAL; + status = of_gpiochip_add_pin_range(chip); if (status) return status; @@ -16,10 +16,14 @@ #include <linux/gpio/driver.h> #include <linux/gpio/machine.h> #include <linux/pinctrl/consumer.h> -#include <linux/idr.h> #include <linux/cdev.h> #include <linux/fs.h> #include <linux/uaccess.h> +#include <linux/compat.h> +#include <linux/anon_inodes.h> +#include <linux/kfifo.h> +#include <linux/poll.h> +#include <linux/timekeeping.h> #include <uapi/linux/gpio.h> #include "gpiolib.h" @@ -309,6 +313,497 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) return 0; } +/* + * GPIO line handle management + */ + +/** + * struct linehandle_state - contains the state of a userspace handle + * @gdev: the GPIO device the handle pertains to + * @label: consumer label used to tag descriptors + * @descs: the GPIO descriptors held by this handle + * @numdescs: the number of descriptors held in the descs array + */ +struct linehandle_state { + struct gpio_device *gdev; + const char *label; + struct gpio_desc *descs[GPIOHANDLES_MAX]; + u32 numdescs; +}; + +static long linehandle_ioctl(struct file *filep, unsigned 
int cmd, + unsigned long arg) +{ + struct linehandle_state *lh = filep->private_data; + void __user *ip = (void __user *)arg; + struct gpiohandle_data ghd; + int i; + + if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { + int val; + + /* TODO: check if descriptors are really input */ + for (i = 0; i < lh->numdescs; i++) { + val = gpiod_get_value_cansleep(lh->descs[i]); + if (val < 0) + return val; + ghd.values[i] = val; + } + + if (copy_to_user(ip, &ghd, sizeof(ghd))) + return -EFAULT; + + return 0; + } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) { + int vals[GPIOHANDLES_MAX]; + + /* TODO: check if descriptors are really output */ + if (copy_from_user(&ghd, ip, sizeof(ghd))) + return -EFAULT; + + /* Clamp all values to [0,1] */ + for (i = 0; i < lh->numdescs; i++) + vals[i] = !!ghd.values[i]; + + /* Reuse the array setting function */ + gpiod_set_array_value_complex(false, + true, + lh->numdescs, + lh->descs, + vals); + return 0; + } + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +static long linehandle_ioctl_compat(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + return linehandle_ioctl(filep, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int linehandle_release(struct inode *inode, struct file *filep) +{ + struct linehandle_state *lh = filep->private_data; + struct gpio_device *gdev = lh->gdev; + int i; + + for (i = 0; i < lh->numdescs; i++) + gpiod_free(lh->descs[i]); + kfree(lh->label); + kfree(lh); + put_device(&gdev->dev); + return 0; +} + +static const struct file_operations linehandle_fileops = { + .release = linehandle_release, + .owner = THIS_MODULE, + .llseek = noop_llseek, + .unlocked_ioctl = linehandle_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = linehandle_ioctl_compat, +#endif +}; + +static int linehandle_create(struct gpio_device *gdev, void __user *ip) +{ + struct gpiohandle_request handlereq; + struct linehandle_state *lh; + int fd, i, ret; + + if (copy_from_user(&handlereq, ip, sizeof(handlereq))) + return -EFAULT; + if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX)) + return -EINVAL; + + lh = kzalloc(sizeof(*lh), GFP_KERNEL); + if (!lh) + return -ENOMEM; + lh->gdev = gdev; + get_device(&gdev->dev); + + /* Make sure this is terminated */ + handlereq.consumer_label[sizeof(handlereq.consumer_label)-1] = '\0'; + if (strlen(handlereq.consumer_label)) { + lh->label = kstrdup(handlereq.consumer_label, + GFP_KERNEL); + if (!lh->label) { + ret = -ENOMEM; + goto out_free_lh; + } + } + + /* Request each GPIO */ + for (i = 0; i < handlereq.lines; i++) { + u32 offset = handlereq.lineoffsets[i]; + u32 lflags = handlereq.flags; + struct gpio_desc *desc; + + desc = &gdev->descs[offset]; + ret = gpiod_request(desc, lh->label); + if (ret) + goto out_free_descs; + lh->descs[i] = desc; + + if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) + set_bit(FLAG_ACTIVE_LOW, &desc->flags); + if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + + /* + * Lines have to be requested explicitly for input + * or output, else the line will be treated "as is". 
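The line-handle plumbing here is one half of the new GPIO character-device ABI; the other half is user space. A minimal consumer sketch (it assumes /dev/gpiochip0 exists and that its line 4 may be driven; both are arbitrary, and error handling is trimmed to the essentials):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/gpio.h>

    int main(void)
    {
        struct gpiohandle_request req;
        struct gpiohandle_data data;
        int cfd = open("/dev/gpiochip0", O_RDWR);

        if (cfd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.lineoffsets[0] = 4;                 /* arbitrary line */
        req.lines = 1;
        req.flags = GPIOHANDLE_REQUEST_OUTPUT;
        strcpy(req.consumer_label, "example");
        if (ioctl(cfd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
            return 1;

        data.values[0] = 1;                     /* drive it high */
        ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);

        close(req.fd);
        close(cfd);
        return 0;
    }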
+ */ + if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + int val = !!handlereq.default_values[i]; + + ret = gpiod_direction_output(desc, val); + if (ret) + goto out_free_descs; + } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { + ret = gpiod_direction_input(desc); + if (ret) + goto out_free_descs; + } + dev_dbg(&gdev->dev, "registered chardev handle for line %d\n", + offset); + } + /* Let i point at the last handle */ + i--; + lh->numdescs = handlereq.lines; + + fd = anon_inode_getfd("gpio-linehandle", + &linehandle_fileops, + lh, + O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out_free_descs; + } + + handlereq.fd = fd; + if (copy_to_user(ip, &handlereq, sizeof(handlereq))) { + ret = -EFAULT; + goto out_free_descs; + } + + dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n", + lh->numdescs); + + return 0; + +out_free_descs: + for (; i >= 0; i--) + gpiod_free(lh->descs[i]); + kfree(lh->label); +out_free_lh: + kfree(lh); + put_device(&gdev->dev); + return ret; +} + +/* + * GPIO line event management + */ + +/** + * struct lineevent_state - contains the state of a userspace event + * @gdev: the GPIO device the event pertains to + * @label: consumer label used to tag descriptors + * @desc: the GPIO descriptor held by this event + * @eflags: the event flags this line was requested with + * @irq: the interrupt that trigger in response to events on this GPIO + * @wait: wait queue that handles blocking reads of events + * @events: KFIFO for the GPIO events + * @read_lock: mutex lock to protect reads from colliding with adding + * new events to the FIFO + */ +struct lineevent_state { + struct gpio_device *gdev; + const char *label; + struct gpio_desc *desc; + u32 eflags; + int irq; + wait_queue_head_t wait; + DECLARE_KFIFO(events, struct gpioevent_data, 16); + struct mutex read_lock; +}; + +static unsigned int lineevent_poll(struct file *filep, + struct poll_table_struct *wait) +{ + struct lineevent_state *le = filep->private_data; + unsigned int events = 0; + + poll_wait(filep, &le->wait, wait); + + if (!kfifo_is_empty(&le->events)) + events = POLLIN | POLLRDNORM; + + return events; +} + + +static ssize_t lineevent_read(struct file *filep, + char __user *buf, + size_t count, + loff_t *f_ps) +{ + struct lineevent_state *le = filep->private_data; + unsigned int copied; + int ret; + + if (count < sizeof(struct gpioevent_data)) + return -EINVAL; + + do { + if (kfifo_is_empty(&le->events)) { + if (filep->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(le->wait, + !kfifo_is_empty(&le->events)); + if (ret) + return ret; + } + + if (mutex_lock_interruptible(&le->read_lock)) + return -ERESTARTSYS; + ret = kfifo_to_user(&le->events, buf, count, &copied); + mutex_unlock(&le->read_lock); + + if (ret) + return ret; + + /* + * If we couldn't read anything from the fifo (a different + * thread might have been faster) we either return -EAGAIN if + * the file descriptor is non-blocking, otherwise we go back to + * sleep and wait for more data to arrive. 
+ */ + if (copied == 0 && (filep->f_flags & O_NONBLOCK)) + return -EAGAIN; + + } while (copied == 0); + + return copied; +} + +static int lineevent_release(struct inode *inode, struct file *filep) +{ + struct lineevent_state *le = filep->private_data; + struct gpio_device *gdev = le->gdev; + + free_irq(le->irq, le); + gpiod_free(le->desc); + kfree(le->label); + kfree(le); + put_device(&gdev->dev); + return 0; +} + +static long lineevent_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + struct lineevent_state *le = filep->private_data; + void __user *ip = (void __user *)arg; + struct gpiohandle_data ghd; + + /* + * We can get the value for an event line but not set it, + * because it is input by definition. + */ + if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { + int val; + + val = gpiod_get_value_cansleep(le->desc); + if (val < 0) + return val; + ghd.values[0] = val; + + if (copy_to_user(ip, &ghd, sizeof(ghd))) + return -EFAULT; + + return 0; + } + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +static long lineevent_ioctl_compat(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + return lineevent_ioctl(filep, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static const struct file_operations lineevent_fileops = { + .release = lineevent_release, + .read = lineevent_read, + .poll = lineevent_poll, + .owner = THIS_MODULE, + .llseek = noop_llseek, + .unlocked_ioctl = lineevent_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = lineevent_ioctl_compat, +#endif +}; + +static irqreturn_t lineevent_irq_thread(int irq, void *p) +{ + struct lineevent_state *le = p; + struct gpioevent_data ge; + int ret; + + ge.timestamp = ktime_get_real_ns(); + + if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { + int level = gpiod_get_value_cansleep(le->desc); + + if (level) + /* Emit low-to-high event */ + ge.id = GPIOEVENT_EVENT_RISING_EDGE; + else + /* Emit high-to-low event */ + ge.id = GPIOEVENT_EVENT_FALLING_EDGE; + } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { + /* Emit low-to-high event */ + ge.id = GPIOEVENT_EVENT_RISING_EDGE; + } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { + /* Emit high-to-low event */ + ge.id = GPIOEVENT_EVENT_FALLING_EDGE; + } else { + return IRQ_NONE; + } + + ret = kfifo_put(&le->events, ge); + if (ret != 0) + wake_up_poll(&le->wait, POLLIN); + + return IRQ_HANDLED; +} + +static int lineevent_create(struct gpio_device *gdev, void __user *ip) +{ + struct gpioevent_request eventreq; + struct lineevent_state *le; + struct gpio_desc *desc; + u32 offset; + u32 lflags; + u32 eflags; + int fd; + int ret; + int irqflags = 0; + + if (copy_from_user(&eventreq, ip, sizeof(eventreq))) + return -EFAULT; + + le = kzalloc(sizeof(*le), GFP_KERNEL); + if (!le) + return -ENOMEM; + le->gdev = gdev; + get_device(&gdev->dev); + + /* Make sure this is terminated */ + eventreq.consumer_label[sizeof(eventreq.consumer_label)-1] = '\0'; + if (strlen(eventreq.consumer_label)) { + le->label = kstrdup(eventreq.consumer_label, + GFP_KERNEL); + if (!le->label) { + ret = -ENOMEM; + goto out_free_le; + } + } + + offset = eventreq.lineoffset; + lflags = eventreq.handleflags; + eflags = eventreq.eventflags; + + /* This is just wrong: we don't look for events on output lines */ + if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { + ret = -EINVAL; + goto out_free_label; + } + + desc = &gdev->descs[offset]; + ret = gpiod_request(desc, le->label); + if (ret) + goto out_free_desc; + le->desc = desc; + le->eflags = eflags; + + if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) + 
set_bit(FLAG_ACTIVE_LOW, &desc->flags); + if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + + ret = gpiod_direction_input(desc); + if (ret) + goto out_free_desc; + + le->irq = gpiod_to_irq(desc); + if (le->irq <= 0) { + ret = -ENODEV; + goto out_free_desc; + } + + if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) + irqflags |= IRQF_TRIGGER_RISING; + if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) + irqflags |= IRQF_TRIGGER_FALLING; + irqflags |= IRQF_ONESHOT; + irqflags |= IRQF_SHARED; + + INIT_KFIFO(le->events); + init_waitqueue_head(&le->wait); + mutex_init(&le->read_lock); + + /* Request a thread to read the events */ + ret = request_threaded_irq(le->irq, + NULL, + lineevent_irq_thread, + irqflags, + le->label, + le); + if (ret) + goto out_free_desc; + + fd = anon_inode_getfd("gpio-event", + &lineevent_fileops, + le, + O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out_free_irq; + } + + eventreq.fd = fd; + if (copy_to_user(ip, &eventreq, sizeof(eventreq))) { + ret = -EFAULT; + goto out_free_irq; + } + + return 0; + +out_free_irq: + free_irq(le->irq, le); +out_free_desc: + gpiod_free(le->desc); +out_free_label: + kfree(le->label); +out_free_le: + kfree(le); + put_device(&gdev->dev); + return ret; +} + /** * gpio_ioctl() - ioctl handler for the GPIO chardev */ @@ -316,7 +811,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct gpio_device *gdev = filp->private_data; struct gpio_chip *chip = gdev->chip; - int __user *ip = (int __user *)arg; + void __user *ip = (void __user *)arg; /* We fail any subsequent ioctl():s when the chip is gone */ if (!chip) @@ -384,10 +879,22 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) return -EFAULT; return 0; + } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { + return linehandle_create(gdev, ip); + } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { + return lineevent_create(gdev, ip); } return -EINVAL; } +#ifdef CONFIG_COMPAT +static long gpio_ioctl_compat(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -431,14 +938,15 @@ static const struct file_operations gpio_fileops = { .owner = THIS_MODULE, .llseek = noop_llseek, .unlocked_ioctl = gpio_ioctl, - .compat_ioctl = gpio_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = gpio_ioctl_compat, +#endif }; static void gpiodevice_release(struct device *dev) { struct gpio_device *gdev = dev_get_drvdata(dev); - cdev_del(&gdev->chrdev); list_del(&gdev->list); ida_simple_remove(&gpio_ida, gdev->id); kfree(gdev->label); @@ -471,7 +979,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) /* From this point, the .release() function cleans up gpio_device */ gdev->dev.release = gpiodevice_release; - get_device(&gdev->dev); pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", __func__, gdev->base, gdev->base + gdev->ngpio - 1, dev_name(&gdev->dev), gdev->chip->label ? 
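On the user-space side of the event ABI above, each edge produces one struct gpioevent_data carrying the ktime_get_real_ns() timestamp taken in the IRQ thread. A minimal reader sketch (same arbitrary chip and line assumptions as the handle example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/gpio.h>

    int main(void)
    {
        struct gpioevent_request req;
        struct gpioevent_data ev;
        int cfd = open("/dev/gpiochip0", O_RDWR);

        if (cfd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.lineoffset = 4;                     /* arbitrary line */
        req.handleflags = GPIOHANDLE_REQUEST_INPUT;
        req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
        strcpy(req.consumer_label, "example-ev");
        if (ioctl(cfd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0)
            return 1;

        while (read(req.fd, &ev, sizeof(ev)) == sizeof(ev))
            printf("%s edge at %llu ns\n",
                   ev.id == GPIOEVENT_EVENT_RISING_EDGE ?
                   "rising" : "falling",
                   (unsigned long long)ev.timestamp);
        return 0;
    }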
: "generic"); @@ -539,13 +1046,14 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) if (chip->parent) { gdev->dev.parent = chip->parent; gdev->dev.of_node = chip->parent->of_node; - } else { + } + #ifdef CONFIG_OF_GPIO /* If the gpiochip has an assigned OF node this takes precedence */ - if (chip->of_node) - gdev->dev.of_node = chip->of_node; + if (chip->of_node) + gdev->dev.of_node = chip->of_node; #endif - } + gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL); if (gdev->id < 0) { status = gdev->id; @@ -618,6 +1126,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) goto err_free_label; } + spin_unlock_irqrestore(&gpio_lock, flags); + for (i = 0; i < chip->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; @@ -649,8 +1159,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) } } - spin_unlock_irqrestore(&gpio_lock, flags); - #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); #endif @@ -759,6 +1267,8 @@ void gpiochip_remove(struct gpio_chip *chip) * be removed, else it will be dangling until the last user is * gone. */ + cdev_del(&gdev->chrdev); + device_del(&gdev->dev); put_device(&gdev->dev); } EXPORT_SYMBOL_GPL(gpiochip_remove); @@ -858,7 +1368,7 @@ struct gpio_chip *gpiochip_find(void *data, spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) - if (match(gdev->chip, data)) + if (gdev->chip && match(gdev->chip, data)) break; /* No match? */ @@ -1341,14 +1851,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label) spin_lock_irqsave(&gpio_lock, flags); } done: - if (status < 0) { - /* Clear flags that might have been set by the caller before - * requesting the GPIO. - */ - clear_bit(FLAG_ACTIVE_LOW, &desc->flags); - clear_bit(FLAG_OPEN_DRAIN, &desc->flags); - clear_bit(FLAG_OPEN_SOURCE, &desc->flags); - } spin_unlock_irqrestore(&gpio_lock, flags); return status; } @@ -1356,11 +1858,18 @@ done: /* * This descriptor validation needs to be inserted verbatim into each * function taking a descriptor, so we need to use a preprocessor - * macro to avoid endless duplication. + * macro to avoid endless duplication. If the desc is NULL it is an + * optional GPIO and calls should just bail out. */ #define VALIDATE_DESC(desc) do { \ - if (!desc || !desc->gdev) { \ - pr_warn("%s: invalid GPIO\n", __func__); \ + if (!desc) \ + return 0; \ + if (IS_ERR(desc)) { \ + pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ + return PTR_ERR(desc); \ + } \ + if (!desc->gdev) { \ + pr_warn("%s: invalid GPIO (no device)\n", __func__); \ return -EINVAL; \ } \ if ( !desc->gdev->chip ) { \ @@ -1370,8 +1879,14 @@ done: } } while (0) #define VALIDATE_DESC_VOID(desc) do { \ - if (!desc || !desc->gdev) { \ - pr_warn("%s: invalid GPIO\n", __func__); \ + if (!desc) \ + return; \ + if (IS_ERR(desc)) { \ + pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ + return; \ + } \ + if (!desc->gdev) { \ + pr_warn("%s: invalid GPIO (no device)\n", __func__); \ return; \ } \ if (!desc->gdev->chip) { \ @@ -2040,7 +2555,14 @@ int gpiod_to_irq(const struct gpio_desc *desc) struct gpio_chip *chip; int offset; - VALIDATE_DESC(desc); + /* + * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics + * requires this function to not return zero on an invalid descriptor + * but rather a negative error number. 
+ */ + if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) + return -EINVAL; + chip = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); if (chip->to_irq) { @@ -2066,17 +2588,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); */ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) { - if (offset >= chip->ngpio) - return -EINVAL; + struct gpio_desc *desc; + + desc = gpiochip_get_desc(chip, offset); + if (IS_ERR(desc)) + return PTR_ERR(desc); - if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { + /* Flush direction if something changed behind our back */ + if (chip->get_direction) { + int dir = chip->get_direction(chip, offset); + + if (dir) + clear_bit(FLAG_IS_OUT, &desc->flags); + else + set_bit(FLAG_IS_OUT, &desc->flags); + } + + if (test_bit(FLAG_IS_OUT, &desc->flags)) { chip_err(chip, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; } - set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); + set_bit(FLAG_USED_AS_IRQ, &desc->flags); return 0; } EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); @@ -2297,7 +2832,7 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, &of_flags); - if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) + if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) break; } @@ -2543,28 +3078,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, } EXPORT_SYMBOL_GPL(gpiod_get_optional); -/** - * gpiod_parse_flags - helper function to parse GPIO lookup flags - * @desc: gpio to be setup - * @lflags: gpio_lookup_flags - returned from of_find_gpio() or - * of_get_gpio_hog() - * - * Set the GPIO descriptor flags based on the given GPIO lookup flags. - */ -static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) -{ - if (lflags & GPIO_ACTIVE_LOW) - set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (lflags & GPIO_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (lflags & GPIO_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); -} /** * gpiod_configure_flags - helper function to configure a given GPIO * @desc: gpio whose value will be assigned * @con_id: function within the GPIO consumer + * @lflags: gpio_lookup_flags - returned from of_find_gpio() or + * of_get_gpio_hog() * @dflags: gpiod_flags - optional GPIO initialization flags * * Return 0 on success, -ENOENT if no GPIO has been assigned to the @@ -2572,10 +3092,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) * occurred while trying to acquire the GPIO. */ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, - enum gpiod_flags dflags) + unsigned long lflags, enum gpiod_flags dflags) { int status; + if (lflags & GPIO_ACTIVE_LOW) + set_bit(FLAG_ACTIVE_LOW, &desc->flags); + if (lflags & GPIO_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + if (lflags & GPIO_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + /* No particular flag request, return here... 
*/ if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) { pr_debug("no flags found for %s\n", con_id); @@ -2642,13 +3169,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - gpiod_parse_flags(desc, lookupflags); - status = gpiod_request(desc, con_id); if (status < 0) return ERR_PTR(status); - status = gpiod_configure_flags(desc, con_id, flags); + status = gpiod_configure_flags(desc, con_id, lookupflags, flags); if (status < 0) { dev_dbg(dev, "setup of GPIO %s failed\n", con_id); gpiod_put(desc); @@ -2704,6 +3229,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, if (IS_ERR(desc)) return desc; + ret = gpiod_request(desc, NULL); + if (ret) + return ERR_PTR(ret); + if (active_low) set_bit(FLAG_ACTIVE_LOW, &desc->flags); @@ -2714,10 +3243,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, set_bit(FLAG_OPEN_SOURCE, &desc->flags); } - ret = gpiod_request(desc, NULL); - if (ret) - return ERR_PTR(ret); - return desc; } EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod); @@ -2770,8 +3295,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, chip = gpiod_to_chip(desc); hwnum = gpio_chip_hwgpio(desc); - gpiod_parse_flags(desc, lflags); - local_desc = gpiochip_request_own_desc(chip, hwnum, name); if (IS_ERR(local_desc)) { status = PTR_ERR(local_desc); @@ -2780,7 +3303,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, return status; } - status = gpiod_configure_flags(desc, name, dflags); + status = gpiod_configure_flags(desc, name, lflags, dflags); if (status < 0) { pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n", name, chip->label, hwnum, status); @@ -1820,6 +1820,8 @@ struct amdgpu_asic_funcs { /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); + /* query virtual capabilities */ + u32 (*get_virtual_caps)(struct amdgpu_device *adev); }; /* @@ -1914,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); /* GPU virtualization */ +#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) +#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) struct amdgpu_virtualization { bool supports_sr_iov; + bool is_virtual; + u32 caps; }; /* @@ -2204,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) +#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) @@ -421,29 +421,6 @@ static int acp_suspend(void *handle) static int acp_resume(void *handle) { - int i, ret; - struct acp_pm_domain *apd; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* return early if no ACP */ - if (!adev->acp.acp_genpd) - return 0; - - /* SMU block will power on ACP irrespective of ACP runtime status. - * Power off explicitly based on genpd ACP runtime status so that ACP - * hw and ACP-genpd status are in sync. 
- * 'suspend_power_off' represents "Power status before system suspend" - */ - if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { - apd = container_of(&adev->acp.acp_genpd->gpd, - struct acp_pm_domain, gpd); - - for (i = 4; i >= 0 ; i--) { - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); - if (ret) - pr_err("ACP tile %d tile suspend failed\n", i); - } - } return 0; } @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) return result; } +static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) +{ + CGS_FUNC_ADEV; + if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { + release_firmware(adev->pm.fw); + return 0; + } + /* cannot release other firmware because they are not created by cgs */ + return -EINVAL; +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -898,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, struct cgs_acpi_method_argument *argument = NULL; uint32_t i, count; acpi_status status; - int result; + int result = 0; uint32_t func_no = 0xFFFFFFFF; handle = ACPI_HANDLE(&adev->pdev->dev); @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_pm_query_clock_limits, amdgpu_cgs_set_camera_voltages, amdgpu_cgs_get_firmware_info, + amdgpu_cgs_rel_firmware, amdgpu_cgs_set_powergating_state, amdgpu_cgs_set_clockgating_state, amdgpu_cgs_get_active_displays_info, @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) */ static void amdgpu_atombios_fini(struct amdgpu_device *adev) { - if (adev->mode_info.atom_context) + if (adev->mode_info.atom_context) { kfree(adev->mode_info.atom_context->scratch); + kfree(adev->mode_info.atom_context->iio); + } kfree(adev->mode_info.atom_context); adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_block_status[i].valid = false; } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { + if (adev->ip_blocks[i].funcs->late_fini) + adev->ip_blocks[i].funcs->late_fini((void *)adev); + } + return 0; } @@ -1378,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev) return 0; } +static bool amdgpu_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * amdgpu_device_init - initialize the driver * @@ -1512,9 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->virtualization.supports_sr_iov = amdgpu_atombios_has_gpu_virtualization_table(adev); + /* Check if we are executing in a virtualized environment */ + adev->virtualization.is_virtual = amdgpu_device_is_virtual(); + adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); + /* Post card if necessary */ if (!amdgpu_card_posted(adev) || - adev->virtualization.supports_sr_iov) { + (adev->virtualization.is_virtual && + !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { if (!adev->bios) { dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); return -EINVAL; @@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_memory_clock = adev->pm.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; - dev_info.num_rb_pipes = adev->gfx.config.num_rbs; + dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * 
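amdgpu_device_is_virtual() above keys off X86_FEATURE_HYPERVISOR, the synthetic CPUID bit (leaf 1, ECX bit 31) that hypervisors raise for their guests. The same check from user space, for x86 with gcc or clang:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("hypervisor: %s\n",
               (ecx & (1u << 31)) ? "yes" : "no");
        return 0;
    }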
+ adev->gfx.config.max_shader_engines; dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; dev_info._pad = 0; dev_info.ids_flags = 0; @@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state = 0; - long idx; + unsigned long idx; int ret; if (strlen(buf) == 1) adev->pp_force_state_enabled = false; - else { - ret = kstrtol(buf, 0, &idx); + else if (adev->pp_enabled) { + struct pp_states_info data; - if (ret) { + ret = kstrtoul(buf, 0, &idx); + if (ret || idx >= ARRAY_SIZE(data.states)) { count = -EINVAL; goto fail; } - if (adev->pp_enabled) { - struct pp_states_info data; - amdgpu_dpm_get_pp_num_states(adev, &data); - state = data.states[idx]; - /* only set user selected power states */ - if (state != POWER_STATE_TYPE_INTERNAL_BOOT && - state != POWER_STATE_TYPE_DEFAULT) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); - adev->pp_force_state_enabled = true; - } + amdgpu_dpm_get_pp_num_states(adev, &data); + state = data.states[idx]; + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + amdgpu_dpm_dispatch_task(adev, + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + adev->pp_force_state_enabled = true; } } fail: @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) if (ret) return ret; -#ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_fini(adev->powerplay.pp_handle); - } -#endif - return ret; } @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) return ret; } +static void amdgpu_pp_late_fini(void *handle) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pp_enabled) { + amdgpu_pm_sysfs_fini(adev); + amd_powerplay_fini(adev->powerplay.pp_handle); + } + + if (adev->powerplay.ip_funcs->late_fini) + adev->powerplay.ip_funcs->late_fini( + adev->powerplay.pp_handle); +#endif +} + static int amdgpu_pp_suspend(void *handle) { int ret = 0; @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { .sw_fini = amdgpu_pp_sw_fini, .hw_init = amdgpu_pp_hw_init, .hw_fini = amdgpu_pp_hw_fini, + .late_fini = amdgpu_pp_late_fini, .suspend = amdgpu_pp_suspend, .resume = amdgpu_pp_resume, .is_idle = amdgpu_pp_is_idle, @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->ring = NULL; ring->ring_obj = NULL; + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, return r; } r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); + memset(sa_manager->cpu_ptr, 0, sa_manager->size); amdgpu_bo_unreserve(sa_manager->bo); return r; } @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int r; - if (adev->uvd.vcpu_bo == NULL) - return 0; + kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (!r) { - amdgpu_bo_kunmap(adev->uvd.vcpu_bo); - amdgpu_bo_unpin(adev->uvd.vcpu_bo); - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - } + if (adev->uvd.vcpu_bo) { + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); + if (!r) { + amdgpu_bo_kunmap(adev->uvd.vcpu_bo); + 
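The late_fini hook introduced above runs as a second, reverse-order sweep after every block's normal sw_fini pass, which is what lets amdgpu_pp_late_fini tear down the powerplay sysfs entries and free the pp handle only after all other IP blocks are gone. A minimal, runnable userspace model of that two-pass teardown (block names are illustrative, not the driver's):

#include <stdio.h>

struct ip_funcs {
	void (*sw_fini)(void *handle);
	void (*late_fini)(void *handle);	/* optional, may be NULL */
};

static void gfx_sw_fini(void *h)  { printf("gfx: sw_fini\n"); }
static void pp_sw_fini(void *h)   { printf("pp: sw_fini\n"); }
static void pp_late_fini(void *h) { printf("pp: late_fini (sysfs + pp handle freed last)\n"); }

static struct ip_funcs blocks[] = {
	{ gfx_sw_fini, NULL },
	{ pp_sw_fini, pp_late_fini },
};

int main(void)
{
	int n = sizeof(blocks) / sizeof(blocks[0]), i;

	for (i = n - 1; i >= 0; i--)		/* first pass: ordinary teardown */
		blocks[i].sw_fini(NULL);
	for (i = n - 1; i >= 0; i--)		/* second pass: late_fini only after every sw_fini */
		if (blocks[i].late_fini)
			blocks[i].late_fini(NULL);
	return 0;
}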
amdgpu_bo_unpin(adev->uvd.vcpu_bo); + amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + } - amdgpu_bo_unref(&adev->uvd.vcpu_bo); + amdgpu_bo_unref(&adev->uvd.vcpu_bo); + } amdgpu_ring_fini(&adev->uvd.ring); @@ -1105,6 +1106,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) if (fences == 0 && handles == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); + /* workaround: the uvd clock remains high even + * when uvd dpm is disabled on Polaris10 */ + if (adev->asic_type == CHIP_POLARIS10) + amdgpu_asic_set_uvd_clocks(adev, 0, 0); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); } @@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } +void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data) +{ + PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); + + args.ucRegIndex = offset; + args.lpI2CDataOut = data; + args.ucFlag = 1; + args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; + args.ucTransBytes = 1; + args.ucSlaveAddr = slave_addr; + args.ucLineNumber = line_number; + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); +} @@ -27,5 +27,7 @@ int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num); u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap); +void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, + u8 slave_addr, u8 line_number, u8 offset, u8 data); #endif @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) ci_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, return true; } +static u32 cik_get_virtual_caps(struct amdgpu_device *adev) +{ + /* CIK does not support SR-IOV */ + return 0; +} + static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, @@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, + .get_virtual_caps = &cik_get_virtual_caps, /* these should be moved to their own ip modules */ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + +static void cik_sdma_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /* * sDMA - System DMA * Starting with CIK, the GPU has new asynchronous @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + 
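From ci_dpm_sw_fini through the cik_sdma and gfx free_microcode helpers, every fini hunk in this series repeats the same release-then-NULL pair. release_firmware() already ignores a NULL argument, and clearing the pointer turns any later release path into a no-op; a hypothetical helper that would factor the idiom (not part of the patch):

#include <linux/firmware.h>

/* hypothetical convenience wrapper, not in this series */
static inline void amdgpu_free_fw(const struct firmware **fw)
{
	release_firmware(*fw);	/* NULL-safe in the firmware loader core */
	*fw = NULL;		/* a second fini pass now does nothing */
}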
cik_sdma_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) if (r) return r; - /* unhalt the MEs */ - cik_sdma_enable(adev, true); + /* halt the engine before programming */ + cik_sdma_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + cik_sdma_free_microcode(adev); return 0; } @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) static int fiji_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -991,6 +991,22 @@ out: return err; } +static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; +} + /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); + gfx_v7_0_free_microcode(adev); return 0; } @@ -4816,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - if ((ring->me == me_id) & (ring->pipe == pipe_id)) + if ((ring->me == me_id) && (ring->pipe == pipe_id)) amdgpu_fence_process(ring); } break; @@ -28,6 +28,7 @@ #include "vid.h" #include "amdgpu_ucode.h" #include "amdgpu_atombios.h" +#include "atombios_i2c.h" #include "clearstate_vi.h" #include "gmc/gmc_8_2_d.h" @@ -47,6 +48,8 @@ #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" +#include "smu/smu_7_1_3_d.h" + #define GFX8_NUM_GFX_RINGS 1 #define GFX8_NUM_COMPUTE_RINGS 8 @@ -282,6 +285,7 @@ static const u32 golden_settings_polaris11_a11[] = mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, }; static const u32 polaris11_golden_common_all[] = @@ -297,7 +301,8 @@ static const u32 polaris11_golden_common_all[] = static const u32 golden_settings_polaris10_a11[] = { mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -311,6 +316,7 @@ static const u32 golden_settings_polaris10_a11[] = mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, }; static const u32 polaris10_golden_common_all[] = @@ -692,6 +698,11 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); 
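The gfx_v7_0_eop_irq change above swaps a bitwise & for logical &&. With both operands produced by == comparisons the result happened to be identical, but & silently misbehaves as soon as an operand is a non-0/1 flag, which this standalone snippet demonstrates:

#include <stdio.h>

int main(void)
{
	int me_match = 4, pipe_match = 2;	/* "true" flags without overlapping bits */

	printf("bitwise &:  %d\n", me_match & pipe_match);	/* 0 - wrongly false */
	printf("logical &&: %d\n", me_match && pipe_match);	/* 1 - correctly true */
	return 0;
}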
+ WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); + if (adev->pdev->revision == 0xc7) { + amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); + amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); + } break; case CHIP_CARRIZO: amdgpu_program_register_sequence(adev, @@ -836,6 +847,26 @@ err1: return r; } + +static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + + kfree(adev->gfx.rlc.register_list_format); +} + static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -1983,7 +2014,7 @@ static int gfx_v8_0_sw_fini(void *handle) gfx_v8_0_rlc_fini(adev); - kfree(adev->gfx.rlc.register_list_format); + gfx_v8_0_free_microcode(adev); return 0; } @@ -3974,11 +4005,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x3a00161a); amdgpu_ring_write(ring, 0x0000002e); break; - case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); amdgpu_ring_write(ring, 0x00000000); break; + case CHIP_TOPAZ: + amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? + 0x00000000 : 0x00000002); + amdgpu_ring_write(ring, 0x00000000); + break; case CHIP_STONEY: amdgpu_ring_write(ring, 0x00000000); amdgpu_ring_write(ring, 0x00000000); @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) static int iceland_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v2_4_init_microcode - load ucode images from disk * @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + sdma_v2_4_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) return -EINVAL; } - /* unhalt the MEs */ - sdma_v2_4_enable(adev, true); + /* halt the engine before programming */ + sdma_v2_4_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v2_4_gfx_resume(adev); @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) 
amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v2_4_free_microcode(adev); return 0; } @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v3_0_init_microcode - load ucode images from disk * @@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + /* unhalt the MEs */ + sdma_v3_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v3_0_ctx_switch_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) } } - /* unhalt the MEs */ - sdma_v3_0_enable(adev, true); - /* enable sdma ring preemption */ - sdma_v3_0_ctx_switch_enable(adev, true); + /* disable sdma engine before programming it */ + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v3_0_gfx_resume(adev); @@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v3_0_free_microcode(adev); return 0; } @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) static int tonga_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } +static u32 vi_get_virtual_caps(struct amdgpu_device *adev) +{ + u32 caps = 0; + u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) + caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; + + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) + caps |= AMDGPU_VIRT_CAPS_IS_VF; + + return caps; +} + static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { {mmGB_MACROTILE_MODE7, true}, }; @@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, + .get_virtual_caps = &vi_get_virtual_caps, /* these should be moved to their own ip modules */ .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, pqm_uninit(&p->pqm); /* Iterate over all process device data structure and check - * if we should reset all wavefronts */ - list_for_each_entry(pdd, &p->per_device_data, per_device_list) + * if we should delete debug managers and reset all wavefronts + */ + 
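cik_sdma, sdma_v2_4 and sdma_v3_0 all converge on the same bring-up order in these hunks: halt the engine, program every ring (now including the IB_RPTR/IB_OFFSET registers), unhalt once, and only then run the ring tests. A compact runnable model of that sequence (stubbed, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define NUM_INSTANCES 2
static bool halted = true;

static void sdma_enable(bool on) { halted = !on; printf("engine %s\n", on ? "unhalted" : "halted"); }
static void program_ring(int i)  { printf("ring %d: RB/IB pointers zeroed while halted=%d\n", i, halted); }
static void test_ring(int i)     { printf("ring %d: tested against a running engine\n", i); }

int main(void)
{
	int i;

	sdma_enable(false);			/* halt before touching ring registers */
	for (i = 0; i < NUM_INSTANCES; i++)
		program_ring(i);
	sdma_enable(true);			/* unhalt only after every ring is programmed */
	for (i = 0; i < NUM_INSTANCES; i++)
		test_ring(i);
	return 0;
}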
list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + if ((pdd->dev->dbgmgr) && + (pdd->dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(pdd->dev->dbgmgr); + if (pdd->reset_wavefronts) { pr_warn("amdkfd: Resetting all wave fronts\n"); dbgdev_wave_reset_wavefronts(pdd->dev, p); pdd->reset_wavefronts = false; } + } mutex_unlock(&p->mutex); @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) idx = srcu_read_lock(&kfd_processes_srcu); + /* + * Look for the process that matches the pasid. If there is no such + * process, we either released it in amdkfd's own notifier, or there + * is a bug. Unfortunately, there is no way to tell... + */ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) - if (p->pasid == pasid) - break; + if (p->pasid == pasid) { - srcu_read_unlock(&kfd_processes_srcu, idx); + srcu_read_unlock(&kfd_processes_srcu, idx); - BUG_ON(p->pasid != pasid); + pr_debug("Unbinding process %d from IOMMU\n", pasid); - mutex_lock(&p->mutex); + mutex_lock(&p->mutex); - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) - kfd_dbgmgr_destroy(dev->dbgmgr); + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(dev->dbgmgr); - pqm_uninit(&p->pqm); + pqm_uninit(&p->pqm); - pdd = kfd_get_process_device_data(dev, p); + pdd = kfd_get_process_device_data(dev, p); - if (!pdd) { - mutex_unlock(&p->mutex); - return; - } + if (!pdd) { + mutex_unlock(&p->mutex); + return; + } - if (pdd->reset_wavefronts) { - dbgdev_wave_reset_wavefronts(pdd->dev, p); - pdd->reset_wavefronts = false; - } + if (pdd->reset_wavefronts) { + dbgdev_wave_reset_wavefronts(pdd->dev, p); + pdd->reset_wavefronts = false; + } - /* - * Just mark pdd as unbound, because we still need it to call - * amd_iommu_unbind_pasid() in when the process exits. - * We don't call amd_iommu_unbind_pasid() here - * because the IOMMU called us. - */ - pdd->bound = false; + /* + * Just mark pdd as unbound, because we still need it + * to call amd_iommu_unbind_pasid() when the + * process exits. + * We don't call amd_iommu_unbind_pasid() here + * because the IOMMU called us. 
+ */ + pdd->bound = false; - mutex_unlock(&p->mutex); + mutex_unlock(&p->mutex); + + return; + } + + srcu_read_unlock(&kfd_processes_srcu, idx); } struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) @@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.simd_count); if (dev->mem_bank_count < dev->node_props.mem_banks_count) { - pr_warn("kfd: mem_banks_count truncated from %d to %d\n", + pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", dev->node_props.mem_banks_count, dev->mem_bank_count); sysfs_show_32bit_prop(buffer, "mem_banks_count", @@ -157,6 +157,7 @@ struct amd_ip_funcs { int (*hw_init)(void *handle); /* tears down the hw state */ int (*hw_fini)(void *handle); + void (*late_fini)(void *handle); /* handles IP specific hw/sw changes for suspend */ int (*suspend)(void *handle); /* handles IP specific hw/sw changes for resume */ @@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5 ULONG ulReserved[12]; }ATOM_ASIC_PROFILING_INFO_V3_5; +/* for Polars10/11 AVFS parameters */ +typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6 +{ + ATOM_COMMON_TABLE_HEADER asHeader; + ULONG ulMaxVddc; + ULONG ulMinVddc; + USHORT usLkgEuseIndex; + UCHAR ucLkgEfuseBitLSB; + UCHAR ucLkgEfuseLength; + ULONG ulLkgEncodeLn_MaxDivMin; + ULONG ulLkgEncodeMax; + ULONG ulLkgEncodeMin; + EFUSE_LINEAR_FUNC_PARAM sRoFuse; + ULONG ulEvvDefaultVddc; + ULONG ulEvvNoCalcVddc; + ULONG ulSpeed_Model; + ULONG ulSM_A0; + ULONG ulSM_A1; + ULONG ulSM_A2; + ULONG ulSM_A3; + ULONG ulSM_A4; + ULONG ulSM_A5; + ULONG ulSM_A6; + ULONG ulSM_A7; + UCHAR ucSM_A0_sign; + UCHAR ucSM_A1_sign; + UCHAR ucSM_A2_sign; + UCHAR ucSM_A3_sign; + UCHAR ucSM_A4_sign; + UCHAR ucSM_A5_sign; + UCHAR ucSM_A6_sign; + UCHAR ucSM_A7_sign; + ULONG ulMargin_RO_a; + ULONG ulMargin_RO_b; + ULONG ulMargin_RO_c; + ULONG ulMargin_fixed; + ULONG ulMargin_Fmax_mean; + ULONG ulMargin_plat_mean; + ULONG ulMargin_Fmax_sigma; + ULONG ulMargin_plat_sigma; + ULONG ulMargin_DC_sigma; + ULONG ulLoadLineSlop; + ULONG ulaTDClimitPerDPM[8]; + ULONG ulaNoCalcVddcPerDPM[8]; + ULONG ulAVFS_meanNsigma_Acontant0; + ULONG ulAVFS_meanNsigma_Acontant1; + ULONG ulAVFS_meanNsigma_Acontant2; + USHORT usAVFS_meanNsigma_DC_tol_sigma; + USHORT usAVFS_meanNsigma_Platform_mean; + USHORT usAVFS_meanNsigma_Platform_sigma; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a0; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a1; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a2; + ULONG ulGB_VDROOP_TABLE_CKSON_a0; + ULONG ulGB_VDROOP_TABLE_CKSON_a1; + ULONG ulGB_VDROOP_TABLE_CKSON_a2; + ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2; + ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b; + ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1; + USHORT usAVFSGB_FUSE_TABLE_CKSON_m2; + ULONG ulAVFSGB_FUSE_TABLE_CKSON_b; + USHORT usMaxVoltage_0_25mv; + UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF; + UCHAR ucEnableGB_VDROOP_TABLE_CKSON; + UCHAR ucEnableGB_FUSE_TABLE_CKSOFF; + UCHAR ucEnableGB_FUSE_TABLE_CKSON; + USHORT usPSM_Age_ComFactor; + UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage; + UCHAR ucReserved; +}ATOM_ASIC_PROFILING_INFO_V3_6; + typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ ULONG ulMaxSclkFreq; @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info); +typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, + enum cgs_ucode_id type); + typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum 
amd_powergating_state state); @@ -645,6 +648,7 @@ struct cgs_ops { cgs_set_camera_voltages_t set_camera_voltages; /* Firmware Info */ cgs_get_firmware_info get_firmware_info; + cgs_rel_firmware rel_firmware; /* cg pg interface*/ cgs_set_powergating_state set_powergating_state; cgs_set_clockgating_state set_clockgating_state; @@ -738,6 +742,8 @@ struct cgs_device CGS_CALL(set_camera_voltages,dev,mask,voltages) #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) +#define cgs_rel_firmware(dev, type) \ + CGS_CALL(rel_firmware, dev, type) #define cgs_set_powergating_state(dev, block_type, state) \ CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) ret = hwmgr->hwmgr_func->backend_init(hwmgr); if (ret) - goto err; + goto err1; pr_info("amdgpu: powerplay initialized\n"); return 0; +err1: + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); err: pr_err("amdgpu: powerplay initialization failed\n"); return ret; @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) if (hwmgr->hwmgr_func->backend_fini != NULL) ret = hwmgr->hwmgr_func->backend_fini(hwmgr); + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); + return ret; } @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) pem_unregister_interrupts(eventmgr); pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); - - if (eventmgr != NULL) - kfree(eventmgr); } int eventmgr_init(struct pp_instance *handle) @@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; + data->force_pcie_gen = PP_PCIEGenInvalid; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; @@ -1830,7 +1832,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, @@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, { PHM_FUNC_CHECK(hwmgr); - if (hwmgr->hwmgr_func->store_cc6_data == NULL) + if (display_config == NULL) return -EINVAL; hwmgr->display_config = *display_config; + + if (hwmgr->hwmgr_func->store_cc6_data == NULL) + return -EINVAL; + /* to do pass other display configuration in furture */ if (hwmgr->hwmgr_func->store_cc6_data) @@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) if (hwmgr == NULL || hwmgr->ps == NULL) return -EINVAL; + /* do hwmgr finish*/ + kfree(hwmgr->backend); + + kfree(hwmgr->start_thermal_controller.function_list); + + kfree(hwmgr->set_temperature_range.function_list); + kfree(hwmgr->ps); kfree(hwmgr); return 0; @@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } int phm_find_boot_level(void *table, @@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record { uint8_t phases; uint8_t cks_enable; uint8_t cks_voffset; + uint32_t sclk_offset; }; typedef struct 
phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; @@ -732,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, table->Smio[level] |= data->mvdd_voltage_table.entries[level].smio_low; } - table->SmioMask2 = data->vddci_voltage_table.mask_low; + table->SmioMask2 = data->mvdd_voltage_table.mask_low; table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); } @@ -999,7 +999,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), (dep_table->entries[i].vddc - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; } if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) @@ -1296,7 +1296,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, } mem_level->MclkFrequency = clock; - mem_level->StutterEnable = 0; mem_level->EnabledForThrottle = 1; mem_level->EnabledForActivity = 0; mem_level->UpHyst = 0; @@ -1304,7 +1303,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, mem_level->VoltageDownHyst = 0; mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; mem_level->StutterEnable = false; - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = info.display_count; @@ -1363,7 +1361,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) * a higher state by default such that we are not effected by * up threshold or and MCLK DPM latency. */ - levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; + levels[0].ActivityLevel = 0x1f; CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); data->smc_state_table.MemoryDpmLevelCount = @@ -1424,22 +1422,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - if (!data->sclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, - * already converted to SMC_UL */ - sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_sclk, - table->ACPILevel.SclkFrequency, - &table->ACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDC voltage value " - "in Clock Dependency Table", ); - } else { - sclk_frequency = data->vbios_boot_state.sclk_bootup_value; - table->ACPILevel.MinVoltage = - data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE; - } + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); @@ -1464,24 +1459,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); - if (!data->mclk_dpm_key_disabled) { - /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ 
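The two find_closest_vddci fixes earlier in these hunks (fiji_find_closest_vddci and phm_find_closest_vddci) correct the same off-by-one: when the search loop falls through, its index equals the table count, so entries[i] reads one element past the end while entries[i-1] is the largest valid entry. A standalone reproduction of the fall-through case (table values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned short table[] = { 800, 850, 900 };	/* sorted vddci entries */
	unsigned int count = sizeof(table) / sizeof(table[0]);
	unsigned short want = 950;			/* larger than every entry */
	unsigned int i;

	for (i = 0; i < count; i++)
		if (want <= table[i])
			break;
	/* fall-through: i == count, table[i] is out of bounds, table[i-1] is the max */
	printf("closest vddci = %u\n", (unsigned int)table[i - 1]);
	return 0;
}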
- table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; - result = polaris10_get_dependency_volt_by_clk(hwmgr, - table_info->vdd_dep_on_mclk, - table->MemoryACPILevel.MclkFrequency, - &table->MemoryACPILevel.MinVoltage, &mvdd); - PP_ASSERT_WITH_CODE((0 == result), - "Cannot find ACPI VDDCI voltage value " - "in Clock Dependency Table", - ); - } else { - table->MemoryACPILevel.MclkFrequency = - data->vbios_boot_state.mclk_bootup_value; - table->MemoryACPILevel.MinVoltage = - data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE; - } + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = + data->dpm_table.mclk_table.dpm_levels[0].value; + result = polaris10_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); us_mvdd = 0; if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) || @@ -1526,6 +1515,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->VceLevelCount = (uint8_t)(mm_table->count); table->VceBootLevel = 0; @@ -1535,9 +1525,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, table->VceLevel[count].MinVoltage = 0; table->VceLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->VceLevel[count].MinVoltage |= - ((mm_table->entries[count].vddc - data->vddc_vddci_delta) * - VOLTAGE_SCALE) << VDDCI_SHIFT; + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /*retrieve divider value for VBIOS */ @@ -1566,6 +1565,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->SamuBootLevel = 0; table->SamuLevelCount = (uint8_t)(mm_table->count); @@ -1576,8 +1576,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + 
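The three-way vddci selection added for the VCE levels above is repeated verbatim for the SAMU and UVD levels just below. A hypothetical helper, expressed in the patch's own symbols (not part of the patch), makes the shared shape easier to see; note that, as written, the boot-value fallback already applies VOLTAGE_SCALE and VDDCI_SHIFT before the callers shift the result again, which looks worth double-checking:

/* hypothetical refactor of the logic repeated for VCE, SAMU and UVD */
static uint32_t polaris10_pick_vddci(struct polaris10_hwmgr *data, uint16_t vddc)
{
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
		return (uint32_t)phm_find_closest_vddci(&data->vddci_voltage_table,
							vddc - VDDC_VDDCI_DELTA);
	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
		return vddc - VDDC_VDDCI_DELTA;
	/* caution: this branch pre-applies the scale and shift the callers add */
	return (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
}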
table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /* retrieve divider value for VBIOS */ @@ -1660,6 +1668,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = table_info->mm_dep_table; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t vddci; table->UvdLevelCount = (uint8_t)(mm_table->count); table->UvdBootLevel = 0; @@ -1670,8 +1679,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; /* retrieve divider value for VBIOS */ @@ -1692,8 +1709,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); - } + return result; } @@ -1761,12 +1778,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { - uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, - volt_with_cks, value; - uint16_t clock_freq_u16; + uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, - volt_offset = 0; + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1778,50 +1792,44 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) * if the part is SS or FF. if RO >= 1660MHz, part is FF. 
*/ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (146 * 4)); - efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (148 * 4)); + ixSMU_EFUSE_0 + (67 * 4)); efuse &= 0xFF000000; efuse = efuse >> 24; - efuse2 &= 0xF; - - if (efuse2 == 1) - ro = (2300 - 1350) * efuse / 255 + 1350; - else - ro = (2500 - 1000) * efuse / 255 + 1000; - if (ro >= 1660) - type = 0; - else - type = 1; + if (hwmgr->chip_id == CHIP_POLARIS10) { + min = 1000; + max = 2300; + } else { + min = 1100; + max = 2100; + } - /* Populate Stretch amount */ - data->smc_state_table.ClockStretcherAmount = stretch_amount; + ro = efuse * (max -min)/255 + min; /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ for (i = 0; i < sclk_table->count; i++) { data->smc_state_table.Sclk_CKS_masterEn0_7 |= sclk_table->entries[i].cks_enable << i; - volt_without_cks = (uint32_t)((14041 * - (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / - (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); - volt_with_cks = (uint32_t)((13946 * - (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / - (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + if (hwmgr->chip_id == CHIP_POLARIS10) { + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \ + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); + } else { + volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \ + (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); + volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ + (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); + } + if (volt_without_cks >= volt_with_cks) volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + - sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); + data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; } - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - STRETCH_ENABLE, 0x0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x1); - /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x0); - + data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6; /* Populate CKS Lookup Table */ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) stretch_amount2 = 0; @@ -1835,69 +1843,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) return -EINVAL); } - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL); - value &= 0xFFC2FF87; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = - polaris10_clock_stretcher_lookup_table[stretch_amount2][0]; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = - polaris10_clock_stretcher_lookup_table[stretch_amount2][1]; - clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. 
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100); - if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16 - && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) { - /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ - value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; - /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ - value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; - /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ - value |= (polaris10_clock_stretch_amount_conversion - [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]] - [stretch_amount]) << 3; - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq); - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq); - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = - polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= - (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL, value); - - /* Populate DDT Lookup Table */ - for (i = 0; i < 4; i++) { - /* Assign the minimum and maximum VID stored - * in the last row of Clock Stretcher Voltage Table. - */ - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID = - (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2]; - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID = - (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3]; - /* Loop through each SCLK and check the frequency - * to see if it lies within the frequency for clock stretcher. - */ - for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { - cks_setting = 0; - clock_freq = PP_SMC_TO_HOST_UL( - data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency); - /* Check the allowed frequency against the sclk level[j]. - * Sclk's endianness has already been converted, - * and it's in 10Khz unit, - * as opposed to Data table, which is in Mhz unit. 
- */ - if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) { - cks_setting |= 0x2; - if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100) - cks_setting |= 0x1; - } - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting - |= cks_setting << (j * 2); - } - CONVERT_FROM_HOST_TO_SMC_US( - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting); - } - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); value &= 0xFFFFFFFE; cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); @@ -1956,6 +1901,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, return 0; } + +int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + SMU74_Discrete_DpmTable *table = &(data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return result; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; + table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + 
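polaris10_populate_avfs_parameters funnels every fuse-derived value through PP_HOST_TO_SMC_UL/_US before the tables are copied into SMC SRAM, which suggests the SMC consumes a fixed byte order regardless of the host. A self-contained sketch of such a conversion, assuming a little-endian host and a big-endian SMC (an assumption, not stated by this patch):

#include <stdint.h>
#include <stdio.h>

/* stand-in for PP_HOST_TO_SMC_UL under the stated endianness assumption */
static uint32_t host_to_smc_ul(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t a0 = 0x00012345;	/* e.g. one of the VDROOP coefficients */

	printf("host 0x%08x -> smc 0x%08x\n", a0, host_to_smc_ul(a0));
	return 0;
}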
AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < NUM_VFT_COLUMNS; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); + } + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), + &tmp, data->sram_end); + + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + data->sram_end); + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), + &tmp, data->sram_end); + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + data->sram_end); + + data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; + } + return result; +} + + /** * Initializes the SMC table and uploads it * @@ -2056,6 +2085,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to populate Clock Stretcher Data Table!", return result); } + + result = polaris10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); + table->CurrSclkPllRange = 0xff; table->GraphicsVoltageChangeEnable = 1; table->GraphicsThermThrottleEnable = 1; @@ -2252,6 +2285,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + offsetof(SMU74_SoftRegisters, HandshakeDisables); /* enable SCLK dpm */ if (!data->sclk_dpm_key_disabled) @@ -2262,6 +2298,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) /* enable MCLK dpm */ if (0 == data->mclk_dpm_key_disabled) { +/* Disable UVD - SMU handshake for MCLK. 
*/ + soft_register_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -2269,7 +2311,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Failed to enable MCLK DPM during DPM Start Function!", return -1); - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); @@ -2471,6 +2512,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable VR hot GPIO interrupt!", result = tmp_result); + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay); + tmp_result = polaris10_enable_sclk_control(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable SCLK control!", result = tmp_result); @@ -2606,6 +2649,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM); + if (hwmgr->chip_id == CHIP_POLARIS11) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); @@ -2638,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint16_t vv_id; - uint16_t vddc = 0; + uint32_t vddc = 0; uint16_t i, j; uint32_t sclk = 0; struct phm_ppt_v1_information *table_info = @@ -2669,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) continue); - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ - PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), + /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 
+ * real voltage level in unit of 0.01mv */ + PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), "Invalid VDDC value", result = -EINVAL;); /* the voltage should not be zero nor equal to leakage ID */ @@ -2896,6 +2941,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) return 0; } +int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + uint32_t i; + + if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) { + if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) + return 0; + + for (i = 0; i < lookup_table->count; i++) { + if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { + dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; + return 0; + } + } + } + return 0; +} + + int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2938,6 +3008,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; + data->enable_tdc_limit_feature = true; + data->enable_pkg_pwr_tracking_feature = true; + data->force_pcie_gen = PP_PCIEGenInvalid; + data->mclk_stutter_mode_threshold = 40000; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; @@ -2962,8 +3037,13 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; } + if (table_info->cac_dtp_table->usClockStretchAmount != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + polaris10_set_features_platform_caps(hwmgr); + polaris10_patch_voltage_workaround(hwmgr); polaris10_init_dpm_defaults(hwmgr); /* Get leakage voltage based on leakage ID. */ @@ -3520,10 +3600,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; ATOM_Tonga_POWERPLAYTABLE *powerplay_table = (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) + PPTable_Generic_SubTable_Header *sclk_dep_table = + (PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *) (((unsigned long)powerplay_table) + @@ -3575,7 +3656,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, /* Performance levels are arranged from low to high. 
*/ performance_level->memory_clock = mclk_dep_table->entries [state_entry->ucMemoryClockIndexLow].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries [state_entry->ucEngineClockIndexLow].ulSclk; performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, state_entry->ucPCIEGenLow); @@ -3586,8 +3671,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, [polaris10_power_state->performance_level_count++]); performance_level->memory_clock = mclk_dep_table->entries [state_entry->ucMemoryClockIndexHigh].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries + + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries [state_entry->ucEngineClockIndexHigh].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, state_entry->ucPCIEGenHigh); performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, @@ -3645,7 +3736,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, switch (state->classification.ui_label) { case PP_StateUILabel_Performance: data->use_pcie_performance_levels = true; - for (i = 0; i < ps->performance_level_count; i++) { if (data->pcie_gen_performance.max < ps->performance_levels[i].pcie_gen) @@ -3661,7 +3751,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, ps->performance_levels[i].pcie_lane) data->pcie_lane_performance.max = ps->performance_levels[i].pcie_lane; - if (data->pcie_lane_performance.min > ps->performance_levels[i].pcie_lane) data->pcie_lane_performance.min = @@ -4187,12 +4276,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); if (!bgate) { - data->smc_state_table.SamuBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); + data->smc_state_table.SamuBootLevel = 0; mm_boot_level_offset = data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); mm_boot_level_offset /= 4; @@ -4327,6 +4413,15 @@ static int polaris10_notify_link_speed_change_after_state_change( return 0; } +static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 
0 : -EINVAL; +} + static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) { int tmp_result, result = 0; @@ -4375,6 +4470,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i "Failed to program memory timing parameters!", result = tmp_result); + tmp_result = polaris10_notify_smc_display(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to notify smc display settings!", + result = tmp_result); + tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", @@ -4409,6 +4509,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); } + int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) { PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; @@ -4428,8 +4529,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ polaris10_notify_smc_display_change(hwmgr, false); - else - polaris10_notify_smc_display_change(hwmgr, true); return 0; } @@ -4470,6 +4569,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) frame_time_in_us = 1000000 / refresh_rate; pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + data->frame_time_x2 = frame_time_in_us * 2 / 100; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); @@ -4478,8 +4579,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0); - return 0; } @@ -4591,7 +4690,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr) return 0; } - data->need_long_memory_training = true; + data->need_long_memory_training = false; /* * PPMCME_FirmwareDescriptorEntry *pfd = NULL; @@ -312,6 +312,10 @@ struct polaris10_hwmgr { /* soft pptable for re-uploading into smu */ void *soft_pp_table; + + uint32_t avfs_vdroop_override_setting; + bool apply_avfs_cks_off_voltage; + uint32_t frame_time_x2; }; /* To convert to Q8.8 format for firmware */ @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&data->power_tune_table, - sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) + (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) PP_ASSERT_WITH_CODE(false, "Attempt to download PmFuseTable Failed!", return -EINVAL); @@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, int ret; struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return 0; + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); + ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 
0 : -1; @@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index) return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; } +bool acpi_atcs_notify_pcie_device_ready(void *device) +{ + int32_t temp_buffer = 1; + + return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS, + ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, + &temp_buffer, + NULL, + 0, + sizeof(temp_buffer), + 0); +} + + int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) { struct atcs_pref_req_input atcs_input; @@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) int result; struct cgs_system_info info = {0}; - if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) + if (0 != acpi_atcs_notify_pcie_device_ready(device)) return -EINVAL; info.size = sizeof(struct cgs_system_info); @@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &atcs_input, &atcs_output, - 0, + 1, sizeof(atcs_input), sizeof(atcs_output)); if (result != 0) @@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, } int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage) + uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) { int result; @@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ if (0 != result) return result; - *voltage = get_voltage_info_param_space.usVoltageLevel; + *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; return result; } @@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr return 0; } + +int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) +{ + ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; + + if (param == NULL) + return -EINVAL; + + profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) + cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), + NULL, NULL, NULL); + if (!profile) + return -1; + + param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; + param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; + param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; + param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; + param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; + param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; + param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; + param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; + param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; + param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; + param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; + param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; + param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; + param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; + param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; +
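[Editor's note] The engine-clock hunks earlier in this series branch on ucRevId because the Tonga and Polaris SCLK dependency tables share the same generic sub-table header but carry different record layouts (the Polaris record adds ulSclkOffset). A minimal sketch of that dispatch pattern using the table types from this series; the helper name read_sclk_for_index is illustrative, not part of the patch:

/*
 * Read the revision byte from the shared header first, then cast the
 * pointer to the matching record layout: ucRevId 0 is the Tonga-style
 * record, ucRevId 1 the Polaris-style record.
 */
static uint32_t read_sclk_for_index(const PPTable_Generic_SubTable_Header *hdr,
		uint8_t index)
{
	if (hdr->ucRevId == 0)
		return ((const ATOM_Tonga_SCLK_Dependency_Table *)hdr)->entries[index].ulSclk;

	return ((const ATOM_Polaris_SCLK_Dependency_Table *)hdr)->entries[index].ulSclk;
}
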
param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; + param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; + param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; + param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; + param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; + param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; + param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; + param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; + param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; + + return 0; +} @@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment { }; typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; +struct pp_atom_ctrl__avfs_parameters { + uint32_t ulAVFS_meanNsigma_Acontant0; + uint32_t ulAVFS_meanNsigma_Acontant1; + uint32_t ulAVFS_meanNsigma_Acontant2; + uint16_t usAVFS_meanNsigma_DC_tol_sigma; + uint16_t usAVFS_meanNsigma_Platform_mean; + uint16_t usAVFS_meanNsigma_Platform_sigma; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2; + uint32_t ulGB_VDROOP_TABLE_CKSON_a0; + uint32_t ulGB_VDROOP_TABLE_CKSON_a1; + uint32_t ulGB_VDROOP_TABLE_CKSON_a2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b; + uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1; + uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b; + uint16_t usMaxVoltage_0_25mv; + uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF; + uint8_t ucEnableGB_VDROOP_TABLE_CKSON; + uint8_t ucEnableGB_FUSE_TABLE_CKSOFF; + uint8_t ucEnableGB_FUSE_TABLE_CKSON; + uint16_t usPSM_Age_ComFactor; + uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage; + uint8_t ucReserved; +}; + extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); @@ -276,7 +305,10 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level); extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, - uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); + uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage); extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); + +extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); + #endif @@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low; } - table->SmioMask2 = data->vddci_voltage_table.mask_low; + table->SmioMask2 = data->mvdd_voltage_table.mask_low; CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); } @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) } } - /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. 
*/ - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; - /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ - /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; - - if (NULL != allowed_vdd_mclk_table) { - /* Initialize Vddci DPM table based on allow Mclk values */ - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; - data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; - data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; - data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; - } - /* setup PCIE gen speed levels*/ tonga_setup_default_pcie_tables(hwmgr); @@ -4510,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; + data->force_pcie_gen = PP_PCIEGenInvalid; if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { @@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table { ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ } ATOM_Tonga_SCLK_Dependency_Table; +typedef struct _ATOM_Polaris_SCLK_Dependency_Record { + UCHAR ucVddInd; /* Base voltage */ + USHORT usVddcOffset; /* Offset relative to base voltage */ + ULONG ulSclk; + USHORT usEdcCurrent; + UCHAR ucReliabilityTemperature; + UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ + ULONG ulSclkOffset; +} ATOM_Polaris_SCLK_Dependency_Record; + +typedef struct _ATOM_Polaris_SCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. 
*/ +} ATOM_Polaris_SCLK_Dependency_Table; + typedef struct _ATOM_Tonga_PCIE_Record { UCHAR ucPCIEGenSpeed; UCHAR usPCIELaneWidth; @@ -302,7 +302,7 @@ static int init_dpm_2_parameters( (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset)); if (0 != powerplay_table->usPPMTableOffset) { - if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) { + if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnablePlatformPowerManagement); } @@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table( static int get_sclk_voltage_dependency_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, - const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table + const PPTable_Generic_SubTable_Header *sclk_dep_table ) { uint32_t table_size, i; phm_ppt_v1_clock_voltage_dependency_table *sclk_table; - PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), - "Invalid PowerPlay Table!", return -1); + if (sclk_dep_table->ucRevId < 1) { + const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = + (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; - table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) - * sclk_dep_table->ucNumEntries; + PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * tonga_table->ucNumEntries; - if (NULL == sclk_table) - return -ENOMEM; + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); - memset(sclk_table, 0x00, table_size); - - sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; - - for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { - sclk_table->entries[i].vddInd = - sclk_dep_table->entries[i].ucVddInd; - sclk_table->entries[i].vdd_offset = - sclk_dep_table->entries[i].usVddcOffset; - sclk_table->entries[i].clk = - sclk_dep_table->entries[i].ulSclk; - sclk_table->entries[i].cks_enable = - (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; - sclk_table->entries[i].cks_voffset = - (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); - } + if (NULL == sclk_table) + return -ENOMEM; + + memset(sclk_table, 0x00, table_size); + + sclk_table->count = (uint32_t)tonga_table->ucNumEntries; + + for (i = 0; i < tonga_table->ucNumEntries; i++) { + sclk_table->entries[i].vddInd = + tonga_table->entries[i].ucVddInd; + sclk_table->entries[i].vdd_offset = + tonga_table->entries[i].usVddcOffset; + sclk_table->entries[i].clk = + tonga_table->entries[i].ulSclk; + sclk_table->entries[i].cks_enable = + (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; + sclk_table->entries[i].cks_voffset = + (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + } + } else { + const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = + (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; + + PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * polaris_table->ucNumEntries; + + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + if (NULL == sclk_table) + return -ENOMEM; + + memset(sclk_table, 0x00, table_size); + + sclk_table->count = (uint32_t)polaris_table->ucNumEntries; + + for (i = 0; i < polaris_table->ucNumEntries; i++) { + sclk_table->entries[i].vddInd = + polaris_table->entries[i].ucVddInd; + sclk_table->entries[i].vdd_offset = + polaris_table->entries[i].usVddcOffset; + sclk_table->entries[i].clk = + polaris_table->entries[i].ulSclk; + sclk_table->entries[i].cks_enable = + (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; + sclk_table->entries[i].cks_voffset = + (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; + } + } *pp_tonga_sclk_dep_table = sclk_table; return 0; @@ -708,8 +745,8 @@ static int init_clock_voltage_dependency( const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + + const PPTable_Generic_SubTable_Header *sclk_dep_table = + (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); const ATOM_Tonga_Hard_Limit_Table *pHardLimits = (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + @@ -1040,48 +1077,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); + if (NULL != hwmgr->soft_pp_table) hwmgr->soft_pp_table = NULL; - } - if (NULL != pp_table_information->vdd_dep_on_sclk) - pp_table_information->vdd_dep_on_sclk = NULL; + kfree(pp_table_information->vdd_dep_on_sclk); + pp_table_information->vdd_dep_on_sclk = NULL; - if (NULL != pp_table_information->vdd_dep_on_mclk) - pp_table_information->vdd_dep_on_mclk = NULL; + kfree(pp_table_information->vdd_dep_on_mclk); + pp_table_information->vdd_dep_on_mclk = NULL; - if (NULL != pp_table_information->valid_mclk_values) - pp_table_information->valid_mclk_values = NULL; + kfree(pp_table_information->valid_mclk_values); + pp_table_information->valid_mclk_values = NULL; - if (NULL != pp_table_information->valid_sclk_values) - pp_table_information->valid_sclk_values = NULL; + kfree(pp_table_information->valid_sclk_values); + pp_table_information->valid_sclk_values = NULL; - if (NULL != pp_table_information->vddc_lookup_table) - pp_table_information->vddc_lookup_table = NULL; + kfree(pp_table_information->vddc_lookup_table); + pp_table_information->vddc_lookup_table = NULL; - if (NULL != pp_table_information->vddgfx_lookup_table) - pp_table_information->vddgfx_lookup_table = NULL; + kfree(pp_table_information->vddgfx_lookup_table); + 
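[Editor's note] The teardown rework in the surrounding hunk leans on kfree() being a no-op for NULL: each table pointer is freed unconditionally and then cleared, where the old code only cleared the pointers and leaked the allocations. A minimal self-contained sketch of the idiom (hypothetical struct, not from the patch):

struct example_tables {
	void *dep_table;
	void *lookup_table;
};

/* Free-then-clear: kfree(NULL) is safe, so no NULL check is needed, and
 * clearing the pointer afterwards guards against double-free and stale
 * dereferences. */
static void example_tables_fini(struct example_tables *t)
{
	kfree(t->dep_table);
	t->dep_table = NULL;
	kfree(t->lookup_table);
	t->lookup_table = NULL;
}
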
pp_table_information->vddgfx_lookup_table = NULL; - if (NULL != pp_table_information->mm_dep_table) - pp_table_information->mm_dep_table = NULL; + kfree(pp_table_information->mm_dep_table); + pp_table_information->mm_dep_table = NULL; - if (NULL != pp_table_information->cac_dtp_table) - pp_table_information->cac_dtp_table = NULL; + kfree(pp_table_information->cac_dtp_table); + pp_table_information->cac_dtp_table = NULL; - if (NULL != hwmgr->dyn_state.cac_dtp_table) - hwmgr->dyn_state.cac_dtp_table = NULL; + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; - if (NULL != pp_table_information->ppm_parameter_table) - pp_table_information->ppm_parameter_table = NULL; + kfree(pp_table_information->ppm_parameter_table); + pp_table_information->ppm_parameter_table = NULL; - if (NULL != pp_table_information->pcie_table) - pp_table_information->pcie_table = NULL; + kfree(pp_table_information->pcie_table); + pp_table_information->pcie_table = NULL; - if (NULL != hwmgr->pptable) { - kfree(hwmgr->pptable); - hwmgr->pptable = NULL; - } + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; return result; } @@ -411,6 +411,8 @@ struct phm_cac_tdp_table { uint8_t ucVr_I2C_Line; uint8_t ucPlx_I2C_address; uint8_t ucPlx_I2C_Line; + uint32_t usBoostPowerLimit; + uint8_t ucCKS_LDO_REFSEL; }; struct phm_ppm_table { @@ -27,6 +27,7 @@ #pragma pack(push, 1) +#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) #define PPSMC_SWSTATE_FLAG_DC 0x01 #define PPSMC_SWSTATE_FLAG_UVD 0x02 @@ -391,6 +392,8 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300) #define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301) +#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) + #define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) #define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) #define PPSMC_MSG_SetAddress ((uint16_t) 0x800) @@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device, extern int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise); +extern bool acpi_atcs_notify_pcie_device_ready(void *device); @@ -34,6 +34,30 @@ #define SMU__NUM_LCLK_DPM_LEVELS 8 #define SMU__NUM_PCIE_DPM_LEVELS 8 +#define EXP_M1 35 +#define EXP_M2 92821 +#define EXP_B 66629747 + +#define EXP_M1_1 365 +#define EXP_M2_1 658700 +#define EXP_B_1 305506134 + +#define EXP_M1_2 189 +#define EXP_M2_2 379692 +#define EXP_B_2 194609469 + +#define EXP_M1_3 99 +#define EXP_M2_3 217915 +#define EXP_B_3 122255994 + +#define EXP_M1_4 51 +#define EXP_M2_4 122643 +#define EXP_B_4 74893384 + +#define EXP_M1_5 423 +#define EXP_M2_5 1103326 +#define EXP_B_5 728122621 + enum SID_OPTION { SID_OPTION_HI, SID_OPTION_LO, @@ -548,20 +572,20 @@ struct SMU74_Firmware_Header { uint32_t CacConfigTable; uint32_t CacStatusTable; - uint32_t mcRegisterTable; - uint32_t mcArbDramTimingTable; - - - uint32_t PmFuseTable; uint32_t Globals; uint32_t ClockStretcherTable; uint32_t VftTable; - uint32_t Reserved[21]; + uint32_t Reserved1; + uint32_t AvfsTable; + uint32_t AvfsCksOffGbvTable; + uint32_t AvfsMeanNSigma; + uint32_t AvfsSclkOffsetTable; + uint32_t Reserved[16]; uint32_t Signature; }; @@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */ struct SMU_ClockStretcherDataTableEntry { uint8_t minVID; uint8_t maxVID; - - uint16_t setting; }; typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; @@ -769,6 +791,43 @@ struct VFT_TABLE_t { typedef struct VFT_TABLE_t VFT_TABLE_t; +/* Total margin, root mean square of Fmax + DC + Platform */ +struct 
AVFS_Margin_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_Margin_t AVFS_Margin_t; + +#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2 +#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2 + +struct GB_VDROOP_TABLE_t { + int32_t a0; + int32_t a1; + int32_t a2; + uint32_t spare; +}; +typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t; + +struct AVFS_CksOff_Gbv_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t; + +struct AVFS_meanNsigma_t { + uint32_t Aconstant[3]; + uint16_t DC_tol_sigma; + uint16_t Platform_mean; + uint16_t Platform_sigma; + uint16_t PSM_Age_CompFactor; + uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t; + +struct AVFS_Sclk_Offset_t { + uint16_t Sclk_Offset[8]; +}; +typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t; + #endif @@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo { typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; +struct SMU_QuadraticCoeffs { + int32_t m1; + uint32_t b; + + int16_t m2; + uint8_t m1_shift; + uint8_t m2_shift; +}; +typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; + struct SMU74_Discrete_DpmTable { SMU74_PIDController GraphicsPIDController; @@ -258,7 +268,15 @@ struct SMU74_Discrete_DpmTable { uint8_t ThermOutPolarity; uint8_t ThermOutMode; uint8_t BootPhases; - uint32_t Reserved[4]; + + uint8_t VRHotLevel; + uint8_t LdoRefSel; + uint8_t Reserved1[2]; + uint16_t FanStartTemperature; + uint16_t FanStopTemperature; + uint16_t MaxVoltage; + uint16_t Reserved2; + uint32_t Reserved[1]; SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; SMU74_Discrete_MemoryLevel MemoryACPILevel; @@ -347,6 +365,8 @@ struct SMU74_Discrete_DpmTable { uint32_t CurrSclkPllRange; sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; + GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES]; + SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES]; }; typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; @@ -550,16 +570,6 @@ struct SMU7_AcpiScoreboard { typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; -struct SMU_QuadraticCoeffs { - int32_t m1; - uint32_t b; - - int16_t m2; - uint8_t m1_shift; - uint8_t m2_shift; -}; -typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; - struct SMU74_Discrete_PmFuses { uint8_t BapmVddCVidHiSidd[8]; uint8_t BapmVddCVidLoSidd[8]; @@ -821,6 +831,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard; #define DB_PCC_SHIFT 26 #define DB_EDC_SHIFT 27 +#define BTCGB0_Vdroop_Enable_MASK 0x1 +#define BTCGB1_Vdroop_Enable_MASK 0x2 +#define AVFSGB0_Vdroop_Enable_MASK 0x4 +#define AVFSGB1_Vdroop_Enable_MASK 0x8 + +#define BTCGB0_Vdroop_Enable_SHIFT 0 +#define BTCGB1_Vdroop_Enable_SHIFT 1 +#define AVFSGB0_Vdroop_Enable_SHIFT 2 +#define AVFSGB1_Vdroop_Enable_SHIFT 3 + + #pragma pack(pop) @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) static int fiji_smu_fini(struct pp_smumgr *smumgr) { + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } @@ -52,19 +52,18 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, 
DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ - { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } + { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, + { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, + { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }, + { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, + { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } }, + { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, + { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } }, + { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } }; static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = - {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, - 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00}; + {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; /** * Set the address for reading/writing the SMC SRAM space. 
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } +static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse &= 0x00000001; + if (efuse) + return true; + + return false; +} + /** * Send a message to the SMC, and wait for its response. * @@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) */ int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) { + int ret; + if (!polaris10_is_smc_ram_running(smumgr)) return -1; + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Previous Message.\n"); + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + if (ret != 1) + printk("Failed to send previous message %x, SMC_RESP %d\n", msg, ret); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Message.\n"); + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("Failed to send message %x, SMC_RESP %d\n", msg, ret); return 0; } @@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) kfree(smumgr->backend); smumgr->backend = NULL; } + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } @@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) (cgs_handle_t)smu_data->smu_buffer.handle); return -1;); + if (polaris10_is_hw_avfs_present(smumgr)) + smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; + else + smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + return 0; } @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) int smum_fini(struct pp_smumgr *smumgr) { + kfree(smumgr->device); kfree(smumgr); return 0; } @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, static int tonga_smu_fini(struct pp_smumgr *smumgr) { + struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend != NULL) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } @@ -33,8 +33,17 @@ * */ +static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + /* stop the controller on cleanup */ + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + drm_crtc_cleanup(crtc); +} + static const struct drm_crtc_funcs hdlcd_crtc_funcs = { - .destroy = drm_crtc_cleanup, + .destroy = hdlcd_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, @@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); struct drm_display_mode *m = &crtc->state->adjusted_mode; struct videomode vm; - unsigned int polarities, line_length, err; + unsigned int polarities, err; vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; @@ -113,23 +122,18 @@ static void
hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) if (m->flags & DRM_MODE_FLAG_PVSYNC) polarities |= HDLCD_POLARITY_VSYNC; - line_length = crtc->primary->state->fb->pitches[0]; - /* Allow max number of outstanding requests and largest burst size */ hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); - hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); err = hdlcd_set_pxl_fmt(crtc); @@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); clk_prepare_enable(hdlcd->clk); + hdlcd_crtc_mode_set_nofb(crtc); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); - drm_crtc_vblank_on(crtc); } static void hdlcd_crtc_disable(struct drm_crtc *crtc) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - if (!crtc->primary->fb) + if (!crtc->state->active) return; - clk_disable_unprepare(hdlcd->clk); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_crtc_vblank_off(crtc); + clk_disable_unprepare(hdlcd->clk); } static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, @@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &hdlcd->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } @@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { static int hdlcd_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { + u32 src_w, src_h; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + /* we can't do any scaling of the plane source */ + if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) + return -EINVAL; + return 0; } @@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, { struct hdlcd_drm_private *hdlcd; struct drm_gem_cma_object *gem; + unsigned int depth, bpp; + u32 src_w, src_h, dest_w, dest_h; dma_addr_t scanout_start; - if (!plane->state->crtc || !plane->state->fb) + if (!plane->state->fb) return; - hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); + 
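[Editor's note] The atomic_begin rework above moves hdlcd onto the core vblank-event helpers: take a vblank reference and arm the event so the IRQ path completes it, or, if no reference can be taken (the CRTC may be off), complete it immediately. A sketch of the pattern for a generic atomic CRTC, assuming nothing beyond the DRM core APIs already used in the hunk:

static void example_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (!event)
		return;

	crtc->state->event = NULL;

	/* Hand-off must happen under the device's event_lock. */
	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* armed: sent (and the reference dropped) on the next vblank */
		drm_crtc_arm_vblank_event(crtc, event);
	else
		/* no vblank available, e.g. CRTC disabled: complete now */
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);
}
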
drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp); + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); - scanout_start = gem->paddr; + scanout_start = gem->paddr + plane->state->fb->offsets[0] + + plane->state->crtc_y * plane->state->fb->pitches[0] + + plane->state->crtc_x * bpp / 8; + + hdlcd = plane->dev->dev_private; + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); } static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { - .prepare_fb = NULL, - .cleanup_fb = NULL, .atomic_check = hdlcd_plane_atomic_check, .atomic_update = hdlcd_plane_atomic_update, }; @@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) return plane; } -void hdlcd_crtc_suspend(struct drm_crtc *crtc) -{ - hdlcd_crtc_disable(crtc); -} - -void hdlcd_crtc_resume(struct drm_crtc *crtc) -{ - hdlcd_crtc_enable(crtc); -} - int hdlcd_setup_crtc(struct drm_device *drm) { struct hdlcd_drm_private *hdlcd = drm->dev_private; @@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) atomic_set(&hdlcd->dma_end_count, 0); #endif - INIT_LIST_HEAD(&hdlcd->event_list); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdlcd->mmio = devm_ioremap_resource(drm->dev, res); if (IS_ERR(hdlcd->mmio)) { @@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) goto setup_fail; } - pm_runtime_enable(drm->dev); - - pm_runtime_get_sync(drm->dev); ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); - pm_runtime_put_sync(drm->dev); if (ret < 0) { DRM_ERROR("failed to install IRQ handler\n"); goto irq_fail; @@ -164,24 +158,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg) atomic_inc(&hdlcd->vsync_count); #endif - if (irq_status & HDLCD_INTERRUPT_VSYNC) { - bool events_sent = false; - unsigned long flags; - struct drm_pending_vblank_event *e, *t; - + if (irq_status & HDLCD_INTERRUPT_VSYNC) drm_crtc_handle_vblank(&hdlcd->crtc); - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { - list_del(&e->base.link); - drm_crtc_send_vblank_event(&hdlcd->crtc, e); - events_sent = true; - } - if (events_sent) - drm_crtc_vblank_put(&hdlcd->crtc); - spin_unlock_irqrestore(&drm->event_lock, flags); - } - /* acknowledge interrupt(s) */ hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); @@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) static struct drm_info_list hdlcd_debugfs_list[] = { { "interrupt_count", hdlcd_show_underrun_count, 0 }, { "clocks", hdlcd_show_pxlclock, 0 }, + { "fb", drm_fb_cma_debugfs_show, 0 }, }; static int hdlcd_debugfs_init(struct drm_minor *minor) @@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev) return -ENOMEM; drm->dev_private = hdlcd; + dev_set_drvdata(dev, drm); + hdlcd_setup_mode_config(drm); ret = hdlcd_load(drm, 0); if (ret) @@ -366,14 +348,18 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_unload; - dev_set_drvdata(dev, drm); - ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); goto err_unregister; } + ret = pm_runtime_set_active(dev); + if (ret) + goto err_pm_active; + + 
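[Editor's note] The plane update above derives the scanout start address from the GEM base address plus the framebuffer's first-plane offset, the line pitch scaled by the vertical position, and the horizontal position converted to bytes. The same arithmetic as a standalone sketch; the helper name is illustrative, and bpp is bits per pixel as returned by drm_fb_get_bpp_depth():

/* offsets[0] and pitches[0] are the byte offset and line stride of the
 * framebuffer's first (only) plane. */
static dma_addr_t example_scanout_start(dma_addr_t base,
		const struct drm_framebuffer *fb,
		u32 x, u32 y, unsigned int bpp)
{
	return base + fb->offsets[0] + y * fb->pitches[0] + x * bpp / 8;
}
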
pm_runtime_enable(dev); + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); @@ -399,16 +385,16 @@ err_fbdev: drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); err_vblank: + pm_runtime_disable(drm->dev); +err_pm_active: component_unbind_all(dev, drm); err_unregister: drm_dev_unregister(drm); err_unload: - pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); - pm_runtime_put_sync(drm->dev); - pm_runtime_disable(drm->dev); of_reserved_mem_device_release(drm->dev); err_free: + dev_set_drvdata(dev, NULL); drm_dev_unref(drm); return ret; @@ -495,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match); static int __maybe_unused hdlcd_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; - if (pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_suspend(crtc); - drm_modeset_unlock_all(drm); + drm_kms_helper_poll_disable(drm); + + hdlcd->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(hdlcd->state)) { + drm_kms_helper_poll_enable(drm); + return PTR_ERR(hdlcd->state); + } + return 0; } static int __maybe_unused hdlcd_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; - if (!pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_resume(crtc); - drm_modeset_unlock_all(drm); + drm_atomic_helper_resume(drm, hdlcd->state); + drm_kms_helper_poll_enable(drm); + pm_runtime_set_active(dev); + return 0; } @@ -9,10 +9,9 @@ struct hdlcd_drm_private { void __iomem *mmio; struct clk *clk; struct drm_fbdev_cma *fbdev; - struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; + struct drm_atomic_state *state; #ifdef CONFIG_DEBUG_FS atomic_t buffer_underrun_count; atomic_t bus_error_count; @@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) int hdlcd_setup_crtc(struct drm_device *dev); void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); -void hdlcd_crtc_suspend(struct drm_crtc *crtc); -void hdlcd_crtc_resume(struct drm_crtc *crtc); #endif /* __HDLCD_DRV_H__ */ @@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, obj->dev_addr = DMA_ERROR_CODE; - mapping = file_inode(obj->obj.filp)->i_mapping; + mapping = obj->obj.filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); @@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, if (sg_alloc_table(sgt, count, GFP_KERNEL)) goto free_sgt; - mapping = file_inode(dobj->obj.filp)->i_mapping; + mapping = dobj->obj.filp->f_mapping; for_each_sg(sgt->sgl, sg, count, i) { struct page *page; @@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) { struct atmel_hlcdc_crtc_state *state; - if (crtc->state && crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); - if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); kfree(state); + crtc->state = NULL; } state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -415,8 
+414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); - if (state) - __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + if (!state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); state->output_mode = cur->output_mode; @@ -266,9 +266,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) if (!ret) ret = atmel_hlcdc_check_endpoint(dev, &ep); - of_node_put(ep_np); - if (ret) + if (ret) { + of_node_put(ep_np); return ret; + } } for_each_endpoint_of_node(dev->dev->of_node, ep_np) { @@ -276,9 +277,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) if (!ret) ret = atmel_hlcdc_attach_endpoint(dev, &ep); - of_node_put(ep_np); - if (ret) + if (ret) { + of_node_put(ep_np); return ret; + } } return 0; @@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, factor_reg); + } else { + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0); } } @@ -351,6 +351,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; + memset(&state->mode, 0, sizeof(state->mode)); + if (blob) { if (blob->length != sizeof(struct drm_mode_modeinfo) || drm_mode_convert_umode(&state->mode, @@ -363,7 +365,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", state->mode.name, state); } else { - memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", state); @@ -1295,14 +1296,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes); */ void drm_atomic_legacy_backoff(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; + unsigned crtc_mask = 0; + struct drm_crtc *crtc; int ret; + bool global = false; + + drm_for_each_crtc(crtc, dev) { + if (crtc->acquire_ctx != state->acquire_ctx) + continue; + + crtc_mask |= drm_crtc_mask(crtc); + crtc->acquire_ctx = NULL; + } + + if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) { + global = true; + + dev->mode_config.acquire_ctx = NULL; + } retry: drm_modeset_backoff(state->acquire_ctx); - ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); + ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); if (ret) goto retry; + + drm_for_each_crtc(crtc, dev) + if (drm_crtc_mask(crtc) & crtc_mask) + crtc->acquire_ctx = state->acquire_ctx; + + if (global) + dev->mode_config.acquire_ctx = state->acquire_ctx; } EXPORT_SYMBOL(drm_atomic_legacy_backoff); @@ -2821,8 +2821,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - /* * Check whether the primary plane supports the fb pixel format. 
* Drivers not implementing the universal planes API use a @@ -4841,7 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property, if (value == 0) return true; - return _object_find(property->dev, value, property->values[0]) != NULL; + *ref = _object_find(property->dev, value, property->values[0]); + return *ref != NULL; } for (i = 0; i < property->num_values; i++) @@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) int drm_crtc_helper_set_config(struct drm_mode_set *set) { struct drm_device *dev; - struct drm_crtc *new_crtc; - struct drm_encoder *save_encoders, *new_encoder, *encoder; + struct drm_crtc **save_encoder_crtcs, *new_crtc; + struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ - struct drm_connector *save_connectors, *connector; + struct drm_connector *connector; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; @@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) * Allocate space for the backup of all (non-pointer) encoder and * connector data. */ - save_encoders = kzalloc(dev->mode_config.num_encoder * - sizeof(struct drm_encoder), GFP_KERNEL); - if (!save_encoders) + save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!save_encoder_crtcs) return -ENOMEM; - save_connectors = kzalloc(dev->mode_config.num_connector * - sizeof(struct drm_connector), GFP_KERNEL); - if (!save_connectors) { - kfree(save_encoders); + save_connector_encoders = kzalloc(dev->mode_config.num_connector * + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!save_connector_encoders) { + kfree(save_encoder_crtcs); return -ENOMEM; } @@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) */ count = 0; drm_for_each_encoder(encoder, dev) { - save_encoders[count++] = *encoder; + save_encoder_crtcs[count++] = encoder->crtc; } count = 0; drm_for_each_connector(connector, dev) { - save_connectors[count++] = *connector; + save_connector_encoders[count++] = connector->encoder; } save_set.crtc = set->crtc; @@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; } - /* take a reference on all connectors in set */ + /* take a reference on all unbound connectors in set, reuse the + * already taken reference for bound connectors + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_reference(set->connectors[ro]); } @@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } } - /* after fail drop reference on all connectors in save set */ - count = 0; - drm_for_each_connector(connector, dev) { - drm_connector_unreference(&save_connectors[count++]); - } - - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return 0; fail: /* Restore all previous data. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { - *encoder = save_encoders[count++]; + encoder->crtc = save_encoder_crtcs[count++]; } count = 0; drm_for_each_connector(connector, dev) { - *connector = save_connectors[count++]; + connector->encoder = save_connector_encoders[count++]; } - /* after fail drop reference on all connectors in set */ + /* after fail drop reference on all unbound connectors in set, let + * bound connectors keep their reference + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_unreference(set->connectors[ro]); } @@ -787,8 +789,8 @@ fail: save_set.y, save_set.fb)) DRM_ERROR("failed to restore config after modeset failure\n"); - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_config); @@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_port_teardown_pdt(port, port->pdt); if (!port->input && port->vcpi.vcpi > 0) { - if (mgr->mst_state) { - drm_dp_mst_reset_vcpi_slots(mgr, port); - drm_dp_update_payload_part1(mgr); - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - } + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kref_put(&port->kref, drm_dp_free_mst_port); @@ -445,7 +445,7 @@ err_cma_destroy: err_fb_info_destroy: drm_fb_helper_release_fbi(helper); err_gem_free_object: - dev->driver->gem_free_object(&obj->base); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); @@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) int i, npages; /* This is the shared memory object that backs the GEM resource */ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, return cma_obj; error: - drm->driver->gem_free_object(&cma_obj->base); + drm_gem_object_unreference_unlocked(&cma_obj->base); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, * and handle has the id what user can see. */ ret = drm_gem_handle_create(file_priv, gem_obj, handle); - if (ret) - goto err_handle_create; - /* drop reference from allocate - handle holds it now. */ drm_gem_object_unreference_unlocked(gem_obj); + if (ret) + return ERR_PTR(ret); return cma_obj; - -err_handle_create: - drm->driver->gem_free_object(gem_obj); - - return ERR_PTR(ret); } /** @@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, if (out->status != MODE_OK) goto out; + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); + ret = 0; out: @@ -660,7 +660,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, * why this is required _and_ expected if you're * going to pin these pages. 
*/ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER); } @@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; + etnaviv_domain->domain.pgsize_bitmap = SZ_4K; etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; @@ -31,7 +31,6 @@ #include "exynos_drm_plane.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" -#include "exynos_drm_fbdev.h" #include "exynos_drm_iommu.h" /* @@ -34,7 +34,7 @@ struct exynos_dp_device { struct drm_encoder encoder; - struct drm_connector connector; + struct drm_connector *connector; struct drm_bridge *ptn_bridge; struct drm_device *drm_dev; struct device *dev; @@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) { struct exynos_dp_device *dp = to_dp(plat_data); - struct drm_connector *connector = &dp->connector; + struct drm_connector *connector = dp->connector; struct drm_display_mode *mode; int num_modes = 0; @@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, int ret; drm_connector_register(connector); + dp->connector = connector; /* Pre-empt DP connector creation if there's a bridge */ if (dp->ptn_bridge) { @@ -15,7 +15,6 @@ #include <drm/drmP.h> #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_fbdev.h" static LIST_HEAD(exynos_drm_subdrv_list); @@ -30,7 +30,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" -#include "exynos_drm_fbdev.h" #include "exynos_drm_crtc.h" #include "exynos_drm_plane.h" #include "exynos_drm_iommu.h" @@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { .timing_base = 0x0, .has_clksel = 1, .has_limited_fmt = 1, - .has_hw_trigger = 1, }; static struct fimd_driver_data exynos3_fimd_driver_data = { @@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { .lcdblk_vt_shift = 24, .lcdblk_bypass_shift = 15, .lcdblk_mic_bypass_shift = 11, - .trg_type = I80_HW_TRG, .has_shadowcon = 1, .has_vidoutcon = 1, .has_vtsel = 1, .has_mic_bypass = 1, .has_dp_clk = 1, - .has_hw_trigger = 1, - .has_trigger_per_te = 1, }; struct fimd_context { @@ -48,13 +48,13 @@ /* registers for base address */ #define G2D_SRC_BASE_ADDR 0x0304 -#define G2D_SRC_STRIDE_REG 0x0308 +#define G2D_SRC_STRIDE 0x0308 #define G2D_SRC_COLOR_MODE 0x030C #define G2D_SRC_LEFT_TOP 0x0310 #define G2D_SRC_RIGHT_BOTTOM 0x0314 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 #define G2D_DST_BASE_ADDR 0x0404 -#define G2D_DST_STRIDE_REG 0x0408 +#define G2D_DST_STRIDE 0x0408 #define G2D_DST_COLOR_MODE 0x040C #define G2D_DST_LEFT_TOP 0x0410 #define G2D_DST_RIGHT_BOTTOM 0x0414 @@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) switch (reg_offset) { case G2D_SRC_BASE_ADDR: - case G2D_SRC_STRIDE_REG: + case G2D_SRC_STRIDE: case G2D_SRC_COLOR_MODE: case G2D_SRC_LEFT_TOP: case G2D_SRC_RIGHT_BOTTOM: @@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) reg_type = REG_TYPE_SRC_PLANE2; break; case G2D_DST_BASE_ADDR: - case G2D_DST_STRIDE_REG: + case G2D_DST_STRIDE: case G2D_DST_COLOR_MODE: case G2D_DST_LEFT_TOP: case G2D_DST_RIGHT_BOTTOM: @@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device 
*dev, } else buf_info->types[reg_type] = BUF_TYPE_GEM; break; - case G2D_SRC_STRIDE_REG: - case G2D_DST_STRIDE_REG: + case G2D_SRC_STRIDE: + case G2D_DST_STRIDE: if (for_addr) goto err; @@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, state->v_ratio == (1 << 15)) height_ok = true; - if (width_ok & height_ok) + if (width_ok && height_ok) return 0; DRM_DEBUG_KMS("scaling mode is not supported"); @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_FLAT, .volatile_reg = fsl_dcu_drm_is_volatile_reg, + .max_register = 0x11fc, }; static int fsl_dcu_drm_irq_init(struct drm_device *dev) @@ -2100,9 +2100,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) - if (ctx != dev_priv->kernel_context) + if (ctx != dev_priv->kernel_context) { for_each_engine(engine, dev_priv) i915_dump_lrc_obj(m, ctx, engine); + } mutex_unlock(&dev->struct_mutex); @@ -2365,16 +2366,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) task = get_pid_task(file->pid, PIDTYPE_PID); if (!task) { ret = -ESRCH; - goto out_put; + goto out_unlock; } seq_printf(m, "\nproc: %s\n", task->comm); put_task_struct(task); idr_for_each(&file_priv->context_idr, per_file_ctx, (void *)(unsigned long)m); } +out_unlock: mutex_unlock(&dev->filelist_mutex); -out_put: intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -512,6 +512,10 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); + } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { + dev_priv->pch_type = PCH_KBP; + DRM_DEBUG_KMS("Found KabyPoint PCH\n"); + WARN_ON(!IS_KABYLAKE(dev)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && @@ -990,6 +990,7 @@ enum intel_pch { PCH_CPT, /* Cougarpoint PCH */ PCH_LPT, /* Lynxpoint PCH */ PCH_SPT, /* Sunrisepoint PCH */ + PCH_KBP, /* Kabypoint PCH */ PCH_NOP, }; @@ -2600,6 +2601,15 @@ struct drm_i915_cmd_table { #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) +#define KBL_REVID_A0 0x0 +#define KBL_REVID_B0 0x1 +#define KBL_REVID_C0 0x2 +#define KBL_REVID_D0 0x3 +#define KBL_REVID_E0 0x4 + +#define IS_KBL_REVID(p, since, until) \ + (IS_KABYLAKE(p) && IS_REVID(p, since, until)) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks @@ -2708,11 +2718,13 @@ struct drm_i915_cmd_table { #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 +#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) +#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP) #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) @@ -3481,6 +3493,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool 
intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); @@ -151,7 +151,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; struct sg_table *st; struct scatterlist *sg; @@ -218,7 +218,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) obj->dirty = 0; if (obj->dirty) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; int i; @@ -2155,7 +2155,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) if (obj->base.filp == NULL) return; - mapping = file_inode(obj->base.filp)->i_mapping, + mapping = obj->base.filp->f_mapping, invalidate_mapping_pages(mapping, 0, (loff_t)-1); } @@ -2271,7 +2271,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * * Fail silently without starting the shrinker */ - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); gfp |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; @@ -4522,7 +4522,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, mask |= __GFP_DMA32; } - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); i915_gem_object_init(obj, &i915_gem_object_ops); @@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ @@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, return -ENODEV; /* See the comment at the drm_mm_init() call for more about this check. 
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ - if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) + * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete) + */ + if (start < 4096 && (IS_GEN8(dev_priv) || + IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0))) start = 4096; mutex_lock(&dev_priv->mm.stolen_lock); @@ -2471,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_SPT(dev_priv)) + if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) spt_irq_handler(dev, iir); else cpt_irq_handler(dev, iir); @@ -4661,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = gen8_disable_vblank; if (IS_BROXTON(dev)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (HAS_PCH_SPT(dev)) + else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; @@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOCHK_PPGTT_WT_HSW (0x2<<3) #define ECOCHK_PPGTT_WB_HSW (0x3<<3) +#define GEN8_CONFIG0 _MMIO(0xD00) +#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1<<13) #define ECOBITS_PPGTT_CACHE64B (3<<8) @@ -1669,6 +1672,9 @@ enum skl_disp_power_wells { #define GEN7_TLB_RD_ADDR _MMIO(0x4700) +#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) +#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -1804,6 +1810,10 @@ enum skl_disp_power_wells { #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) +/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ +#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) +#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) + /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) @@ -2200,6 +2210,8 @@ enum skl_disp_power_wells { #define ILK_DPFC_STATUS _MMIO(0x43210) #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) #define ILK_DPFC_CHICKEN _MMIO(0x43224) +#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) +#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) #define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_VALID (1<<0) #define SNB_FBC_FRONT_BUFFER (1<<1) @@ -6031,6 +6043,7 @@ enum skl_disp_power_wells { #define CHICKEN_PAR1_1 _MMIO(0x42080) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) +#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 @@ -6039,6 +6052,7 @@ enum skl_disp_power_wells { #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) #define DISP_ARB_CTL _MMIO(0x45000) +#define DISP_FBC_MEMORY_WAKE (1<<31) #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) #define DISP_ARB_CTL2 _MMIO(0x45004) @@ -6052,6 +6066,9 @@ enum skl_disp_power_wells { #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) +#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define MASK_WAKEMEM (1<<13) + #define SKL_DFSM _MMIO(0x51000) #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) @@ -6069,6 +6086,7 @@ enum skl_disp_power_wells { #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN8_CS_CHICKEN1 
_MMIO(0x2580) /* GEN7 chicken */ @@ -6076,6 +6094,7 @@ enum skl_disp_power_wells { # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) #define HIZ_CHICKEN _MMIO(0x7018) @@ -6921,6 +6940,7 @@ enum skl_disp_power_wells { #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) #define GEN6_UCGCTL1 _MMIO(0x9400) +# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22) # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) @@ -6937,6 +6957,7 @@ enum skl_disp_power_wells { #define GEN7_UCGCTL4 _MMIO(0x940c) #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) +#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14) #define GEN6_RCGCTL1 _MMIO(0x9410) #define GEN6_RCGCTL2 _MMIO(0x9414) @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | + dvo_timing->himage_lo; + panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | + dvo_timing->vimage_lo; + /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; @@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } if (bdb->version < 106) { expected_size = 22; - } else if (bdb->version < 109) { + } else if (bdb->version < 111) { expected_size = 27; } else if (bdb->version < 195) { BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); @@ -1546,6 +1551,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) } /** + * intel_bios_is_port_present - is the specified digital port present + * @dev_priv: i915 device instance + * @port: port to check + * + * Return true if the device in %port is present. + */ +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) +{ + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + }; + int i; + + /* FIXME maybe deal with port A as well? */ + if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) + return false; + + if (!dev_priv->vbt.child_dev_num) + return false; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + const union child_device_config *p_child = + &dev_priv->vbt.child_dev[i]; + if ((p_child->common.dvo_port == port_mapping[port].dp || + p_child->common.dvo_port == port_mapping[port].hdmi) && + (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | + DEVICE_TYPE_DISPLAYPORT_OUTPUT))) + return true; + } + + return false; +} + +/** * intel_bios_is_port_edp - is the device in given port eDP * @dev_priv: i915 device instance * @port: port to check @@ -41,16 +41,22 @@ * be moved to FW_FAILED. 
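The fill_detail_timing_data() hunk above derives the panel's physical size by stitching a low byte to a high nibble, matching the renamed himage/vimage bitfields in the struct lvds_dvo_timing hunk later in this series. A self-contained sketch of that reconstruction (field layout simplified, sample values hypothetical):

#include <stdio.h>
#include <stdint.h>

/* simplified view of the lvds_dvo_timing bytes carrying the physical
 * image size: two low bytes plus a shared high-nibble byte, mirroring
 * the vimage_hi:4 / himage_hi:4 bitfields in the diff */
struct dvo_image_size {
    uint8_t himage_lo;
    uint8_t vimage_lo;
    unsigned vimage_hi : 4;
    unsigned himage_hi : 4;
};

int main(void)
{
    /* hypothetical panel: 294 mm x 166 mm (0x126 x 0x0A6) */
    struct dvo_image_size sz = {
        .himage_lo = 0x26, .himage_hi = 0x1,
        .vimage_lo = 0xA6, .vimage_hi = 0x0,
    };
    unsigned width_mm  = (sz.himage_hi << 8) | sz.himage_lo;
    unsigned height_mm = (sz.vimage_hi << 8) | sz.vimage_lo;
    printf("%u x %u mm\n", width_mm, height_mm);  /* prints 294 x 166 mm */
    return 0;
}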
*/ +#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_KBL); +#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) + #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_SKL); +#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) + #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_BXT); +#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" -MODULE_FIRMWARE(I915_CSR_SKL); -MODULE_FIRMWARE(I915_CSR_BXT); -#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) -#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) + #define CSR_MAX_FW_SIZE 0x2FFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF @@ -169,12 +175,10 @@ struct stepping_info { char substepping; }; -/* - * Kabylake derivated from Skylake H0, so SKL H0 - * is the right firmware for KBL A0 (revid 0). - */ static const struct stepping_info kbl_stepping_info[] = { - {'H', '0'}, {'I', '0'} + {'A', '0'}, {'B', '0'}, {'C', '0'}, + {'D', '0'}, {'E', '0'}, {'F', '0'}, + {'G', '0'}, {'H', '0'}, {'I', '0'}, }; static const struct stepping_info skl_stepping_info[] = { @@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_KABYLAKE(dev_priv)) { + required_min_version = KBL_CSR_VERSION_REQUIRED; + } else if (IS_SKYLAKE(dev_priv)) { required_min_version = SKL_CSR_VERSION_REQUIRED; } else if (IS_BROXTON(dev_priv)) { required_min_version = BXT_CSR_VERSION_REQUIRED; @@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_KABYLAKE(dev_priv)) + csr->fw_path = I915_CSR_KBL; + else if (IS_SKYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; @@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. 
This is only under driver's control after @@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else { - final |= DREF_SSC_SOURCE_DISABLE; - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; } if (final == val) @@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -8427,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) tmp |= FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) DRM_ERROR("FDI mPHY reset assert timeout\n"); tmp = I915_READ(SOUTH_CHICKEN2); tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) DRM_ERROR("FDI mPHY reset de-assert timeout\n"); } @@ -9420,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, val |= LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + if (wait_for_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) DRM_ERROR("Switching to FCLK failed\n"); val = I915_READ(LCPLL_CTL); @@ -9494,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + if (wait_for_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); } @@ -11977,6 +11997,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, ret = intel_color_check(crtc, crtc_state); if (ret) return ret; + + /* + * Changing color management on Intel hardware is + * handled as part of planes update. + */ + crtc_state->planes_changed = true; } ret = 0; @@ -14554,6 +14580,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + bool has_edp, has_port; + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. 
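The wait_for_atomic_us() to wait_for_us() conversions above swap a busy-wait poll for a sleeping one, which is only legal because these callers run in sleepable context. A toy user-space model of the sleeping variant's shape, assuming the kernel helper's check-then-timeout ordering (the real macros live in intel_drv.h and differ in detail):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>
#include <stdbool.h>

/* sleeping poll loop in the spirit of wait_for_us(); the _atomic
 * variants spin instead of sleeping, which is only safe (and only
 * needed) in atomic context */
static bool wait_for_us(bool (*cond)(void), long timeout_us)
{
    struct timespec start, now;
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (cond())
            return true;               /* condition met */
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
                       (now.tv_nsec - start.tv_nsec) / 1000L;
        if (elapsed > timeout_us)
            return cond();             /* one final check, then time out */
        nanosleep(&(struct timespec){ .tv_nsec = 10000 }, NULL); /* sleep, don't spin */
    }
}

static int polls;
static bool reg_ready(void) { return ++polls > 5; }  /* fake status register */

int main(void)
{
    printf("ready: %d (after %d polls)\n", wait_for_us(reg_ready, 100000), polls);
    return 0;
}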
However since eDP doesn't require DDC @@ -14562,27 +14590,37 @@ static void intel_setup_outputs(struct drm_device *dev) * Thus we can't rely on the DP_DETECTED bit alone to detect * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. + * + * Sadly the straps seem to be missing sometimes even for HDMI + * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap + * and VBT for the presence of the port. Additionally we can't + * trust the port type the VBT declares as we've seen at least + * HDMI ports that the VBT claim are DP or eDP. */ - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_B)) + has_edp = intel_dp_is_edp(dev, PORT_B); + has_port = intel_bios_is_port_present(dev_priv, PORT_B); + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_B)) - intel_dp_init(dev, VLV_DP_B, PORT_B); - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_C)) + has_edp = intel_dp_is_edp(dev, PORT_C); + has_port = intel_bios_is_port_present(dev_priv, PORT_C); + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_C)) - intel_dp_init(dev, VLV_DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { - /* eDP not supported on port D, so don't check VBT */ - if (I915_READ(CHV_HDMID) & SDVO_DETECTED) - intel_hdmi_init(dev, CHV_HDMID, PORT_D); - if (I915_READ(CHV_DP_D) & DP_DETECTED) + /* + * eDP not supported on port D, + * so no need to worry about it + */ + has_port = intel_bios_is_port_present(dev_priv, PORT_D); + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) intel_dp_init(dev, CHV_DP_D, PORT_D); + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) + intel_hdmi_init(dev, CHV_HDMID, PORT_D); } intel_dsi_init(dev); @@ -663,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, msecs_to_jiffies_timeout(10)); else - done = wait_for_atomic(C, 10) == 0; + done = wait_for(C, 10) == 0; if (!done) DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", has_aux_irq); @@ -4645,7 +4645,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) intel_dp->detect_done = false; - if (intel_connector->detect_edid) + if (is_edp(intel_dp) || intel_connector->detect_edid) return connector_status_connected; else return connector_status_disconnected; @@ -4899,13 +4899,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) void intel_dp_encoder_reset(struct drm_encoder *encoder) { - struct intel_dp *intel_dp; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!HAS_DDI(dev_priv)) + intel_dp->DP = I915_READ(intel_dp->output_reg); if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) return; - intel_dp = enc_to_intel_dp(encoder); - pps_lock(intel_dp); /* @@ -4977,9 +4979,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_display_power_get(dev_priv, power_domain); if (long_hpd) { - /* indicate that we need to restart link training */ - intel_dp->train_set_valid = false; - 
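The VLV/CHV hunk above stops trusting the latched straps alone and ORs in the VBT's opinion, while demoting HDMI registration behind a successful eDP init. A compact model of that decision order (function names and the probe_port() wrapper are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

/* toy model of the probe order: straps OR the VBT decide whether to
 * probe a port, DP registration runs first, and a successful eDP
 * probe suppresses HDMI on the same port */
static bool dp_init(int port)   { printf("DP/eDP on port %d\n", port); return true; }
static bool hdmi_init(int port) { printf("HDMI on port %d\n", port); return true; }

static void probe_port(int port, bool dp_strap, bool hdmi_strap,
                       bool vbt_says_edp, bool vbt_says_present)
{
    bool has_edp = vbt_says_edp;
    if (dp_strap || vbt_says_present)
        has_edp &= dp_init(port);      /* only trust eDP if init succeeded */
    if ((hdmi_strap || vbt_says_present) && !has_edp)
        hdmi_init(port);
}

int main(void)
{
    /* e.g. the Voyo V3 case: straps missing, but the VBT lists the port */
    probe_port(1, false, false, false, true);
    return 0;
}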
intel_dp_long_pulse(intel_dp->attached_connector); if (intel_dp->is_mst) ret = IRQ_HANDLED; @@ -5725,8 +5724,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) + if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + } } mutex_unlock(&dev->mode_config.mutex); @@ -5923,9 +5925,9 @@ fail: return false; } -void -intel_dp_init(struct drm_device *dev, - i915_reg_t output_reg, enum port port) +bool intel_dp_init(struct drm_device *dev, + i915_reg_t output_reg, + enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; @@ -5935,7 +5937,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); if (!intel_dig_port) - return; + return false; intel_connector = intel_connector_alloc(); if (!intel_connector) @@ -5992,7 +5994,7 @@ intel_dp_init(struct drm_device *dev, if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; - return; + return true; err_init_connector: drm_encoder_cleanup(encoder); @@ -6000,8 +6002,7 @@ err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(intel_dig_port); - - return; + return false; } void intel_dp_mst_suspend(struct drm_device *dev) @@ -85,8 +85,7 @@ static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, uint8_t dp_train_pat) { - if (!intel_dp->train_set_valid) - memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp); return intel_dp_set_link_train(intel_dp, dp_train_pat); } @@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) break; } - /* - * if we used previously trained voltage and pre-emphasis values - * and we don't get clock recovery, reset link training values - */ - if (intel_dp->train_set_valid) { - DRM_DEBUG_KMS("clock recovery not ok, reset"); - /* clear the flag as we are not reusing train set */ - intel_dp->train_set_valid = false; - if (!intel_dp_reset_link_train(intel_dp, - DP_TRAINING_PATTERN_1 | - DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to enable link training\n"); - return; - } - continue; - } - /* Check to see if we've tried the max voltage */ for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) @@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { - intel_dp->train_set_valid = false; intel_dp_link_training_clock_recovery(intel_dp); intel_dp_set_link_train(intel_dp, training_pattern | @@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Try 5 times, then try clock recovery if that fails */ if (tries > 5) { - intel_dp->train_set_valid = false; intel_dp_link_training_clock_recovery(intel_dp); intel_dp_set_link_train(intel_dp, training_pattern | @@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) intel_dp_set_idle_link_train(intel_dp); - if (channel_eq) { - intel_dp->train_set_valid = true; + if (channel_eq) DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); - } } void intel_dp_stop_link_train(struct intel_dp *intel_dp) @@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, DPLL_ID_PCH_PLL_B); } + if (!pll) + return NULL; + /* reference the pll */ intel_reference_shared_dpll(pll, crtc_state); @@ -1374,8 +1377,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); POSTING_READ(BXT_PORT_PLL_ENABLE(port)); - if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_LOCK), 200)) + if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), + 200)) DRM_ERROR("PLL %d not locked\n", port); /* @@ -863,8 +863,6 @@ struct intel_dp { /* This is called before a link training is starterd */ void (*prepare_link_retrain)(struct intel_dp *intel_dp); - bool train_set_valid; - /* Displayport compliance testing */ unsigned long compliance_test_type; unsigned long compliance_test_data; @@ -1284,7 +1282,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ -void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); +bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev) goto err; } + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_dsi_add_properties(intel_connector); @@ -824,8 +824,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_fbc *fbc = &dev_priv->fbc; - bool enable_by_default = IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv); + bool enable_by_default = IS_BROADWELL(dev_priv); if (intel_vgpu_active(dev_priv->dev)) { fbc->no_fbc_reason = "VGPU is active"; @@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", + port_name(port)); + if (WARN(intel_dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on port %c\n", intel_dig_port->max_lanes, port_name(port))) @@ -1103,15 +1103,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t index) { + struct drm_i915_private *dev_priv = engine->dev->dev_private; uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); /* - * WaDisableLSQCROPERFforOCL:skl + * WaDisableLSQCROPERFforOCL:skl,kbl * This WA is implemented in skl_init_clock_gating() but since * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. 
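Most of the Kabylake changes in this series are gated with IS_KBL_REVID(), the inclusive range check built from the KBL_REVID_* steppings defined earlier. A minimal sketch of how such a revid window behaves (struct and macro bodies are simplified models, not i915's):

#include <stdio.h>

/* revision-range gating in the spirit of IS_KBL_REVID(): a workaround
 * applies from stepping `since` through `until`, both inclusive */
#define KBL_REVID_A0 0x0
#define KBL_REVID_B0 0x1
#define KBL_REVID_E0 0x4

struct dev { int is_kbl; int revid; };
#define IS_REVID(p, since, until) ((p)->revid >= (since) && (p)->revid <= (until))
#define IS_KBL_REVID(p, since, until) ((p)->is_kbl && IS_REVID(p, since, until))

int main(void)
{
    struct dev kbl_c0 = { .is_kbl = 1, .revid = 0x2 };
    /* pre-production WA valid through E0, so a C0 part still gets it */
    if (IS_KBL_REVID(&kbl_c0, 0, KBL_REVID_E0))
        printf("apply WaDisableLSQCROPERFforOCL\n");
    /* B0-bounded WA: C0 already carries the hardware fix */
    if (!IS_KBL_REVID(&kbl_c0, 0, KBL_REVID_B0))
        printf("skip WaForGAMHang on C0\n");
    return 0;
}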
*/ - if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) || + IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | @@ -1273,6 +1275,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, { int ret; struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = dev->dev_private; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ @@ -1286,6 +1289,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, return ret; index = ret; + /* WaClearSlmSpaceAtContextSwitch:kbl */ + /* Actual scratch location is at 128 bytes offset */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { + uint32_t scratch_addr + = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + + wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); + wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + wa_ctx_emit(batch, index, scratch_addr); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + } /* Pad to end of cacheline */ while (index % CACHELINE_DWORDS) wa_ctx_emit(batch, index, MI_NOOP); @@ -1687,9 +1706,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; - bool vf_flush_wa = false; + bool vf_flush_wa = false, dc_flush_wa = false; u32 flags = 0; int ret; + int len; flags |= PIPE_CONTROL_CS_STALL; @@ -1716,9 +1736,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, */ if (IS_GEN9(engine->dev)) vf_flush_wa = true; + + /* WaForGAMHang:kbl */ + if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) + dc_flush_wa = true; } - ret = intel_ring_begin(request, vf_flush_wa ? 
12 : 6); + len = 6; + + if (vf_flush_wa) + len += 6; + + if (dc_flush_wa) + len += 12; + + ret = intel_ring_begin(request, len); if (ret) return ret; @@ -1731,12 +1763,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, intel_logical_ring_emit(ringbuf, 0); } + if (dc_flush_wa) { + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + } + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); intel_logical_ring_emit(ringbuf, flags); intel_logical_ring_emit(ringbuf, scratch_addr); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, 0); + + if (dc_flush_wa) { + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + } + intel_logical_ring_advance(ringbuf); return 0; @@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev) fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; goto out; } } @@ -1038,5 +1038,16 @@ intel_opregion_get_panel_type(struct drm_device *dev) return -ENODEV; } + /* + * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us + * low vswing for eDP, whereas the VBT panel type (2) gives us normal + * vswing instead. Low vswing results in some display flickers, so + * let's simply ignore the OpRegion panel type on SKL for now. 
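The gen8_emit_flush_render() rework above replaces the fixed 12-or-6 dword reservation with a running len that each conditional workaround grows before the single intel_ring_begin() call; the emit side must then produce exactly that many dwords. A self-contained model of that bookkeeping (ring structure and sizes are invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* every conditional WA adds its dword count to `len` before one
 * reservation, and the emit side must match it exactly */
struct ring { uint32_t buf[64]; int reserved, emitted; };

static int ring_begin(struct ring *r, int dwords) { r->reserved = dwords; return 0; }
static void ring_emit(struct ring *r, uint32_t dw)
{
    assert(r->emitted < r->reserved);  /* overflowing the reservation is a bug */
    r->buf[r->emitted++] = dw;
}

static void emit_flush(struct ring *r, bool vf_flush_wa, bool dc_flush_wa)
{
    int i, len = 6;                    /* the flush PIPE_CONTROL itself */
    if (vf_flush_wa) len += 6;         /* leading dummy PIPE_CONTROL */
    if (dc_flush_wa) len += 12;        /* DC-flush + CS-stall pair */
    if (ring_begin(r, len))
        return;
    for (i = 0; i < len; i++)
        ring_emit(r, 0);               /* stand-in for the real dwords */
    assert(r->emitted == r->reserved);
}

int main(void) { struct ring r = { 0 }; emit_flush(&r, true, true); return 0; }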
+ */ + if (IS_SKYLAKE(dev)) { + DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1); + return -ENODEV; + } + return ret - 1; } @@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = bxt_hz_to_pwm; - } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) { + } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || + HAS_PCH_KBP(dev_priv)) { panel->backlight.setup = lpt_setup_backlight; panel->backlight.enable = lpt_enable_backlight; panel->backlight.disable = lpt_disable_backlight; @@ -54,10 +54,38 @@ #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) +static void gen9_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); + + I915_WRITE(GEN8_CONFIG0, + I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); + + /* WaEnableChickenDCPR:skl,bxt,kbl */ + I915_WRITE(GEN8_CHICKEN_DCPR_1, + I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + + /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */ + /* WaFbcWakeMemOn:skl,bxt,kbl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS | + DISP_FBC_MEMORY_WAKE); + + /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_DISABLE_DUMMY0); +} + static void bxt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + gen9_init_clock_gating(dev); + /* WaDisableSDEUnitClockGating:bxt */ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); @@ -6698,6 +6726,38 @@ static void lpt_suspend_hw(struct drm_device *dev) } } +static void kabylake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen9_init_clock_gating(dev); + + /* WaDisableSDEUnitClockGating:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + GEN8_SDEUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableGamClockGating:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | + GEN6_GAMUNIT_CLOCK_GATE_DISABLE); + + /* WaFbcNukeOnHostModify:kbl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_NUKE_ON_ANY_MODIFICATION); +} + +static void skylake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen9_init_clock_gating(dev); + + /* WaFbcNukeOnHostModify:skl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_NUKE_ON_ANY_MODIFICATION); +} + static void broadwell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -7163,9 +7223,9 @@ static void nop_init_clock_gating(struct drm_device *dev) void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { if (IS_SKYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = kabylake_init_clock_gating; else if (IS_BROXTON(dev_priv)) dev_priv->display.init_clock_gating = bxt_init_clock_gating; else if (IS_BROADWELL(dev_priv)) @@ -913,24 
+913,26 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) { struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t tmp; int ret; - /* WaEnableLbsSlaRetryTimerDecrement:skl */ + /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ + I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); + + /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); - /* WaDisableKillLogic:bxt,skl */ + /* WaDisableKillLogic:bxt,skl,kbl */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); - /* WaClearFlowControlGpgpuContextSave:skl,bxt */ - /* WaDisablePartialInstShootdown:skl,bxt */ + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */ + /* WaDisablePartialInstShootdown:skl,bxt,kbl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* Syncing dependencies between camera and graphics:skl,bxt */ + /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); @@ -952,18 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) */ } - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ - /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */ + /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */ + /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, GEN9_ENABLE_YV12_BUGFIX | GEN9_ENABLE_GPGPU_PREEMPTION); - /* Wa4x4STCOptimizationDisable:skl,bxt */ - /* WaDisablePartialResolveInVc:skl,bxt */ + /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */ + /* WaDisablePartialResolveInVc:skl,bxt,kbl */ WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); - /* WaCcsTlbPrefetchDisable:skl,bxt */ + /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_CCS_TLB_PREFETCH_ENABLE); @@ -973,31 +975,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); - /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ - tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; - if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || - IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) - tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; - WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); + /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); + + /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are + * both tied to WaForceContextSaveRestoreNonCoherent + * in some hsds for skl. We keep the tie for all gen9. The + * documentation is a bit hazy and so we want to get common behaviour, + * even though there is no clear evidence we would need both on kbl/bxt. + * This area has been source of system hangs so we play it safe + * and mimic the skl regardless of what bspec says. + * + * Use Force Non-Coherent whenever executing a 3D context. This + * is a workaround for a possible hang in the unlikely event + * a TLB invalidation occurs during a PSD flush. 
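The WA_SET_BIT_MASKED() writes used throughout gen9_init_workarounds() target "masked" registers, where the upper 16 bits of the written value select which low bits take effect, so single bits can be set without a read-modify-write. A small model of that semantics (macro names shortened, the register modeled as a plain variable):

#include <assert.h>
#include <stdint.h>

/* upper 16 bits = write-enable mask, lower 16 bits = new values */
#define MASKED_BIT_ENABLE(a)  ((uint32_t)(a) << 16 | (a))
#define MASKED_BIT_DISABLE(a) ((uint32_t)(a) << 16)

static uint16_t masked_write(uint16_t reg, uint32_t val)
{
    uint16_t mask = val >> 16;
    return (reg & ~mask) | (val & mask);   /* unmasked bits keep their value */
}

int main(void)
{
    uint16_t chicken = 0x0003;
    chicken = masked_write(chicken, MASKED_BIT_ENABLE(1 << 2));
    assert(chicken == 0x0007);             /* bit 2 set, bits 0-1 untouched */
    chicken = masked_write(chicken, MASKED_BIT_DISABLE(1 << 0));
    assert(chicken == 0x0006);             /* bit 0 cleared, rest untouched */
    return 0;
}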
+ */ - /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ - if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) + /* WaForceEnableNonCoherent:skl,bxt,kbl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT); + + /* WaDisableHDCInvalidation:skl,bxt,kbl */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + BDW_DISABLE_HDC_INVALIDATION); + + /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ + if (IS_SKYLAKE(dev_priv) || + IS_KABYLAKE(dev_priv) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); - /* WaDisableSTUnitPowerOptimization:skl,bxt */ + /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); - /* WaOCLCoherentLineFlush:skl,bxt */ + /* WaOCLCoherentLineFlush:skl,bxt,kbl */ I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES)); - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */ + ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); + if (ret) + return ret; + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); if (ret) return ret; - /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ + /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); if (ret) return ret; @@ -1092,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); - /* This is tied to WaForceContextSaveRestoreNonCoherent */ - if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { - /* - * Use Force Non-Coherent whenever executing a 3D context. This - * is a workaround for a possible hang in the unlikely event - * a TLB invalidation occurs during a PSD flush. 
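The wa_ring_whitelist_reg() calls above (here gaining GEN9_CTX_PREEMPT_REG) consume per-engine RING_FORCE_TO_NONPRIV slots, of which the hardware has only a handful, hence every call's return value is checked. A sketch of that slot accounting (the 12-slot limit and table layout are illustrative, not the driver's exact bookkeeping):

#include <stdio.h>
#include <stdint.h>

#define MAX_NONPRIV_SLOTS 12           /* assumed hardware limit */

struct whitelist { uint32_t reg[MAX_NONPRIV_SLOTS]; int count; };

static int whitelist_reg(struct whitelist *w, uint32_t reg)
{
    if (w->count >= MAX_NONPRIV_SLOTS)
        return -1;                     /* out of slots: propagate the error */
    w->reg[w->count++] = reg;          /* userspace may now write this reg */
    return 0;
}

int main(void)
{
    struct whitelist w = { { 0 }, 0 };
    if (whitelist_reg(&w, 0x2248) ||   /* GEN9_CTX_PREEMPT_REG */
        whitelist_reg(&w, 0x2580))     /* GEN8_CS_CHICKEN1 */
        return 1;
    printf("%d registers whitelisted\n", w.count);
    return 0;
}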
- */ - /* WaForceEnableNonCoherent:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FORCE_NON_COHERENT); - - /* WaDisableHDCInvalidation:skl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); - } - /* WaBarrierPerformanceFixDisable:skl */ if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, @@ -1120,6 +1132,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaDisableGafsUnitClkGating:skl */ + WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1174,6 +1189,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) return ret; } + /* WaInsertDummyPushConstPs:bxt */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + return 0; +} + +static int kbl_init_workarounds(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->dev->dev_private; + int ret; + + ret = gen9_init_workarounds(engine); + if (ret) + return ret; + + /* WaEnableGapsTsvCreditFix:kbl */ + I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | + GEN9_GAPS_TSV_CREDIT_DISABLE)); + + /* WaDisableDynamicCreditSharing:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + WA_SET_BIT(GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + + /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ + if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FENCE_DEST_SLM_DISABLE); + + /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes + * involving this register should also be added to WA batch as required. 
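The kbl_init_workarounds() introduced here follows the same layering as the skl/bxt variants: run the shared gen9 pass first, bail on its error, then stack the platform-only workarounds. Reduced to its control flow (a toy sketch, not the driver code):

#include <stdio.h>

static int gen9_init_workarounds(void) { puts("gen9 common WAs"); return 0; }

static int kbl_init_workarounds(void)
{
    int ret = gen9_init_workarounds();
    if (ret)
        return ret;       /* never stack KBL WAs on top of a failed base */
    puts("kbl-only WAs");
    return 0;
}

int main(void) { return kbl_init_workarounds(); }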
+ */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) + /* WaDisableLSQCROPERFforOCL:kbl */ + I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | + GEN8_LQSC_RO_PERF_DIS); + + /* WaInsertDummyPushConstPs:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableGafsUnitClkGating:kbl */ + WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableSbeCacheDispatchPortSharing:kbl */ + WA_SET_BIT_MASKED( + GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + + /* WaDisableLSQCROPERFforOCL:kbl */ + ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); + if (ret) + return ret; + return 0; } @@ -1199,6 +1271,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine) if (IS_BROXTON(dev)) return bxt_init_workarounds(engine); + if (IS_KABYLAKE(dev_priv)) + return kbl_init_workarounds(engine); + return 0; } @@ -403,9 +403,10 @@ struct lvds_dvo_timing { u8 vsync_off:4; u8 rsvd0:6; u8 hsync_off_hi:2; - u8 h_image; - u8 v_image; - u8 max_hv; + u8 himage_lo; + u8 vimage_lo; + u8 vimage_hi:4; + u8 himage_hi:4; u8 h_border; u8 v_border; u8 rsvd1:3; @@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) return NULL; } -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, - int hsync_pin, int vsync_pin) +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags) { struct imx_drm_crtc_helper_funcs *helper; struct imx_drm_crtc *imx_crtc; @@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, helper = &imx_crtc->imx_drm_helper_funcs; if (helper->set_interface_pix_fmt) return helper->set_interface_pix_fmt(encoder->crtc, - bus_format, hsync_pin, vsync_pin); + bus_format, hsync_pin, vsync_pin, + bus_flags); return 0; } -EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); +EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) { - return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); + return imx_drm_set_bus_config(encoder, bus_format, 2, 3, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); } EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); @@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); int (*set_interface_pix_fmt)(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin); + u32 bus_format, int hsync_pin, int vsync_pin, + u32 bus_flags); const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; }; @@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, - u32 bus_format, int hsync_pin, int vsync_pin); +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format); @@ -25,6 +25,7 @@ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <video/of_display_timing.h> #include <video/of_videomode.h> #include <linux/regmap.h> #include <linux/videodev2.h> @@ -59,6 +60,7 @@ struct imx_ldb_channel { struct drm_encoder encoder; struct drm_panel *panel; struct device_node 
*child; + struct i2c_adapter *ddc; int chno; void *edid; int edid_len; @@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) return num_modes; } + if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) + imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); + if (imx_ldb_ch->edid) { drm_mode_connector_update_edid_property(connector, imx_ldb_ch->edid); @@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; - struct device_node *port; + struct device_node *ddc_node; + struct device_node *ep; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) @@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = of_graph_get_remote_port_parent(endpoint); - if (remote) - channel->panel = of_drm_find_panel(remote); - else - return -EPROBE_DEFER; - if (!channel->panel) { - dev_err(dev, "panel not found: %s\n", - remote->full_name); - return -EPROBE_DEFER; - } + ep = of_graph_get_endpoint_by_regs(child, + imx_ldb->lvds_mux ? 4 : 2, + -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (remote) + channel->panel = of_drm_find_panel(remote); + else + return -EPROBE_DEFER; + of_node_put(remote); + if (!channel->panel) { + dev_err(dev, "panel not found: %s\n", + remote->full_name); + return -EPROBE_DEFER; } } - edidp = of_get_property(child, "edid", &channel->edid_len); - if (edidp) { - channel->edid = kmemdup(edidp, channel->edid_len, - GFP_KERNEL); - } else if (!channel->panel) { - ret = of_get_drm_display_mode(child, &channel->mode, 0); - if (!ret) - channel->mode_valid = 1; + ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); + if (ddc_node) { + channel->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); + if (!channel->ddc) { + dev_warn(dev, "failed to get ddc i2c adapter\n"); + return -EPROBE_DEFER; + } + } + + if (!channel->ddc) { + /* if no DDC available, fallback to hardcoded EDID */ + dev_dbg(dev, "no ddc available\n"); + + edidp = of_get_property(child, "edid", + &channel->edid_len); + if (edidp) { + channel->edid = kmemdup(edidp, + channel->edid_len, + GFP_KERNEL); + } else if (!channel->panel) { + /* fallback to display-timings node */ + ret = of_get_drm_display_mode(child, + &channel->mode, + OF_USE_NATIVE_MODE); + if (!ret) + channel->mode_valid = 1; + } } channel->bus_format = of_get_bus_format(dev, child); @@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, channel->encoder.funcs->destroy(&channel->encoder); kfree(channel->edid); + i2c_put_adapter(channel->ddc); } } @@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) switch (tve->mode) { case TVE_MODE_VGA: - imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, - tve->hsync_pin, tve->vsync_pin); + imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, + tve->hsync_pin, tve->vsync_pin, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); break; case TVE_MODE_TVOUT: imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); @@ -66,6 +66,7 @@ struct ipu_crtc { 
struct ipu_flip_work *flip_work; int irq; u32 bus_format; + u32 bus_flags; int di_hsync_pin; int di_vsync_pin; }; @@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, else sig_cfg.clkflags = 0; - sig_cfg.enable_pol = 1; - sig_cfg.clk_pol = 0; + sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); + /* Default to driving pixel data on negative clock edges */ + sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & + DRM_BUS_FLAG_PIXDATA_POSEDGE); sig_cfg.bus_format = ipu_crtc->bus_format; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; @@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin) + u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_crtc->bus_format = bus_format; + ipu_crtc->bus_flags = bus_flags; ipu_crtc->di_hsync_pin = hsync_pin; ipu_crtc->di_vsync_pin = vsync_pin; @@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = { DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRA8888, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_YUV420, @@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc != plane->crtc) dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", plane->crtc, crtc); - plane->crtc = crtc; if (!ipu_plane->enabled) ipu_plane_enable(ipu_plane); @@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) kfree(ipu_plane); } -static struct drm_plane_funcs ipu_plane_funcs = { +static const struct drm_plane_funcs ipu_plane_funcs = { .update_plane = ipu_update_plane, .disable_plane = ipu_disable_plane, .destroy = ipu_plane_destroy, @@ -35,7 +35,6 @@ struct imx_parallel_display { void *edid; int edid_len; u32 bus_format; - int mode_valid; struct drm_display_mode mode; struct drm_panel *panel; }; @@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) num_modes = drm_add_edid_modes(connector, imxpd->edid); } - if (imxpd->mode_valid) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - - if (!mode) - return -EINVAL; - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - drm_mode_probed_add(connector, mode); - num_modes++; - } - if (np) { struct drm_display_mode *mode = drm_mode_create(connector->dev); @@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) static void imx_pd_encoder_prepare(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - - imx_drm_set_bus_format(encoder, imxpd->bus_format); + imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, + imxpd->connector.display_info.bus_flags); } static void imx_pd_encoder_commit(struct drm_encoder *encoder) @@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *port; + struct device_node *ep; const u8 *edidp; struct imx_parallel_display *imxpd; int ret; @@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) } /* port@1 is the output port */ - port = of_graph_get_port_by_id(np, 1); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = 
of_graph_get_remote_port_parent(endpoint); - if (remote) - imxpd->panel = of_drm_find_panel(remote); - if (!imxpd->panel) - return -EPROBE_DEFER; + ep = of_graph_get_endpoint_by_regs(np, 1, -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (remote) { + imxpd->panel = of_drm_find_panel(remote); + of_node_put(remote); } + if (!imxpd->panel) + return -EPROBE_DEFER; } imxpd->dev = dev; @@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, unsigned long pll_rate; unsigned int factor; - if (!dpi) { - dev_err(dpi->dev, "invalid argument\n"); - return -EINVAL; - } - pix_rate = 1000UL * mode->clock; if (mode->clock <= 74000) factor = 8 * 3; @@ -695,10 +695,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) { drm_encoder_cleanup(&dsi->encoder); /* Skip connector cleanup if creation was delegated to the bridge */ - if (dsi->conn.dev) { - drm_connector_unregister(&dsi->conn); + if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); - } } static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) } } - fvv = pllreffreq * testn / testm; + fvv = pllreffreq * (n + 1) / (m + 1); fvv = (fvv - 800000) / 50000; if (fvv > 15) @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) WREG_DAC(MGA1064_PIX_PLLC_M, m); WREG_DAC(MGA1064_PIX_PLLC_N, n); WREG_DAC(MGA1064_PIX_PLLC_P, p); + + if (mdev->unique_rev_id >= 0x04) { + WREG_DAC(0x1a, 0x09); + msleep(20); + WREG_DAC(0x1a, 0x01); + + } + return 0; } @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, } adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); - if (!adreno_gpu->memptrs) { + if (IS_ERR(adreno_gpu->memptrs)) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + if (IS_ERR(fbi->screen_base)) { + ret = PTR_ERR(fbi->screen_base); + goto fail_unlock; + } fbi->screen_size = fbdev->bo->size; fbi->fix.smem_start = paddr; fbi->fix.smem_len = fbdev->bo->size; @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) return ERR_CAST(pages); msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (msm_obj->vaddr == NULL) + return ERR_PTR(-ENOMEM); } return msm_obj->vaddr; } @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->dev = dev; submit->gpu = gpu; + submit->fence = NULL; submit->pid = get_pid(task_pid(current)); /* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; submit->nr_cmds = 0; + INIT_LIST_HEAD(&submit->node); INIT_LIST_HEAD(&submit->bo_list); ww_acquire_init(&submit->ticket, &reservation_ww_class); @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, void __user *userptr = u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + submit->bos[i].flags = 0; + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); if (ret) { ret = -EFAULT; @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) struct msm_gem_object *obj = submit->bos[idx].obj; const char *buf = msm_gem_vaddr_locked(&obj->base); + if 
(IS_ERR(buf)) + continue; + buf += iova - submit->bos[idx].iova; rd_write_section(rd, RD_GPUADDR, @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) } ring->start = msm_gem_vaddr_locked(ring->bo); + if (IS_ERR(ring->start)) { + ret = PTR_ERR(ring->start); + goto fail; + } ring->end = ring->start + (size / 4); ring->cur = ring->start; @@ -16,9 +16,9 @@ enum nvkm_devidx { NVKM_SUBDEV_MC, NVKM_SUBDEV_BUS, NVKM_SUBDEV_TIMER, + NVKM_SUBDEV_INSTMEM, NVKM_SUBDEV_FB, NVKM_SUBDEV_LTC, - NVKM_SUBDEV_INSTMEM, NVKM_SUBDEV_MMU, NVKM_SUBDEV_BAR, NVKM_SUBDEV_PMU, @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); struct nvbios_ocfg { - u16 match; + u8 proto; + u8 flags; u16 clkcmp[2]; }; @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); -u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, +u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); #endif @@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev) if (ret) goto fini; + if (fbcon->helper.fbdev) + fbcon->helper.fbdev->pixmap.buf_align = 4; return 0; fini: @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t fg; uint32_t bg; uint32_t dsize; - uint32_t width; uint32_t *data = (uint32_t *)image->data; int ret; @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 8); - dsize = ALIGN(width * image->height, 32) >> 5; - if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + dsize = ALIGN(image->width * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NV04(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; @@ -1614,7 +1614,7 @@ nvkm_device_pci_func = { .fini = nvkm_device_pci_fini, .resource_addr = nvkm_device_pci_resource_addr, .resource_size = nvkm_device_pci_resource_size, - .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), + .cpu_coherent = !IS_ENABLED(CONFIG_ARM), }; int @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o nvkm-y += nvkm/engine/disp/sornv50.o nvkm-y += nvkm/engine/disp/sorg94.o nvkm-y += nvkm/engine/disp/sorgf119.o +nvkm-y += nvkm/engine/disp/sorgm107.o nvkm-y += nvkm/engine/disp/sorgm200.o nvkm-y += nvkm/engine/disp/dport.o @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, mask |= 0x0001 << or; mask |= 0x0100 << head; + list_for_each_entry(outp, &disp->base.outp, head) { if ((outp->info.hasht & 0xff) == type && (outp->info.hashm & mask) == mask) { @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) if (!outp) return NULL; + *conf = (ctrl & 0x00000f00) >> 8; switch (outp->info.type) { case DCB_OUTPUT_TMDS: - *conf = (ctrl & 0x00000f00) >> 8; if (*conf == 5) *conf |= 0x0100; break; case DCB_OUTPUT_LVDS: - *conf = disp->sor.lvdsconf; - break; - case DCB_OUTPUT_DP: - *conf = (ctrl & 0x00000f00) >> 8; + *conf |= disp->sor.lvdsconf; break; - case DCB_OUTPUT_ANALOG: default: - *conf = 0x00ff; break; } - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, + &ver, &hdr, &cnt, &len, &info2); if (data && id < 0xff) { data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); if (data) { @@ -36,7 +36,7 @@ gm107_disp = { .outp.internal.crt = nv50_dac_output_new, .outp.internal.tmds = nv50_sor_output_new, .outp.internal.lvds = nv50_sor_output_new, - .outp.internal.dp = gf119_sor_dp_new, + .outp.internal.dp = gm107_sor_dp_new, .dac.nr = 3, .dac.power = nv50_dac_power, .dac.sense = nv50_dac_sense, @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) if (!outp) return NULL; + *conf = (ctrl & 0x00000f00) >> 8; if (outp->info.location == 0) { switch (outp->info.type) { case DCB_OUTPUT_TMDS: - *conf = (ctrl & 0x00000f00) >> 8; if (*conf == 5) *conf |= 0x0100; break; case DCB_OUTPUT_LVDS: - *conf = disp->sor.lvdsconf; + *conf |= disp->sor.lvdsconf; break; - case DCB_OUTPUT_DP: - *conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: default: - *conf = 0x00ff; break; } } else { @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp 
*disp, int head, int id, u32 pclk, u32 *conf) pclk = pclk / 2; } - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, + &ver, &hdr, &cnt, &len, &info2); if (data && id < 0xff) { data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); if (data) { @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, struct nvkm_output **); int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); +int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); -int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, - struct nvkm_output **); +int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, + struct nvkm_output **); +int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); + +int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, + struct nvkm_output **); #endif @@ -40,8 +40,8 @@ static int gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; - const u32 loff = gf119_sor_loff(outp); - nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); + const u32 soff = gf119_sor_soff(outp); + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern); return 0; } @@ -64,7 +64,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) return 0; } -static int +int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc) { @@ -0,0 +1,53 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "outpdp.h" + +int +gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) +{ + struct nvkm_device *device = outp->base.disp->engine.subdev.device; + const u32 soff = outp->base.or * 0x800; + const u32 data = 0x01010101 * pattern; + if (outp->base.info.sorconf.link & 1) + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); + else + nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); + return 0; +} + +static const struct nvkm_output_dp_func +gm107_sor_dp_func = { + .pattern = gm107_sor_dp_pattern, + .lnk_pwr = g94_sor_dp_lnk_pwr, + .lnk_ctl = gf119_sor_dp_lnk_ctl, + .drv_ctl = gf119_sor_dp_drv_ctl, +}; + +int +gm107_sor_dp_new(struct nvkm_disp *disp, int index, + struct dcb_output *dcbE, struct nvkm_output **poutp) +{ + return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); +} @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) } static int -gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) -{ - struct nvkm_device *device = outp->base.disp->engine.subdev.device; - const u32 soff = gm200_sor_soff(outp); - const u32 data = 0x01010101 * pattern; - if (outp->base.info.sorconf.link & 1) - nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); - else - nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); - return 0; -} - -static int gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, static const struct nvkm_output_dp_func gm200_sor_dp_func = { - .pattern = gm200_sor_dp_pattern, + .pattern = gm107_sor_dp_pattern, .lnk_pwr = gm200_sor_dp_lnk_pwr, .lnk_ctl = gf119_sor_dp_lnk_ctl, .drv_ctl = gm200_sor_dp_drv_ctl, @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) } static const struct nvkm_enum gf100_mp_warp_error[] = { - { 0x00, "NO_ERROR" }, - { 0x01, "STACK_MISMATCH" }, + { 0x01, "STACK_ERROR" }, + { 0x02, "API_STACK_ERROR" }, + { 0x03, "RET_EMPTY_STACK_ERROR" }, + { 0x04, "PC_WRAP" }, { 0x05, "MISALIGNED_PC" }, - { 0x08, "MISALIGNED_GPR" }, - { 0x09, "INVALID_OPCODE" }, - { 0x0d, "GPR_OUT_OF_BOUNDS" }, - { 0x0e, "MEM_OUT_OF_BOUNDS" }, - { 0x0f, "UNALIGNED_MEM_ACCESS" }, + { 0x06, "PC_OVERFLOW" }, + { 0x07, "MISALIGNED_IMMC_ADDR" }, + { 0x08, "MISALIGNED_REG" }, + { 0x09, "ILLEGAL_INSTR_ENCODING" }, + { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, + { 0x0b, "ILLEGAL_INSTR_PARAM" }, + { 0x0c, "INVALID_CONST_ADDR" }, + { 0x0d, "OOR_REG" }, + { 0x0e, "OOR_ADDR" }, + { 0x0f, "MISALIGNED_ADDR" }, { 0x10, "INVALID_ADDR_SPACE" }, - { 0x11, "INVALID_PARAM" }, + { 0x11, "ILLEGAL_INSTR_PARAM2" }, + { 0x12, "INVALID_CONST_ADDR_LDC" }, + { 0x13, "GEOMETRY_SM_ERROR" }, + { 0x14, "DIVERGENT" }, + { 0x15, "WARP_EXIT" }, {} }; static const struct nvkm_bitfield gf100_mp_global_error[] = { + { 0x00000001, "SM_TO_SM_FAULT" }, + { 0x00000002, "L1_ERROR" }, { 0x00000004, "MULTIPLE_WARP_ERRORS" }, - { 0x00000008, "OUT_OF_STACK_SPACE" }, + { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, + { 0x00000010, "BPT_INT" }, + { 0x00000020, "BPT_PAUSE" }, + { 0x00000040, "SINGLE_STEP_COMPLETE" }, + { 0x20000000, "ECC_SEC_ERROR" }, + { 0x40000000, "ECC_DED_ERROR" }, + { 0x80000000, "TIMEOUT" }, {} }; @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, { u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); if (data) { - info->match = nvbios_rd16(bios, data + 0x00); + info->proto = 
nvbios_rd08(bios, data + 0x00); + info->flags = nvbios_rd16(bios, data + 0x01); info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); } @@ -149,12 +150,13 @@ } u16 -nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, +nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) { u16 data, idx = 0; while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { - if (info->match == type) + if ((info->proto == proto || info->proto == 0xff) && + (info->flags == flags)) break; } return data; @@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) struct pwr_rail_t *r = &stbl.rail[i]; struct nvkm_iccsense_rail *rail; struct nvkm_iccsense_sensor *sensor; + int (*read)(struct nvkm_iccsense *, + struct nvkm_iccsense_rail *); if (!r->mode || r->resistor_mohm == 0) continue; @@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) if (!sensor) continue; - rail = kmalloc(sizeof(*rail), GFP_KERNEL); - if (!rail) - return -ENOMEM; - switch (sensor->type) { case NVBIOS_EXTDEV_INA209: if (r->rail != 0) continue; - rail->read = nvkm_iccsense_ina209_read; + read = nvkm_iccsense_ina209_read; break; case NVBIOS_EXTDEV_INA219: if (r->rail != 0) continue; - rail->read = nvkm_iccsense_ina219_read; + read = nvkm_iccsense_ina219_read; break; case NVBIOS_EXTDEV_INA3221: if (r->rail >= 3) continue; - rail->read = nvkm_iccsense_ina3221_read; + read = nvkm_iccsense_ina3221_read; break; default: continue; } + rail = kmalloc(sizeof(*rail), GFP_KERNEL); + if (!rail) + return -ENOMEM; sensor->rail_mask |= 1 << r->rail; + rail->read = read; rail->sensor = sensor; rail->idx = r->rail; rail->mohm = r->resistor_mohm; @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) } static void -gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) +gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) { struct nvkm_subdev *subdev = &ltc->subdev; struct nvkm_device *device = subdev->device; - u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); + u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); u32 stat = nvkm_rd32(device, base + 0x00c); if (stat) { @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) while (mask) { u32 s, c = __ffs(mask); for (s = 0; s < ltc->lts_nr; s++) - gm107_ltc_lts_isr(ltc, c, s); + gm107_ltc_intr_lts(ltc, c, s); mask &= ~(1 << c); } } @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func gm200_ltc = { .oneinit = gm200_ltc_oneinit, .init = gm200_ltc_init, - .intr = gm107_ltc_intr, /*XXX: not validated */ + .intr = gm107_ltc_intr, .cbc_clear = gm107_ltc_cbc_clear, .cbc_wait = gm107_ltc_cbc_wait, .zbc = 16, @@ -2,6 +2,7 @@ config DRM_OMAP tristate "OMAP DRM" depends on DRM depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM + select OMAP2_DSS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT @@ -9,6 +9,7 @@ * the Free Software Foundation. */ +#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -14,7 +14,7 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -9,7 +9,7 @@ * the Free Software Foundation.
*/ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -9,7 +9,7 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -14,7 +14,7 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> @@ -15,6 +15,7 @@ #include <linux/spi/spi.h> #include <linux/mutex.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <video/omapdss.h> #include <video/omap-panel-data.h> @@ -15,7 +15,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_gpio.h> #include <video/omapdss.h> @@ -10,7 +10,7 @@ */ #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -29,7 +29,7 @@ #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -14,7 +14,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of_gpio.h> @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct regulator *vdds_dsi; - int r; if (dsi->vdds_dsi_reg != NULL) return 0; @@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) return PTR_ERR(vdds_dsi); } - if (regulator_can_change_voltage(vdds_dsi)) { - r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); - if (r) { - devm_regulator_put(vdds_dsi); - DSSERR("can't set the DSI regulator voltage\n"); - return r; - } - } - dsi->vdds_dsi_reg = vdds_dsi; return 0; @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/clk.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/gfp.h> @@ -33,6 +33,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> +#include <linux/of.h> #include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> @@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -114,15 +114,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; @@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) { DSSDBG("Enter hdmi_core_powerdown_disable\n"); - REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); } static void hdmi_core_swreset_release(struct hdmi_core_data *core) @@ -38,6 +38,7 @@ #include <linux/gpio.h> #include 
<linux/regulator/consumer.h> #include <linux/component.h> +#include <linux/of.h> #include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> @@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -131,15 +131,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned ss_scl_high = 4000; /* ns */ - const unsigned ss_scl_low = 4700; /* ns */ + const unsigned ss_scl_high = 4600; /* ns */ + const unsigned ss_scl_low = 5400; /* ns */ const unsigned fs_scl_high = 600; /* ns */ const unsigned fs_scl_low = 1300; /* ns */ const unsigned sda_hold = 1000; /* ns */ @@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, c = (ptr[1] >> 6) & 0x3; m = (ptr[1] >> 4) & 0x3; - r = (ptr[1] >> 0) & 0x3; + r = (ptr[1] >> 0) & 0xf; itc = (ptr[2] >> 7) & 0x1; ec = (ptr[2] >> 4) & 0x7; @@ -13,6 +13,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/seq_file.h> #include <video/omapdss.h> #include "dss.h" @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/seq_file.h> #include <video/omapdss.h> @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #include <video/omapdss.h> #include "dss.h" @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/platform_device.h> /* platform_device() */ #include <linux/sched.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/vmalloc.h> @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -17,6 +17,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/seq_file.h> #include <linux/shmem_fs.h> #include <linux/spinlock.h> #include <linux/pfn_t.h> @@ -1406,7 +1407,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, if (ret) goto err_free; - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); } @@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; /* use frac fb div on RS780/RS880 */ - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + && !radeon_crtc->ss_enabled) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; @@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_crtc->ss.refdiv) { radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; - if (ASIC_IS_AVIVO(rdev)) + if (rdev->family >= CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; } } @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) /* * GPU helpers function. */ + +/** + * radeon_device_is_virtual - check if we are running in a virtual environment + * + * Check if the asic has been passed through to a VM (all asics). + * Used at driver startup. + * Returns true if virtual or false if not. + */ +static bool radeon_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * radeon_card_posted - check if the hw has already been initialized * @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + /* for pass through, always force asic_init */ + if (radeon_device_is_virtual()) + return false; + /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && @@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); pci_save_state(dev->pdev); - if (freeze && rdev->family >= CHIP_R600) { + if (freeze && rdev->family >= CHIP_CEDAR) { rdev->asic->asic_reset(rdev, true); pci_restore_state(dev->pdev); } else if (suspend) { @@ -148,40 +148,39 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) struct rcar_du_vsp_plane_state *state = to_rcar_vsp_plane_state(plane->plane.state); struct drm_framebuffer *fb = plane->plane.state->fb; - struct v4l2_rect src; - struct v4l2_rect dst; - dma_addr_t paddr[2] = { 0, }; - u32 pixelformat = 0; + struct vsp1_du_atomic_config cfg = { + .pixelformat = 0, + .pitch = fb->pitches[0], + .alpha = state->alpha, + .zpos = state->zpos, + }; unsigned int i; - src.left = state->state.src_x >> 16; - src.top = state->state.src_y >> 16; - src.width = state->state.src_w >> 16; - src.height = state->state.src_h >> 16; + cfg.src.left = state->state.src_x >> 16; + cfg.src.top = state->state.src_y >> 16; + cfg.src.width = state->state.src_w >> 16; + cfg.src.height = state->state.src_h >> 16; - dst.left = state->state.crtc_x; - dst.top = state->state.crtc_y; - dst.width = state->state.crtc_w; - dst.height = state->state.crtc_h; + cfg.dst.left = state->state.crtc_x; + cfg.dst.top = state->state.crtc_y; + cfg.dst.width = 
state->state.crtc_w; + cfg.dst.height = state->state.crtc_h; for (i = 0; i < state->format->planes; ++i) { struct drm_gem_cma_object *gem; gem = drm_fb_cma_get_gem_obj(fb, i); - paddr[i] = gem->paddr + fb->offsets[i]; + cfg.mem[i] = gem->paddr + fb->offsets[i]; } for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) { if (formats_kms[i] == state->format->fourcc) { - pixelformat = formats_v4l2[i]; + cfg.pixelformat = formats_v4l2[i]; break; } } - WARN_ON(!pixelformat); - - vsp1_du_atomic_update(plane->vsp->vsp, plane->index, pixelformat, - fb->pitches[0], paddr, &src, &dst); + vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg); } static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, @@ -220,8 +219,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, if (plane->state->crtc) rcar_du_vsp_plane_setup(rplane); else - vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, 0, 0, 0, - NULL, NULL); + vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL); } static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = { @@ -269,6 +267,7 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane) return; state->alpha = 255; + state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1; plane->state = &state->state; plane->state->plane = plane; @@ -283,6 +282,8 @@ static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane, if (property == rcdu->props.alpha) rstate->alpha = val; + else if (property == rcdu->props.zpos) + rstate->zpos = val; else return -EINVAL; @@ -299,6 +300,8 @@ static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane, if (property == rcdu->props.alpha) *val = rstate->alpha; + else if (property == rcdu->props.zpos) + *val = rstate->zpos; else return -EINVAL; @@ -378,6 +381,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp) drm_object_attach_property(&plane->plane.base, rcdu->props.alpha, 255); + drm_object_attach_property(&plane->plane.base, + rcdu->props.zpos, 1); } return 0; @@ -44,6 +44,7 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p) * @state: base DRM plane state * @format: information about the pixel format used by the plane * @alpha: value of the plane alpha property + * @zpos: value of the plane zpos property */ struct rcar_du_vsp_plane_state { struct drm_plane_state state; @@ -51,6 +52,7 @@ struct rcar_du_vsp_plane_state { const struct rcar_du_format_info *format; unsigned int alpha; + unsigned int zpos; }; static inline struct rcar_du_vsp_plane_state * @@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc) mixer->status = STI_MIXER_DISABLING; } -static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* accept the provided drm_display_mode, do not fix it up */ - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - return true; -} - static int sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) { @@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, - .mode_fixup = sti_crtc_mode_fixup, .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = sti_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, @@ -1,6 +1,6 @@ config DRM_SUN4I tristate "DRM Support for Allwinner A10 Display Engine" - depends on DRM && ARM + depends on DRM && ARM && COMMON_CLK depends on 
ARCH_SUNXI || COMPILE_TEST select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER @@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, /* Get the physical address of the buffer in memory */ gem = drm_fb_cma_get_gem_obj(fb, 0); - DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); + DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr); /* Compute the start of the displayed memory */ bpp = drm_format_plane_cpp(fb->pixel_format, 0); @@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, paddr += (state->src_x >> 16) * bpp; paddr += (state->src_y >> 16) * fb->pitches[0]; - DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); + DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); /* Write the 32 lower bits of the address (in bits) */ lo_paddr = paddr << 3; @@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc) DRM_DEBUG_DRIVER("Disabling the CRTC\n"); sun4i_tcon_disable(drv->tcon); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } } static void sun4i_crtc_enable(struct drm_crtc *crtc) @@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { - return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); + unsigned long best_parent = 0; + u8 best_div = 1; + int i; + + for (i = 6; i < 127; i++) { + unsigned long ideal = rate * i; + unsigned long rounded; + + rounded = clk_hw_round_rate(clk_hw_get_parent(hw), + ideal); + + if (rounded == ideal) { + best_parent = rounded; + best_div = i; + goto out; + } + + if ((rounded < ideal) && (rounded > best_parent)) { + best_parent = rounded; + best_div = i; + } + } + +out: + *parent_rate = best_parent; + + return best_parent / best_div; } static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct sun4i_dclk *dclk = hw_to_dclk(hw); - int div = DIV_ROUND_CLOSEST(parent_rate, rate); + u8 div = parent_rate / rate; return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, GENMASK(6, 0), div); @@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) const char *clk_name, *parent_name; struct clk_init_data init; struct sun4i_dclk *dclk; + int ret; parent_name = __clk_get_name(tcon->sclk0); - of_property_read_string_index(dev->of_node, "clock-output-names", 0, - &clk_name); + ret = of_property_read_string_index(dev->of_node, + "clock-output-names", 0, + &clk_name); + if (ret) + return ret; dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); if (!dclk) @@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) init.ops = &sun4i_dclk_ops; init.parent_names = &parent_name; init.num_parents = 1; + init.flags = CLK_SET_RATE_PARENT; dclk->regmap = tcon->regs; dclk->hw.init = &init; @@ -24,34 +24,6 @@ #include "sun4i_layer.h" #include "sun4i_tcon.h" -static int sun4i_drv_connector_plug_all(struct drm_device *drm) -{ - struct drm_connector *connector, *failed; - int ret; - - mutex_lock(&drm->mode_config.mutex); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_connector_register(connector); - if (ret) { - failed = connector; - goto err; - } - } - mutex_unlock(&drm->mode_config.mutex); - return 0; - -err: - 
list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - if (failed == connector) - break; - - drm_connector_unregister(connector); - } - mutex_unlock(&drm->mode_config.mutex); - - return ret; -} - static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) { struct sun4i_drv *drv = drm->dev_private; @@ -120,11 +92,27 @@ static struct drm_driver sun4i_drv_driver = { /* Frame Buffer Operations */ /* VBlank Operations */ - .get_vblank_counter = drm_vblank_count, + .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = sun4i_drv_enable_vblank, .disable_vblank = sun4i_drv_disable_vblank, }; +static void sun4i_remove_framebuffers(void) +{ + struct apertures_struct *ap; + + ap = alloc_apertures(1); + if (!ap) + return; + + /* The framebuffer can be located anywhere in RAM */ + ap->ranges[0].base = 0; + ap->ranges[0].size = ~0; + + remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); + kfree(ap); +} + static int sun4i_drv_bind(struct device *dev) { struct drm_device *drm; @@ -172,6 +160,9 @@ static int sun4i_drv_bind(struct device *dev) } drm->irq_enabled = true; + /* Remove early framebuffers (ie. simplefb) */ + sun4i_remove_framebuffers(); + /* Create our framebuffer */ drv->fbdev = sun4i_framebuffer_init(drm); if (IS_ERR(drv->fbdev)) { @@ -187,7 +178,7 @@ static int sun4i_drv_bind(struct device *dev) if (ret) goto free_drm; - ret = sun4i_drv_connector_plug_all(drm); + ret = drm_connector_register_all(drm); if (ret) goto unregister_drm; @@ -204,6 +195,7 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); + drm_connector_unregister_all(drm); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); sun4i_framebuffer_free(drm); @@ -318,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev) count += sun4i_drv_add_endpoints(&pdev->dev, &match, pipeline); + of_node_put(pipeline); DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n", count, i); @@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) static int sun4i_rgb_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); + struct sun4i_drv *drv = rgb->drv; + struct sun4i_tcon *tcon = drv->tcon; u32 hsync = mode->hsync_end - mode->hsync_start; u32 vsync = mode->vsync_end - mode->vsync_start; + unsigned long rate = mode->clock * 1000; + long rounded_rate; DRM_DEBUG_DRIVER("Validating modes...\n"); @@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, DRM_DEBUG_DRIVER("Vertical parameters OK\n"); + rounded_rate = clk_round_rate(tcon->dclk, rate); + if (rounded_rate < rate) + return MODE_CLOCK_LOW; + + if (rounded_rate > rate) + return MODE_CLOCK_HIGH; + + DRM_DEBUG_DRIVER("Clock rate OK\n"); + return MODE_OK; } @@ -203,7 +217,7 @@ int sun4i_rgb_init(struct drm_device *drm) int ret; /* If we don't have a panel, there's no point in going on */ - if (!tcon->panel) + if (IS_ERR(tcon->panel)) return -ENODEV; rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); @@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node) remote = of_graph_get_remote_port_parent(end_node); if (!remote) { - DRM_DEBUG_DRIVER("Enable to parse remote node\n"); + DRM_DEBUG_DRIVER("Unable to parse remote node\n"); return ERR_PTR(-EINVAL); } - return of_drm_find_panel(remote); + return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER); } static int sun4i_tcon_bind(struct device *dev, 
struct device *master, @@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, return 0; } - return sun4i_rgb_init(drm); + ret = sun4i_rgb_init(drm); + if (ret < 0) + goto err_free_clocks; + + return 0; err_free_clocks: sun4i_tcon_free_clocks(tcon); @@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev) * Defer the probe. */ panel = sun4i_tcon_find_panel(node); - if (IS_ERR(panel)) { - /* - * If we don't have a panel endpoint, just go on - */ - if (PTR_ERR(panel) != -ENODEV) - return -EPROBE_DEFER; + + /* + * If we don't have a panel endpoint, just go on + */ + if (PTR_ERR(panel) == -EPROBE_DEFER) { + DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n"); + return -EPROBE_DEFER; } return component_add(&pdev->dev, &sun4i_tcon_ops); @@ -1034,9 +1034,9 @@ out_unlock: return ret; } -static bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags) +bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags) { int i; @@ -1068,6 +1068,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, return false; } +EXPORT_SYMBOL(ttm_bo_mem_compat); int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, @@ -298,7 +298,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL); - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = shmem_read_mapping_page(swap_space, i); @@ -347,7 +347,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) } else swap_storage = persistent_swap_storage; - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = ttm->pages[i]; @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - - if (debug_dump_regs) { - DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); - vc4_hvs_dump_state(dev); - } - if (crtc->state->event) { unsigned long flags; @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); vc4_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); crtc->state->event = NULL; + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + } + + if (debug_dump_regs) { + DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); + vc4_hvs_dump_state(dev); } } @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) { struct drm_crtc *crtc = &vc4_crtc->base; struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + u32 chan = vc4_crtc->channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (vc4_crtc->event) { + if (vc4_crtc->event && + (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; + drm_crtc_vblank_put(crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -556,6 +565,7 @@ 
vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) spin_unlock_irqrestore(&dev->event_lock, flags); } + drm_crtc_vblank_put(crtc); drm_framebuffer_unreference(flip_state->fb); kfree(flip_state); @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, return ret; } + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + /* Immediately update the plane's legacy fb pointer, so that later * modeset prep sees the state that will be present when the semaphore * is released. @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { }; static const struct drm_ioctl_desc vc4_drm_ioctls[] = { - DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, DRM_ROOT_ONLY), }; @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, - .get_vblank_counter = drm_vblank_count, + .get_vblank_counter = drm_vblank_no_hw_counter, #if defined(CONFIG_DEBUG_FS) .debugfs_init = vc4_debugfs_init, @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, return -ENOMEM; /* Make sure that any outstanding modesets have finished. */ - ret = down_interruptible(&vc4->async_modeset); - if (ret) { - kfree(c); - return ret; + if (nonblock) { + ret = down_trylock(&vc4->async_modeset); + if (ret) { + kfree(c); + return -EBUSY; + } + } else { + ret = down_interruptible(&vc4->async_modeset); + if (ret) { + kfree(c); + return ret; + } } ret = drm_atomic_helper_prepare_planes(dev, state); @@ -341,6 +341,10 @@ #define SCALER_DISPLACT0 0x00000030 #define SCALER_DISPLACT1 0x00000034 #define SCALER_DISPLACT2 0x00000038 +#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ + (x) * (SCALER_DISPLACT1 - \ + SCALER_DISPLACT0)) + #define SCALER_DISPCTRL0 0x00000040 # define SCALER_DISPCTRLX_ENABLE BIT(31) # define SCALER_DISPCTRLX_RESET BIT(30) @@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; int ret; + uint32_t new_flags; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - ret = ttm_bo_validate(bo, placement, interruptible, false); + if (buf->pin_count > 0) + ret = ttm_bo_mem_compat(placement, &bo->mem, + &new_flags) == true ? 
0 : -EINVAL; + else + ret = ttm_bo_validate(bo, placement, interruptible, false); + if (!ret) vmw_bo_pin_reserved(buf, true); @@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; int ret; + uint32_t new_flags; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; + if (buf->pin_count > 0) { + ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem, + &new_flags) == true ? 0 : -EINVAL; + goto out_unreserve; + } + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, false); if (likely(ret == 0) || ret == -ERESTARTSYS) @@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, struct ttm_placement placement; struct ttm_place place; int ret = 0; + uint32_t new_flags; place = vmw_vram_placement.placement[0]; place.lpfn = bo->num_pages; @@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv, */ if (bo->mem.mem_type == TTM_PL_VRAM && bo->mem.start < bo->num_pages && - bo->mem.start > 0) + bo->mem.start > 0 && + buf->pin_count == 0) (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); - ret = ttm_bo_validate(bo, &placement, interruptible, false); + if (buf->pin_count > 0) + ret = ttm_bo_mem_compat(&placement, &bo->mem, + &new_flags) == true ? 0 : -EINVAL; + else + ret = ttm_bo_validate(bo, &placement, interruptible, false); /* For some reason we didn't end up at the start of vram */ WARN_ON(ret == 0 && bo->offset != 0); @@ -233,6 +233,7 @@ static int vmw_force_iommu; static int vmw_restrict_iommu; static int vmw_force_coherent; static int vmw_restrict_dma_mask; +static int vmw_assume_16bpp; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); @@ -249,6 +250,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); module_param_named(force_coherent, vmw_force_coherent, int, 0600); MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); +MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); +module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); static void vmw_print_capabilities(uint32_t capabilities) @@ -660,6 +663,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); + dev_priv->assume_16bpp = !!vmw_assume_16bpp; + dev_priv->enable_fb = enable_fbdev; vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); @@ -706,6 +711,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); + /* + * Workaround for low memory 2D VMs to compensate for the + * allocation taken by fbdev + */ + if (!(dev_priv->capabilities & SVGA_CAP_3D)) + mem_size *= 2; + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->prim_bb_mem = vmw_read(dev_priv, @@ -386,6 +386,7 @@ struct vmw_private { spinlock_t hw_lock; spinlock_t cap_lock; bool has_dx; + bool assume_16bpp; /* * VGA registers. @@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info) par->set_fb = &vfb->base; - if (!par->bo_ptr) { - /* - * Pin before mapping. 
Since we don't know in what placement - * to pin, call into KMS to do it for us. - */ - ret = vfb->pin(vfb); - if (ret) { - DRM_ERROR("Could not pin the fbdev framebuffer.\n"); - return ret; - } - - ret = ttm_bo_kmap(&par->vmw_bo->base, 0, - par->vmw_bo->base.num_pages, &par->map); - if (ret) { - vfb->unpin(vfb); - DRM_ERROR("Could not map the fbdev framebuffer.\n"); - return ret; - } - - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); - } - return 0; } @@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info) if (ret) goto out_unlock; + if (!par->bo_ptr) { + struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb); + + /* + * Pin before mapping. Since we don't know in what placement + * to pin, call into KMS to do it for us. + */ + ret = vfb->pin(vfb); + if (ret) { + DRM_ERROR("Could not pin the fbdev framebuffer.\n"); + goto out_unlock; + } + + ret = ttm_bo_kmap(&par->vmw_bo->base, 0, + par->vmw_bo->base.num_pages, &par->map); + if (ret) { + vfb->unpin(vfb); + DRM_ERROR("Could not map the fbdev framebuffer.\n"); + goto out_unlock; + } + + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); + } + + vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width, par->set_fb->height); @@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; int i; - u32 assumed_bpp = 2; + u32 assumed_bpp = 4; - /* - * If using screen objects, then assume 32-bpp because that's what the - * SVGA device is assuming - */ - if (dev_priv->active_display_unit == vmw_du_screen_object) - assumed_bpp = 4; + if (dev_priv->assume_16bpp) + assumed_bpp = 2; if (dev_priv->active_display_unit == vmw_du_screen_target) { max_width = min(max_width, dev_priv->stdu_max_width); @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/frame.h> #include <asm/hypervisor.h> #include "drmP.h" #include "vmwgfx_msg.h" @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) return -EINVAL; } - +STACK_FRAME_NON_STANDARD(vmw_send_msg); /** @@ -299,11 +300,15 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, break; } + if (retries == RETRIES) + return -EINVAL; + *msg_len = reply_len; *msg = reply; return 0; } +STACK_FRAME_NON_STANDARD(vmw_recv_msg); /** @@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv, WARN_ON_ONCE(!stdu->defined); - if (!vfb->dmabuf && new_fb->width == mode->hdisplay && - new_fb->height == mode->vdisplay) + new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb); + + if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay && + new_vfbs->surface->base_size.height == mode->vdisplay) new_content_type = SAME_AS_DISPLAY; else if (vfb->dmabuf) new_content_type = SEPARATE_DMA; @@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv, content_srf.mip_levels[0] = 1; content_srf.multisample_count = 0; } else { - new_vfbs = vmw_framebuffer_to_vfbs(new_fb); content_srf = *new_vfbs->surface; } @@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv, return ret; } } else if (new_content_type == SAME_AS_DISPLAY) { - new_vfbs = vmw_framebuffer_to_vfbs(new_fb); new_display_srf = vmw_surface_reference(new_vfbs->surface); } @@ -388,6 +388,21 @@ config HID_LCPOWER ---help--- Support for LC-Power RC1000MCE RF remote control. 
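The HID hunks below fold several one-off USB LED devices into a single generic hid-led driver: the Dream Cheeky and Riso Kagaku notifiers move off the hid_ignore_list into the new module, and HID_THINGM becomes a compatibility symbol that merely selects HID_LED. The core of such a driver is an led_classdev whose brightness callback writes a HID report. A minimal sketch of that shape follows; struct hidled_dev, its 4-byte report layout, and hidled_brightness_set are illustrative assumptions, not the actual hid-led.c internals.

#include <linux/hid.h>
#include <linux/leds.h>

/*
 * Sketch only: assumes one classdev per device and a feature report of
 * [report-id, R, G, B].
 */
struct hidled_dev {
	struct hid_device *hdev;
	struct led_classdev cdev;
	u8 buf[4];
};

static int hidled_brightness_set(struct led_classdev *cdev,
				 enum led_brightness value)
{
	struct hidled_dev *led = container_of(cdev, struct hidled_dev, cdev);
	int ret;

	led->buf[1] = value;	/* single-channel example */
	ret = hid_hw_raw_request(led->hdev, led->buf[0], led->buf,
				 sizeof(led->buf), HID_FEATURE_REPORT,
				 HID_REQ_SET_REPORT);
	return ret < 0 ? ret : 0;
}

/* In probe, after hid_hw_start():
 *	led->cdev.name = "hidled::rgb";
 *	led->cdev.brightness_set_blocking = hidled_brightness_set;
 *	ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
 * Each supported device then only contributes its report quirks and an
 * entry in the driver's hid_device_id table, which is why the Kconfig
 * entry below can list five devices for one module.
 */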
+config HID_LED + tristate "Simple RGB LED support" + depends on HID + depends on LEDS_CLASS + ---help--- + Support for simple RGB LED devices. Currently supported are: + - Riso Kagaku Webmail Notifier + - Dream Cheeky Webmail Notifier and Friends Alert + - ThingM blink(1) + - Delcom Visual Signal Indicator Generation 2 + - Greynut Luxafor + + To compile this driver as a module, choose M here: the + module will be called hid-led. + config HID_LENOVO tristate "Lenovo / Thinkpad devices" depends on HID @@ -819,11 +834,11 @@ config HID_THINGM tristate "ThingM blink(1) USB RGB LED" depends on HID depends on LEDS_CLASS + select HID_LED ---help--- - Support for the ThingM blink(1) USB RGB LED. This driver registers a - Linux LED class instance, plus additional sysfs attributes to control - RGB colors, fade time and playing. The device is exposed through hidraw - to access other functions. + Support for the ThingM blink(1) USB RGB LED. This driver has been + merged into the generic hid led driver. Config symbol HID_THINGM + just selects HID_LED and will be removed soon. config HID_THRUSTMASTER tristate "ThrustMaster devices support" @@ -936,6 +951,14 @@ config HID_SENSOR_CUSTOM_SENSOR standard sensors. Select this config option for custom/generic sensor support. +config HID_ALPS + tristate "Alps HID device support" + depends on HID + ---help--- + Support for Alps I2C HID touchpads and StickPointer. + Say Y here if you have an Alps touchpad over i2c-hid or usbhid + and want support for its special functionalities. + endmenu endif # HID @@ -21,6 +21,7 @@ hid-wiimote-y := hid-wiimote-core.o hid-wiimote-modules.o hid-wiimote-$(CONFIG_DEBUG_FS) += hid-wiimote-debug.o obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o +obj-$(CONFIG_HID_ALPS) += hid-alps.o obj-$(CONFIG_HID_ACRUX) += hid-axff.o obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o @@ -90,12 +91,12 @@ obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o -obj-$(CONFIG_HID_THINGM) += hid-thingm.o obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o obj-$(CONFIG_HID_TIVO) += hid-tivo.o obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o +obj-$(CONFIG_HID_LED) += hid-led.o obj-$(CONFIG_HID_XINMO) += hid-xinmo.o obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c new file mode 100644 index 000000000000..048befde295a --- /dev/null +++ b/drivers/hid/hid-alps.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2016 Masaki Ota <masaki.ota@jp.alps.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version.
+ */ + +#include <linux/kernel.h> +#include <linux/hid.h> +#include <linux/input.h> +#include <linux/input/mt.h> +#include <linux/module.h> +#include <asm/unaligned.h> +#include "hid-ids.h" + +/* ALPS Device Product ID */ +#define HID_PRODUCT_ID_T3_BTNLESS 0xD0C0 +#define HID_PRODUCT_ID_COSMO 0x1202 +#define HID_PRODUCT_ID_U1_PTP_1 0x1207 +#define HID_PRODUCT_ID_U1 0x1209 +#define HID_PRODUCT_ID_U1_PTP_2 0x120A +#define HID_PRODUCT_ID_U1_DUAL 0x120B +#define HID_PRODUCT_ID_T4_BTNLESS 0x120C + +#define DEV_SINGLEPOINT 0x01 +#define DEV_DUALPOINT 0x02 + +#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ +#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ +#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ + +#define U1_FEATURE_REPORT_LEN 0x08 /* Feature Report Length */ +#define U1_FEATURE_REPORT_LEN_ALL 0x0A +#define U1_CMD_REGISTER_READ 0xD1 +#define U1_CMD_REGISTER_WRITE 0xD2 + +#define U1_DEVTYPE_SP_SUPPORT 0x10 /* SP Support */ +#define U1_DISABLE_DEV 0x01 +#define U1_TP_ABS_MODE 0x02 +#define U1_SP_ABS_MODE 0x80 + +#define ADDRESS_U1_DEV_CTRL_1 0x00800040 +#define ADDRESS_U1_DEVICE_TYP 0x00800043 +#define ADDRESS_U1_NUM_SENS_X 0x00800047 +#define ADDRESS_U1_NUM_SENS_Y 0x00800048 +#define ADDRESS_U1_PITCH_SENS_X 0x00800049 +#define ADDRESS_U1_PITCH_SENS_Y 0x0080004A +#define ADDRESS_U1_RESO_DWN_ABS 0x0080004E +#define ADDRESS_U1_PAD_BTN 0x00800052 +#define ADDRESS_U1_SP_BTN 0x0080009F + +#define MAX_TOUCHES 5 + +/** + * struct u1_data + * + * @input: pointer to the kernel input device + * @input2: pointer to the kernel input2 device + * @hdev: pointer to the struct hid_device + * + * @dev_ctrl: device control parameter + * @dev_type: device type + * @sen_line_num_x: number of sensor line of X + * @sen_line_num_y: number of sensor line of Y + * @pitch_x: sensor pitch of X + * @pitch_y: sensor pitch of Y + * @resolution: resolution + * @btn_info: button information + * @x_active_len_mm: active area length of X (mm) + * @y_active_len_mm: active area length of Y (mm) + * @x_max: maximum x coordinate value + * @y_max: maximum y coordinate value + * @btn_cnt: number of buttons + * @sp_btn_cnt: number of stick buttons + */ +struct u1_dev { + struct input_dev *input; + struct input_dev *input2; + struct hid_device *hdev; + + u8 dev_ctrl; + u8 dev_type; + u8 sen_line_num_x; + u8 sen_line_num_y; + u8 pitch_x; + u8 pitch_y; + u8 resolution; + u8 btn_info; + u8 sp_btn_info; + u32 x_active_len_mm; + u32 y_active_len_mm; + u32 x_max; + u32 y_max; + u32 btn_cnt; + u32 sp_btn_cnt; +}; + +static int u1_read_write_register(struct hid_device *hdev, u32 address, + u8 *read_val, u8 write_val, bool read_flag) +{ + int ret, i; + u8 check_sum; + u8 *input; + u8 *readbuf; + + input = kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL); + if (!input) + return -ENOMEM; + + input[0] = U1_FEATURE_REPORT_ID; + if (read_flag) { + input[1] = U1_CMD_REGISTER_READ; + input[6] = 0x00; + } else { + input[1] = U1_CMD_REGISTER_WRITE; + input[6] = write_val; + } + + put_unaligned_le32(address, input + 2); + + /* Calculate the checksum */ + check_sum = U1_FEATURE_REPORT_LEN_ALL; + for (i = 0; i < U1_FEATURE_REPORT_LEN - 1; i++) + check_sum += input[i]; + + input[7] = check_sum; + ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, input, + U1_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + + if (ret < 0) { + dev_err(&hdev->dev, "failed to read command (%d)\n", ret); + goto exit; + } + + if (read_flag) { + readbuf = 
kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL); + if (!readbuf) { + kfree(input); + return -ENOMEM; + } + + ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, readbuf, + U1_FEATURE_REPORT_LEN, + HID_FEATURE_REPORT, HID_REQ_GET_REPORT); + + if (ret < 0) { + dev_err(&hdev->dev, "failed read register (%d)\n", ret); + goto exit; + } + + *read_val = readbuf[6]; + + kfree(readbuf); + } + + ret = 0; + +exit: + kfree(input); + return ret; +} + +static int alps_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + unsigned int x, y, z; + int i; + short sp_x, sp_y; + struct u1_dev *hdata = hid_get_drvdata(hdev); + + switch (data[0]) { + case U1_MOUSE_REPORT_ID: + break; + case U1_FEATURE_REPORT_ID: + break; + case U1_ABSOLUTE_REPORT_ID: + for (i = 0; i < MAX_TOUCHES; i++) { + u8 *contact = &data[i * 5]; + + x = get_unaligned_le16(contact + 3); + y = get_unaligned_le16(contact + 5); + z = contact[7] & 0x7F; + + input_mt_slot(hdata->input, i); + + if (z != 0) { + input_mt_report_slot_state(hdata->input, + MT_TOOL_FINGER, 1); + } else { + input_mt_report_slot_state(hdata->input, + MT_TOOL_FINGER, 0); + break; + } + + input_report_abs(hdata->input, ABS_MT_POSITION_X, x); + input_report_abs(hdata->input, ABS_MT_POSITION_Y, y); + input_report_abs(hdata->input, ABS_MT_PRESSURE, z); + + } + + input_mt_sync_frame(hdata->input); + + input_report_key(hdata->input, BTN_LEFT, + data[1] & 0x1); + input_report_key(hdata->input, BTN_RIGHT, + (data[1] & 0x2)); + input_report_key(hdata->input, BTN_MIDDLE, + (data[1] & 0x4)); + + input_sync(hdata->input); + + return 1; + + case U1_SP_ABSOLUTE_REPORT_ID: + sp_x = get_unaligned_le16(data+2); + sp_y = get_unaligned_le16(data+4); + + sp_x = sp_x / 8; + sp_y = sp_y / 8; + + input_report_rel(hdata->input2, REL_X, sp_x); + input_report_rel(hdata->input2, REL_Y, sp_y); + + input_report_key(hdata->input2, BTN_LEFT, + data[1] & 0x1); + input_report_key(hdata->input2, BTN_RIGHT, + (data[1] & 0x2)); + input_report_key(hdata->input2, BTN_MIDDLE, + (data[1] & 0x4)); + + input_sync(hdata->input2); + + return 1; + } + + return 0; +} + +#ifdef CONFIG_PM +static int alps_post_reset(struct hid_device *hdev) +{ + return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, U1_TP_ABS_MODE, false); +} + +static int alps_post_resume(struct hid_device *hdev) +{ + return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, U1_TP_ABS_MODE, false); +} +#endif /* CONFIG_PM */ + +static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) +{ + struct u1_dev *data = hid_get_drvdata(hdev); + struct input_dev *input = hi->input, *input2; + struct u1_dev devInfo; + int ret; + int res_x, res_y, i; + + data->input = input; + + hid_dbg(hdev, "Opening low level driver\n"); + ret = hid_hw_open(hdev); + if (ret) + return ret; + + /* Allow incoming hid reports */ + hid_device_io_start(hdev); + + /* Device initialization */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + &devInfo.dev_ctrl, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret); + goto exit; + } + + devInfo.dev_ctrl &= ~U1_DISABLE_DEV; + devInfo.dev_ctrl |= U1_TP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, devInfo.dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X, + &devInfo.sen_line_num_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret); + 
goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y, + &devInfo.sen_line_num_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X, + &devInfo.pitch_x, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y, + &devInfo.pitch_y, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS, + &devInfo.resolution, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN, + &devInfo.btn_info, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret); + goto exit; + } + + /* Check StickPointer device */ + ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP, + &devInfo.dev_type, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret); + goto exit; + } + + devInfo.x_active_len_mm = + (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10; + devInfo.y_active_len_mm = + (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10; + + devInfo.x_max = + (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1); + devInfo.y_max = + (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1); + + __set_bit(EV_ABS, input->evbit); + input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0); + + if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) { + res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm; + res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm; + + input_abs_set_res(input, ABS_MT_POSITION_X, res_x); + input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); + } + + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0); + + input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER); + + __set_bit(EV_KEY, input->evbit); + if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) { + devInfo.btn_cnt = (devInfo.btn_info & 0x0F); + } else { + /* Button pad */ + devInfo.btn_cnt = 1; + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + } + + for (i = 0; i < devInfo.btn_cnt; i++) + __set_bit(BTN_LEFT + i, input->keybit); + + + /* Stick device initialization */ + if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) { + + input2 = input_allocate_device(); + if (!input2) { + input_free_device(input2); + goto exit; + } + + data->input2 = input2; + + devInfo.dev_ctrl |= U1_SP_ABS_MODE; + ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1, + NULL, devInfo.dev_ctrl, false); + if (ret < 0) { + dev_err(&hdev->dev, "failed SP mode (%d)\n", ret); + input_free_device(input2); + goto exit; + } + + ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN, + &devInfo.sp_btn_info, 0, true); + if (ret < 0) { + dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret); + input_free_device(input2); + goto exit; + } + + input2->phys = input->phys; + input2->name = "DualPoint Stick"; + input2->id.bustype = BUS_I2C; + input2->id.vendor = input->id.vendor; + input2->id.product = input->id.product; + input2->id.version = input->id.version; + input2->dev.parent = input->dev.parent; + + __set_bit(EV_KEY, input2->evbit); + devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F); + for (i = 0; i < devInfo.sp_btn_cnt; i++) + __set_bit(BTN_LEFT + 
i, input2->keybit); + + __set_bit(EV_REL, input2->evbit); + __set_bit(REL_X, input2->relbit); + __set_bit(REL_Y, input2->relbit); + __set_bit(INPUT_PROP_POINTER, input2->propbit); + __set_bit(INPUT_PROP_POINTING_STICK, input2->propbit); + + ret = input_register_device(data->input2); + if (ret) { + input_free_device(input2); + goto exit; + } + } + +exit: + hid_device_io_stop(hdev); + hid_hw_close(hdev); + return ret; +} + +static int alps_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + return -1; +} + +static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct u1_dev *data = NULL; + int ret; + + data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->hdev = hdev; + hid_set_drvdata(hdev, data); + + hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + return ret; + } + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + return ret; + } + + return 0; +} + +static void alps_remove(struct hid_device *hdev) +{ + hid_hw_stop(hdev); +} + +static const struct hid_device_id alps_id[] = { + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, + USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, + { } +}; +MODULE_DEVICE_TABLE(hid, alps_id); + +static struct hid_driver alps_driver = { + .name = "hid-alps", + .id_table = alps_id, + .probe = alps_probe, + .remove = alps_remove, + .raw_event = alps_raw_event, + .input_mapping = alps_input_mapping, + .input_configured = alps_input_configured, +#ifdef CONFIG_PM + .resume = alps_post_resume, + .reset_resume = alps_post_reset, +#endif +}; + +module_hid_driver(alps_driver); + +MODULE_AUTHOR("Masaki Ota <masaki.ota@jp.alps.com>"); +MODULE_DESCRIPTION("ALPS HID driver"); +MODULE_LICENSE("GPL"); @@ -474,6 +474,8 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), + .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), @@ -1772,6 +1772,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, @@ -1851,6 +1852,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, { 
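/*
 * Note on the id tables above: unlike the HID_USB_DEVICE() entries around
 * it, the new ALPS line uses HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, ...),
 * so one entry matches whichever transport the touchpad enumerates on.
 * A hedged sketch of the difference, reusing the ids added above:
 *
 *	static const struct hid_device_id example_ids[] = {
 *		// matches only when the device shows up on USB
 *		{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS_JP,
 *				 HID_DEVICE_ID_ALPS_U1_DUAL) },
 *		// matches on any bus (USB, I2C, Bluetooth) and any HID group
 *		{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
 *			     USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
 *		{ }
 *	};
 */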
HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) }, @@ -1877,8 +1879,11 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, @@ -1962,6 +1967,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, @@ -2008,6 +2014,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, #if IS_ENABLED(CONFIG_HID_ROCCAT) { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, @@ -2348,8 +2355,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, @@ -2486,7 +2491,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, - { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, { } }; @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev) struct elo_priv *priv = hid_get_drvdata(hdev); hid_hw_stop(hdev); - flush_workqueue(wq); + cancel_delayed_work_sync(&priv->work); kfree(priv); } @@ -70,6 +70,9 @@ #define USB_VENDOR_ID_ALPS 0x0433 #define USB_DEVICE_ID_IBM_GAMEPAD 0x1101 +#define USB_VENDOR_ID_ALPS_JP 0x044E +#define HID_DEVICE_ID_ALPS_U1_DUAL 
0x120B + #define USB_VENDOR_ID_ANTON 0x1130 #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 @@ -142,6 +145,7 @@ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 @@ -296,6 +300,9 @@ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a +#define USB_VENDOR_ID_DELCOM 0x0fc5 +#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 @@ -334,6 +341,8 @@ #define USB_DEVICE_ID_ELECOM_BM084 0x0061 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 +#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 +#define USB_DEVICE_ID_DREAM_CHEEKY_FA 0x000a #define USB_VENDOR_ID_ELITEGROUP 0x03fc #define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8 @@ -680,6 +689,7 @@ #define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002 #define USB_DEVICE_ID_PICK16F1454 0x0042 #define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7 +#define USB_DEVICE_ID_LUXAFOR 0xf372 #define USB_VENDOR_ID_MICROSOFT 0x045e #define USB_DEVICE_ID_SIDEWINDER_GV 0x003b diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c new file mode 100644 index 000000000000..d8d55f37b4f5 --- /dev/null +++ b/ drivers/hid/hid-led.c@@ -0,0 +1,523 @@ +/* + * Simple USB RGB LED driver + * + * Copyright 2016 Heiner Kallweit <hkallweit1@gmail.com> + * Based on drivers/hid/hid-thingm.c and + * drivers/usb/misc/usbled.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2. 
+ */ + +#include <linux/hid.h> +#include <linux/hidraw.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/mutex.h> + +#include "hid-ids.h" + +enum hidled_report_type { + RAW_REQUEST, + OUTPUT_REPORT +}; + +enum hidled_type { + RISO_KAGAKU, + DREAM_CHEEKY, + THINGM, + DELCOM, + LUXAFOR, +}; + +static unsigned const char riso_kagaku_tbl[] = { +/* R+2G+4B -> riso kagaku color index */ + [0] = 0, /* black */ + [1] = 2, /* red */ + [2] = 1, /* green */ + [3] = 5, /* yellow */ + [4] = 3, /* blue */ + [5] = 6, /* magenta */ + [6] = 4, /* cyan */ + [7] = 7 /* white */ +}; + +#define RISO_KAGAKU_IX(r, g, b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)] + +union delcom_packet { + __u8 data[8]; + struct { + __u8 major_cmd; + __u8 minor_cmd; + __u8 data_lsb; + __u8 data_msb; + } tx; + struct { + __u8 cmd; + } rx; + struct { + __le16 family_code; + __le16 security_code; + __u8 fw_version; + } fw; +}; + +#define DELCOM_GREEN_LED 0 +#define DELCOM_RED_LED 1 +#define DELCOM_BLUE_LED 2 + +struct hidled_device; +struct hidled_rgb; + +struct hidled_config { + enum hidled_type type; + const char *name; + const char *short_name; + enum led_brightness max_brightness; + int num_leds; + size_t report_size; + enum hidled_report_type report_type; + int (*init)(struct hidled_device *ldev); + int (*write)(struct led_classdev *cdev, enum led_brightness br); +}; + +struct hidled_led { + struct led_classdev cdev; + struct hidled_rgb *rgb; + char name[32]; +}; + +struct hidled_rgb { + struct hidled_device *ldev; + struct hidled_led red; + struct hidled_led green; + struct hidled_led blue; + u8 num; +}; + +struct hidled_device { + const struct hidled_config *config; + struct hid_device *hdev; + struct hidled_rgb *rgb; + struct mutex lock; +}; + +#define MAX_REPORT_SIZE 16 + +#define to_hidled_led(arg) container_of(arg, struct hidled_led, cdev) + +static bool riso_kagaku_switch_green_blue; +module_param(riso_kagaku_switch_green_blue, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(riso_kagaku_switch_green_blue, + "switch green and blue RGB component for Riso Kagaku devices"); + +static int hidled_send(struct hidled_device *ldev, __u8 *buf) +{ + int ret; + + mutex_lock(&ldev->lock); + + if (ldev->config->report_type == RAW_REQUEST) + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); + else if (ldev->config->report_type == OUTPUT_REPORT) + ret = hid_hw_output_report(ldev->hdev, buf, + ldev->config->report_size); + else + ret = -EINVAL; + + mutex_unlock(&ldev->lock); + + if (ret < 0) + return ret; + + return ret == ldev->config->report_size ? 0 : -EMSGSIZE; +} + +/* reading data is supported for report type RAW_REQUEST only */ +static int hidled_recv(struct hidled_device *ldev, __u8 *buf) +{ + int ret; + + if (ldev->config->report_type != RAW_REQUEST) + return -EINVAL; + + mutex_lock(&ldev->lock); + + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); + if (ret < 0) + goto err; + + ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, + ldev->config->report_size, + HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); +err: + mutex_unlock(&ldev->lock); + + return ret < 0 ? 
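/*
 * A worked example of the riso_kagaku_tbl lookup defined above: the
 * RISO_KAGAKU_IX() macro packs the three on/off channels into a 3-bit
 * value (R = bit 0, G = bit 1, B = bit 2) and maps it to the device's
 * own color index. Self-contained sketch with two spot checks:
 *
 *	static u8 rk_index(bool r, bool g, bool b)
 *	{
 *		static const u8 tbl[] = { 0, 2, 1, 5, 3, 6, 4, 7 };
 *
 *		return tbl[(r ? 1 : 0) + (g ? 2 : 0) + (b ? 4 : 0)];
 *	}
 *
 *	// rk_index(true, true, false) == 5: red + green -> yellow
 *	// rk_index(false, false, true) == 3: blue alone -> index 3
 */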
ret : 0; +} + +static u8 riso_kagaku_index(struct hidled_rgb *rgb) +{ + enum led_brightness r, g, b; + + r = rgb->red.cdev.brightness; + g = rgb->green.cdev.brightness; + b = rgb->blue.cdev.brightness; + + if (riso_kagaku_switch_green_blue) + return RISO_KAGAKU_IX(r, b, g); + else + return RISO_KAGAKU_IX(r, g, b); +} + +static int riso_kagaku_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + struct hidled_rgb *rgb = led->rgb; + __u8 buf[MAX_REPORT_SIZE] = {}; + + buf[1] = riso_kagaku_index(rgb); + + return hidled_send(rgb->ldev, buf); +} + +static int dream_cheeky_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + struct hidled_rgb *rgb = led->rgb; + __u8 buf[MAX_REPORT_SIZE] = {}; + + buf[1] = rgb->red.cdev.brightness; + buf[2] = rgb->green.cdev.brightness; + buf[3] = rgb->blue.cdev.brightness; + buf[7] = 0x1a; + buf[8] = 0x05; + + return hidled_send(rgb->ldev, buf); +} + +static int dream_cheeky_init(struct hidled_device *ldev) +{ + __u8 buf[MAX_REPORT_SIZE] = {}; + + /* Dream Cheeky magic */ + buf[1] = 0x1f; + buf[2] = 0x02; + buf[4] = 0x5f; + buf[7] = 0x1a; + buf[8] = 0x03; + + return hidled_send(ldev, buf); +} + +static int _thingm_write(struct led_classdev *cdev, enum led_brightness br, + u8 offset) +{ + struct hidled_led *led = to_hidled_led(cdev); + __u8 buf[MAX_REPORT_SIZE] = { 1, 'c' }; + + buf[2] = led->rgb->red.cdev.brightness; + buf[3] = led->rgb->green.cdev.brightness; + buf[4] = led->rgb->blue.cdev.brightness; + buf[7] = led->rgb->num + offset; + + return hidled_send(led->rgb->ldev, buf); +} + +static int thingm_write_v1(struct led_classdev *cdev, enum led_brightness br) +{ + return _thingm_write(cdev, br, 0); +} + +static int thingm_write(struct led_classdev *cdev, enum led_brightness br) +{ + return _thingm_write(cdev, br, 1); +} + +static const struct hidled_config hidled_config_thingm_v1 = { + .name = "ThingM blink(1) v1", + .short_name = "thingm", + .max_brightness = 255, + .num_leds = 1, + .report_size = 9, + .report_type = RAW_REQUEST, + .write = thingm_write_v1, +}; + +static int thingm_init(struct hidled_device *ldev) +{ + __u8 buf[MAX_REPORT_SIZE] = { 1, 'v' }; + int ret; + + ret = hidled_recv(ldev, buf); + if (ret) + return ret; + + /* Check for firmware major version 1 */ + if (buf[3] == '1') + ldev->config = &hidled_config_thingm_v1; + + return 0; +} + +static inline int delcom_get_lednum(const struct hidled_led *led) +{ + if (led == &led->rgb->red) + return DELCOM_RED_LED; + else if (led == &led->rgb->green) + return DELCOM_GREEN_LED; + else + return DELCOM_BLUE_LED; +} + +static int delcom_enable_led(struct hidled_led *led) +{ + union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 12 }; + + dp.tx.data_lsb = 1 << delcom_get_lednum(led); + dp.tx.data_msb = 0; + + return hidled_send(led->rgb->ldev, dp.data); +} + +static int delcom_set_pwm(struct hidled_led *led) +{ + union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 34 }; + + dp.tx.data_lsb = delcom_get_lednum(led); + dp.tx.data_msb = led->cdev.brightness; + + return hidled_send(led->rgb->ldev, dp.data); +} + +static int delcom_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + int ret; + + /* + * enable LED + * We can't do this in the init function already because the device + * is internally reset later. 
+ */ + ret = delcom_enable_led(led); + if (ret) + return ret; + + return delcom_set_pwm(led); +} + +static int delcom_init(struct hidled_device *ldev) +{ + union delcom_packet dp = { .rx.cmd = 104 }; + int ret; + + ret = hidled_recv(ldev, dp.data); + if (ret) + return ret; + /* + * Several Delcom devices share the same USB VID/PID + * Check for family id 2 for Visual Signal Indicator + */ + return le16_to_cpu(dp.fw.family_code) == 2 ? 0 : -ENODEV; +} + +static int luxafor_write(struct led_classdev *cdev, enum led_brightness br) +{ + struct hidled_led *led = to_hidled_led(cdev); + __u8 buf[MAX_REPORT_SIZE] = { [1] = 1 }; + + buf[2] = led->rgb->num + 1; + buf[3] = led->rgb->red.cdev.brightness; + buf[4] = led->rgb->green.cdev.brightness; + buf[5] = led->rgb->blue.cdev.brightness; + + return hidled_send(led->rgb->ldev, buf); +} + +static const struct hidled_config hidled_configs[] = { + { + .type = RISO_KAGAKU, + .name = "Riso Kagaku Webmail Notifier", + .short_name = "riso_kagaku", + .max_brightness = 1, + .num_leds = 1, + .report_size = 6, + .report_type = OUTPUT_REPORT, + .write = riso_kagaku_write, + }, + { + .type = DREAM_CHEEKY, + .name = "Dream Cheeky Webmail Notifier", + .short_name = "dream_cheeky", + .max_brightness = 31, + .num_leds = 1, + .report_size = 9, + .report_type = RAW_REQUEST, + .init = dream_cheeky_init, + .write = dream_cheeky_write, + }, + { + .type = THINGM, + .name = "ThingM blink(1)", + .short_name = "thingm", + .max_brightness = 255, + .num_leds = 2, + .report_size = 9, + .report_type = RAW_REQUEST, + .init = thingm_init, + .write = thingm_write, + }, + { + .type = DELCOM, + .name = "Delcom Visual Signal Indicator G2", + .short_name = "delcom", + .max_brightness = 100, + .num_leds = 1, + .report_size = 8, + .report_type = RAW_REQUEST, + .init = delcom_init, + .write = delcom_write, + }, + { + .type = LUXAFOR, + .name = "Greynut Luxafor", + .short_name = "luxafor", + .max_brightness = 255, + .num_leds = 6, + .report_size = 9, + .report_type = OUTPUT_REPORT, + .write = luxafor_write, + }, +}; + +static int hidled_init_led(struct hidled_led *led, const char *color_name, + struct hidled_rgb *rgb, unsigned int minor) +{ + const struct hidled_config *config = rgb->ldev->config; + + if (config->num_leds > 1) + snprintf(led->name, sizeof(led->name), "%s%u:%s:led%u", + config->short_name, minor, color_name, rgb->num); + else + snprintf(led->name, sizeof(led->name), "%s%u:%s", + config->short_name, minor, color_name); + led->cdev.name = led->name; + led->cdev.max_brightness = config->max_brightness; + led->cdev.brightness_set_blocking = config->write; + led->cdev.flags = LED_HW_PLUGGABLE; + led->rgb = rgb; + + return devm_led_classdev_register(&rgb->ldev->hdev->dev, &led->cdev); +} + +static int hidled_init_rgb(struct hidled_rgb *rgb, unsigned int minor) +{ + int ret; + + /* Register the red diode */ + ret = hidled_init_led(&rgb->red, "red", rgb, minor); + if (ret) + return ret; + + /* Register the green diode */ + ret = hidled_init_led(&rgb->green, "green", rgb, minor); + if (ret) + return ret; + + /* Register the blue diode */ + return hidled_init_led(&rgb->blue, "blue", rgb, minor); +} + +static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct hidled_device *ldev; + unsigned int minor; + int ret, i; + + ldev = devm_kzalloc(&hdev->dev, sizeof(*ldev), GFP_KERNEL); + if (!ldev) + return -ENOMEM; + + ret = hid_parse(hdev); + if (ret) + return ret; + + ldev->hdev = hdev; + mutex_init(&ldev->lock); + + for (i = 0; !ldev->config && i < 
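/*
 * The config-lookup loop that resumes right after this aside picks the
 * per-device hidled_config by the driver_data enum stored in the id
 * table - plain table-driven dispatch. A minimal standalone sketch of
 * the same lookup (hypothetical helper name):
 *
 *	static const struct hidled_config *config_by_type(enum hidled_type t)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(hidled_configs); i++)
 *			if (hidled_configs[i].type == t)
 *				return &hidled_configs[i];
 *		return NULL;	// unknown driver_data -> probe returns -EINVAL
 *	}
 */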
ARRAY_SIZE(hidled_configs); i++) + if (hidled_configs[i].type == id->driver_data) + ldev->config = &hidled_configs[i]; + + if (!ldev->config) + return -EINVAL; + + if (ldev->config->init) { + ret = ldev->config->init(ldev); + if (ret) + return ret; + } + + ldev->rgb = devm_kcalloc(&hdev->dev, ldev->config->num_leds, + sizeof(struct hidled_rgb), GFP_KERNEL); + if (!ldev->rgb) + return -ENOMEM; + + ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + if (ret) + return ret; + + minor = ((struct hidraw *) hdev->hidraw)->minor; + + for (i = 0; i < ldev->config->num_leds; i++) { + ldev->rgb[i].ldev = ldev; + ldev->rgb[i].num = i; + ret = hidled_init_rgb(&ldev->rgb[i], minor); + if (ret) { + hid_hw_stop(hdev); + return ret; + } + } + + hid_info(hdev, "%s initialized\n", ldev->config->name); + + return 0; +} + +static const struct hid_device_id hidled_table[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, + USB_DEVICE_ID_RI_KA_WEBMAIL), .driver_data = RISO_KAGAKU }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, + USB_DEVICE_ID_DREAM_CHEEKY_WN), .driver_data = DREAM_CHEEKY }, + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, + USB_DEVICE_ID_DREAM_CHEEKY_FA), .driver_data = DREAM_CHEEKY }, + { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, + USB_DEVICE_ID_BLINK1), .driver_data = THINGM }, + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, + USB_DEVICE_ID_DELCOM_VISUAL_IND), .driver_data = DELCOM }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, + USB_DEVICE_ID_LUXAFOR), .driver_data = LUXAFOR }, + { } +}; +MODULE_DEVICE_TABLE(hid, hidled_table); + +static struct hid_driver hidled_driver = { + .name = "hid-led", + .probe = hidled_probe, + .id_table = hidled_table, +}; + +module_hid_driver(hidled_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Heiner Kallweit <hkallweit1@gmail.com>"); +MODULE_DESCRIPTION("Simple USB RGB LED driver"); @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_ALWAYS_VALID (1 << 4) #define MT_QUIRK_VALID_IS_INRANGE (1 << 5) #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6) +#define MT_QUIRK_CONFIDENCE (1 << 7) #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8) #define MT_QUIRK_NO_AREA (1 << 9) #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10) @@ -78,6 +79,7 @@ struct mt_slot { __s32 contactid; /* the device ContactID assigned to this slot */ bool touch_state; /* is the touch valid? */ bool inrange_state; /* is the finger in proximity of the sensor? */ + bool confidence_state; /* is the touch made by a finger? 
*/ }; struct mt_class { @@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, return 1; case HID_DG_CONFIDENCE: if (cls->name == MT_CLS_WIN_8 && - field->application == HID_DG_TOUCHPAD) { - cls->quirks &= ~MT_QUIRK_ALWAYS_VALID; - cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE; - } + field->application == HID_DG_TOUCHPAD) + cls->quirks |= MT_QUIRK_CONFIDENCE; mt_store_field(usage, td, hi); return 1; case HID_DG_TIPSWITCH: @@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input) return; if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) { + int active; int slotnum = mt_compute_slot(td, input); struct mt_slot *s = &td->curdata; struct input_mt *mt = input->mt; @@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input) return; } + if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE)) + s->confidence_state = 1; + active = (s->touch_state || s->inrange_state) && + s->confidence_state; + input_mt_slot(input, slotnum); - input_mt_report_slot_state(input, MT_TOOL_FINGER, - s->touch_state || s->inrange_state); - if (s->touch_state || s->inrange_state) { + input_mt_report_slot_state(input, MT_TOOL_FINGER, active); + if (active) { /* this finger is in proximity of the sensor */ int wide = (s->w > s->h); /* divided by two to match visual scale of touch */ @@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, td->curdata.touch_state = value; break; case HID_DG_CONFIDENCE: + if (quirks & MT_QUIRK_CONFIDENCE) + td->curdata.confidence_state = value; if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE) td->curvalid = value; break; @@ -1401,6 +1408,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_PCT) }, + /* Ntrig Panel */ + { .driver_data = MT_CLS_NSMU, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_NTRIG, 0x1b05) }, + /* PixArt optical touch screen */ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, MT_USB_DEVICE(USB_VENDOR_ID_PIXART, diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c deleted file mode 100644 index 9ad9c6ec5bba..000000000000 --- a/ drivers/hid/hid-thingm.c+++ /dev/null @@ -1,263 +0,0 @@ -/* - * ThingM blink(1) USB RGB LED driver - * - * Copyright 2013-2014 Savoir-faire Linux Inc. - * Vivien Didelot <vivien.didelot@savoirfairelinux.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2. 
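 *
 * (Aside on the hid-multitouch hunks above: with MT_QUIRK_CONFIDENCE set,
 * a slot is forwarded to the input core only while
 * active = (touch_state || inrange_state) && confidence_state; without
 * the quirk, confidence_state is forced to 1, so behavior is unchanged.
 * Sketch:
 *
 *	static bool mt_slot_active(bool touch, bool inrange, bool confidence,
 *				   bool has_confidence_quirk)
 *	{
 *		if (!has_confidence_quirk)
 *			confidence = true;	// no quirk: always confident
 *		return (touch || inrange) && confidence;
 *	}
 * end of aside)
 *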
- */ - -#include <linux/hid.h> -#include <linux/hidraw.h> -#include <linux/leds.h> -#include <linux/module.h> -#include <linux/mutex.h> - -#include "hid-ids.h" - -#define REPORT_ID 1 -#define REPORT_SIZE 9 - -/* Firmware major number of supported devices */ -#define THINGM_MAJOR_MK1 '1' -#define THINGM_MAJOR_MK2 '2' - -struct thingm_fwinfo { - char major; - unsigned numrgb; - unsigned first; -}; - -static const struct thingm_fwinfo thingm_fwinfo[] = { - { - .major = THINGM_MAJOR_MK1, - .numrgb = 1, - .first = 0, - }, { - .major = THINGM_MAJOR_MK2, - .numrgb = 2, - .first = 1, - } -}; - -/* A red, green or blue channel, part of an RGB chip */ -struct thingm_led { - struct thingm_rgb *rgb; - struct led_classdev ldev; - char name[32]; -}; - -/* Basically a WS2812 5050 RGB LED chip */ -struct thingm_rgb { - struct thingm_device *tdev; - struct thingm_led red; - struct thingm_led green; - struct thingm_led blue; - u8 num; -}; - -struct thingm_device { - struct hid_device *hdev; - struct { - char major; - char minor; - } version; - const struct thingm_fwinfo *fwinfo; - struct mutex lock; - struct thingm_rgb *rgb; -}; - -static int thingm_send(struct thingm_device *tdev, u8 buf[REPORT_SIZE]) -{ - int ret; - - hid_dbg(tdev->hdev, "-> %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n", - buf[0], buf[1], buf[2], buf[3], buf[4], - buf[5], buf[6], buf[7], buf[8]); - - mutex_lock(&tdev->lock); - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - - mutex_unlock(&tdev->lock); - - return ret < 0 ? ret : 0; -} - -static int thingm_recv(struct thingm_device *tdev, u8 buf[REPORT_SIZE]) -{ - int ret; - - /* - * A read consists of two operations: sending the read command - * and the actual read from the device. Use the mutex to protect - * the full sequence of both operations. 
- */ - mutex_lock(&tdev->lock); - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) - goto err; - - ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE, - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); - if (ret < 0) - goto err; - - ret = 0; - - hid_dbg(tdev->hdev, "<- %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n", - buf[0], buf[1], buf[2], buf[3], buf[4], - buf[5], buf[6], buf[7], buf[8]); -err: - mutex_unlock(&tdev->lock); - return ret; -} - -static int thingm_version(struct thingm_device *tdev) -{ - u8 buf[REPORT_SIZE] = { REPORT_ID, 'v', 0, 0, 0, 0, 0, 0, 0 }; - int err; - - err = thingm_recv(tdev, buf); - if (err) - return err; - - tdev->version.major = buf[3]; - tdev->version.minor = buf[4]; - - return 0; -} - -static int thingm_write_color(struct thingm_rgb *rgb) -{ - u8 buf[REPORT_SIZE] = { REPORT_ID, 'c', 0, 0, 0, 0, 0, rgb->num, 0 }; - - buf[2] = rgb->red.ldev.brightness; - buf[3] = rgb->green.ldev.brightness; - buf[4] = rgb->blue.ldev.brightness; - - return thingm_send(rgb->tdev, buf); -} - -static int thingm_led_set(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct thingm_led *led = container_of(ldev, struct thingm_led, ldev); - - return thingm_write_color(led->rgb); -} - -static int thingm_init_led(struct thingm_led *led, const char *color_name, - struct thingm_rgb *rgb, int minor) -{ - snprintf(led->name, sizeof(led->name), "thingm%d:%s:led%d", - minor, color_name, rgb->num); - led->ldev.name = led->name; - led->ldev.max_brightness = 255; - led->ldev.brightness_set_blocking = thingm_led_set; - led->ldev.flags = LED_HW_PLUGGABLE; - led->rgb = rgb; - return devm_led_classdev_register(&rgb->tdev->hdev->dev, &led->ldev); -} - -static int thingm_init_rgb(struct thingm_rgb *rgb) -{ - const int minor = ((struct hidraw *) rgb->tdev->hdev->hidraw)->minor; - int err; - - /* Register the red diode */ - err = thingm_init_led(&rgb->red, "red", rgb, minor); - if (err) - return err; - - /* Register the green diode */ - err = thingm_init_led(&rgb->green, "green", rgb, minor); - if (err) - return err; - - /* Register the blue diode */ - return thingm_init_led(&rgb->blue, "blue", rgb, minor); -} - -static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id) -{ - struct thingm_device *tdev; - int i, err; - - tdev = devm_kzalloc(&hdev->dev, sizeof(struct thingm_device), - GFP_KERNEL); - if (!tdev) - return -ENOMEM; - - tdev->hdev = hdev; - hid_set_drvdata(hdev, tdev); - - err = hid_parse(hdev); - if (err) - return err; - - mutex_init(&tdev->lock); - - err = thingm_version(tdev); - if (err) - return err; - - hid_dbg(hdev, "firmware version: %c.%c\n", - tdev->version.major, tdev->version.minor); - - for (i = 0; i < ARRAY_SIZE(thingm_fwinfo) && !tdev->fwinfo; ++i) - if (thingm_fwinfo[i].major == tdev->version.major) - tdev->fwinfo = &thingm_fwinfo[i]; - - if (!tdev->fwinfo) { - hid_err(hdev, "unsupported firmware %c\n", tdev->version.major); - return -ENODEV; - } - - tdev->rgb = devm_kzalloc(&hdev->dev, - sizeof(struct thingm_rgb) * tdev->fwinfo->numrgb, - GFP_KERNEL); - if (!tdev->rgb) - return -ENOMEM; - - err = hid_hw_start(hdev, HID_CONNECT_HIDRAW); - if (err) - return err; - - for (i = 0; i < tdev->fwinfo->numrgb; ++i) { - struct thingm_rgb *rgb = tdev->rgb + i; - - rgb->tdev = tdev; - rgb->num = tdev->fwinfo->first + i; - err = thingm_init_rgb(rgb); - if (err) { - hid_hw_stop(hdev); - return err; - } - } - - return 0; -} - -static const struct hid_device_id 
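/*
 * The thingm_fwinfo table being deleted here (mk1: one RGB starting at
 * index 0, mk2: two RGBs starting at index 1) survives in hid-led.c as
 * two write callbacks - thingm_write_v1() adds offset 0, thingm_write()
 * adds offset 1 - selected by thingm_init() when the firmware major
 * version reads back as '1'. Condensed sketch of the equivalence:
 *
 *	static u8 thingm_report_index(char fw_major, u8 rgb_num)
 *	{
 *		// mk1 ('1'): index == rgb number; mk2: shifted by one
 *		return fw_major == '1' ? rgb_num : rgb_num + 1;
 *	}
 */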
thingm_table[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, - { } }; -MODULE_DEVICE_TABLE(hid, thingm_table); - -static struct hid_driver thingm_driver = { - .name = "thingm", - .probe = thingm_probe, - .id_table = thingm_table, -}; - -module_hid_driver(thingm_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Vivien Didelot <vivien.didelot@savoirfairelinux.com>"); -MODULE_DESCRIPTION("ThingM blink(1) USB RGB LED driver"); @@ -1020,6 +1020,7 @@ static int i2c_hid_probe(struct i2c_client *client, pm_runtime_get_noresume(&client->dev); pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); + device_enable_async_suspend(&client->dev); ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) @@ -1106,6 +1107,14 @@ static int i2c_hid_remove(struct i2c_client *client) return 0; } +static void i2c_hid_shutdown(struct i2c_client *client) +{ + struct i2c_hid *ihid = i2c_get_clientdata(client); + + i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); + free_irq(client->irq, ihid); +} + #ifdef CONFIG_PM_SLEEP static int i2c_hid_suspend(struct device *dev) { @@ -1230,7 +1239,7 @@ static struct i2c_driver i2c_hid_driver = { .probe = i2c_hid_probe, .remove = i2c_hid_remove, - + .shutdown = i2c_hid_shutdown, .id_table = i2c_hid_id_table, }; @@ -51,10 +51,26 @@ struct uhid_device { u32 report_id; u32 report_type; struct uhid_event report_buf; + struct work_struct worker; }; static struct miscdevice uhid_misc; +static void uhid_device_add_worker(struct work_struct *work) +{ + struct uhid_device *uhid = container_of(work, struct uhid_device, worker); + int ret; + + ret = hid_add_device(uhid->hid); + if (ret) { + hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); + + hid_destroy_device(uhid->hid); + uhid->hid = NULL; + uhid->running = false; + } +} + static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; @@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid, uhid->hid = hid; uhid->running = true; - ret = hid_add_device(hid); - if (ret) { - hid_err(hid, "Cannot register HID device\n"); - goto err_hid; - } + /* Adding a HID device is done through a worker so that HID drivers + * which use feature requests during .probe can work without being + * blocked on devlock, which is held by uhid_char_write. 
+ */ + schedule_work(&uhid->worker); return 0; -err_hid: - hid_destroy_device(hid); - uhid->hid = NULL; - uhid->running = false; err_free: kfree(uhid->rd_data); uhid->rd_data = NULL; @@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid) uhid->running = false; wake_up_interruptible(&uhid->report_wait); + cancel_work_sync(&uhid->worker); + hid_destroy_device(uhid->hid); kfree(uhid->rd_data); @@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file) init_waitqueue_head(&uhid->waitq); init_waitqueue_head(&uhid->report_wait); uhid->running = false; + INIT_WORK(&uhid->worker, uhid_device_add_worker); file->private_data = uhid; nonseekable_open(inode, file); @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, goto inval; } else if (uref->usage_index >= field->report_count) goto inval; - - else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && - (uref_multi->num_values > HID_MAX_MULTI_USAGES || - uref->usage_index + uref_multi->num_values > field->report_count)) - goto inval; } + if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && + (uref_multi->num_values > HID_MAX_MULTI_USAGES || + uref->usage_index + uref_multi->num_values > field->report_count)) + goto inval; + switch (cmd) { case HIDIOCGUSAGE: uref->value = field->value[uref->usage_index]; @@ -1,7 +1,8 @@ # # Makefile for HSI # -obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o +hsi-objs := hsi_core.o +hsi-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-y += controllers/ obj-y += clients/ @@ -444,8 +444,8 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg) hi->control_state &= ~SSI_CHANNEL_STATE_READING; if (msg->status == HSI_STATUS_ERROR) { dev_err(&hi->cl->device, "Control RX error detected\n"); - cs_hsi_control_read_error(hi, msg); spin_unlock(&hi->lock); + cs_hsi_control_read_error(hi, msg); goto out; } dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); @@ -1275,7 +1275,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_end < vma->vm_start) return -EINVAL; - if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1) + if (vma_pages(vma) != 1) return -EINVAL; vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND; @@ -88,6 +88,8 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable); #define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) #define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) +#define SSIP_WAKETEST_FLAG 0 + /* Main state machine states */ enum { INIT, @@ -116,7 +118,7 @@ enum { * @main_state: Main state machine * @send_state: TX state machine * @recv_state: RX state machine - * @waketest: Flag to follow wake line test + * @flags: Flags, currently only used to follow wake line test * @rxid: RX data id * @txid: TX data id * @txqueue_len: TX queue length @@ -137,7 +139,7 @@ struct ssi_protocol { unsigned int main_state; unsigned int send_state; unsigned int recv_state; - unsigned int waketest:1; + unsigned long flags; u8 rxid; u8 txid; unsigned int txqueue_len; @@ -148,6 +150,7 @@ struct ssi_protocol { struct net_device *netdev; struct list_head txqueue; struct list_head cmdqueue; + struct work_struct work; struct hsi_client *cl; struct list_head link; atomic_t tx_usecnt; @@ -405,15 +408,17 @@ static void ssip_reset(struct hsi_client *cl) spin_lock_bh(&ssi->lock); if (ssi->send_state != SEND_IDLE) hsi_stop_tx(cl); - if (ssi->waketest) - ssi_waketest(cl, 0); + spin_unlock_bh(&ssi->lock); + if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) + 
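/*
 * The uhid change above defers hid_add_device() to a worker so that
 * uhid_char_write() no longer sits on devlock while a driver's .probe
 * issues feature requests. Sketch of the lifecycle, with hypothetical
 * names:
 *
 *	struct async_add {
 *		struct work_struct worker;	// INIT_WORK() at open time
 *	};
 *
 *	static void add_worker(struct work_struct *work)
 *	{
 *		// runs in process context, outside the writer's locks
 *	}
 *
 *	// create path:  schedule_work(&a->worker);
 *	// destroy path: cancel_work_sync(&a->worker); before freeing
 */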
ssi_waketest(cl, 0); /* FIXME: To be removed */ + spin_lock_bh(&ssi->lock); del_timer(&ssi->rx_wd); del_timer(&ssi->tx_wd); del_timer(&ssi->keep_alive); ssi->main_state = 0; ssi->send_state = 0; ssi->recv_state = 0; - ssi->waketest = 0; + ssi->flags = 0; ssi->rxid = 0; ssi->txid = 0; list_for_each_safe(head, tmp, &ssi->txqueue) { @@ -437,7 +442,8 @@ static void ssip_dump_state(struct hsi_client *cl) dev_err(&cl->device, "Send state: %d\n", ssi->send_state); dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? "Online" : "Offline"); - dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Wake test %d\n", + test_bit(SSIP_WAKETEST_FLAG, &ssi->flags)); dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); @@ -515,17 +521,17 @@ static void ssip_start_rx(struct hsi_client *cl) dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, ssi->recv_state); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); /* * We can have two UP events in a row due to a short low * high transition. Therefore we need to ignore the sencond UP event. */ if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECV_READY); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); msg = ssip_claim_cmd(ssi); ssip_set_cmd(msg, SSIP_READY_CMD); @@ -539,10 +545,10 @@ static void ssip_stop_rx(struct hsi_client *cl) struct ssi_protocol *ssi = hsi_client_drvdata(cl); dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (likely(ssi->main_state == ACTIVE)) ssip_set_rxstate(ssi, RECV_IDLE); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } static void ssip_free_strans(struct hsi_msg *msg) @@ -559,9 +565,9 @@ static void ssip_strans_complete(struct hsi_msg *msg) data = msg->context; ssip_release_cmd(msg); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); ssip_set_txstate(ssi, SENDING); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); hsi_async_write(cl, data); } @@ -666,15 +672,17 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) /* Fall through */ case INIT: case HANDSHAKE: - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; - if (!ssi->waketest) { - ssi->waketest = 1; + spin_unlock_bh(&ssi->lock); + + if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ - } + + spin_lock_bh(&ssi->lock); /* Start boot handshake watchdog */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info req verid mismatch\n"); @@ -696,14 +704,14 @@ static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info resp verid mismatch\n"); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (ssi->main_state != ACTIVE) /* Use tx_wd as a boot watchdog in non ACTIVE state */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); else dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) @@ -711,20 +719,22 @@ static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) struct 
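/*
 * The spin_lock() -> spin_lock_bh() conversions in these hunks appear to
 * be needed once ssi->lock is taken both from process context (the new
 * workqueue and ndo_start_xmit paths) and from bottom-half context:
 * disabling local BHs while holding the lock prevents a softirq from
 * spinning on a lock its own CPU already owns. The idiom:
 *
 *	spin_lock_bh(&ssi->lock);	// also masks local softirqs
 *	ssi->send_state = new_state;
 *	spin_unlock_bh(&ssi->lock);
 */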
ssi_protocol *ssi = hsi_client_drvdata(cl); unsigned int wkres = SSIP_PAYLOAD(cmd); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (ssi->main_state != HANDSHAKE) { dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } - if (ssi->waketest) { - ssi->waketest = 0; + spin_unlock_bh(&ssi->lock); + + if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 0); /* FIXME: To be removed */ - } + + spin_lock_bh(&ssi->lock); ssi->main_state = ACTIVE; del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); dev_notice(&cl->device, "WAKELINES TEST %s\n", wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK"); @@ -741,20 +751,20 @@ static void ssip_rx_ready(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } if (ssi->send_state != WAIT4READY) { dev_dbg(&cl->device, "Ignore spurious READY command\n"); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_txstate(ssi, SEND_READY); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } @@ -766,22 +776,22 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) int len = SSIP_PDU_LENGTH(cmd); dev_dbg(&cl->device, "RX strans: %d frames\n", len); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECEIVING); if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { dev_err(&cl->device, "START TRANS id %d expected %d\n", SSIP_MSG_ID(cmd), ssi->rxid); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); goto out1; } ssi->rxid++; - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); skb = netdev_alloc_skb(ssi->netdev, len * 4); if (unlikely(!skb)) { dev_err(&cl->device, "No memory for rx skb\n"); @@ -849,7 +859,7 @@ static void ssip_swbreak_complete(struct hsi_msg *msg) struct ssi_protocol *ssi = hsi_client_drvdata(cl); ssip_release_cmd(msg); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { if (atomic_read(&ssi->tx_usecnt)) { ssip_set_txstate(ssi, SEND_READY); @@ -857,9 +867,9 @@ static void ssip_swbreak_complete(struct hsi_msg *msg) ssip_set_txstate(ssi, SEND_IDLE); hsi_stop_tx(cl); } - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } else { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } netif_wake_queue(ssi->netdev); @@ -876,17 +886,17 @@ static void ssip_tx_data_complete(struct hsi_msg *msg) ssip_error(cl); goto out; } - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { ssip_set_txstate(ssi, SENDING_SWBREAK); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); cmsg = ssip_claim_cmd(ssi); ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); cmsg->complete = ssip_swbreak_complete; dev_dbg(&cl->device, "Send SWBREAK\n"); hsi_async_write(cl, cmsg); } else { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } out: @@ -926,11 +936,11 @@ static int ssip_pn_open(struct net_device *dev) } dev_dbg(&cl->device, "Configuring SSI port\n"); 
hsi_setup(cl); - spin_lock_bh(&ssi->lock); - if (!ssi->waketest) { - ssi->waketest = 1; + + if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ - } + + spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; spin_unlock_bh(&ssi->lock); @@ -959,6 +969,15 @@ static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) return 0; } +static void ssip_xmit_work(struct work_struct *work) +{ + struct ssi_protocol *ssi = + container_of(work, struct ssi_protocol, work); + struct hsi_client *cl = ssi->cl; + + ssip_xmit(cl); +} + static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); @@ -1011,7 +1030,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", ssi->txqueue_len); spin_unlock_bh(&ssi->lock); - ssip_xmit(cl); + schedule_work(&ssi->work); } else { spin_unlock_bh(&ssi->lock); } @@ -1088,6 +1107,7 @@ static int ssi_protocol_probe(struct device *dev) atomic_set(&ssi->tx_usecnt, 0); hsi_client_set_drvdata(cl, ssi); ssi->cl = cl; + INIT_WORK(&ssi->work, ssip_xmit_work); ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); if (ssi->channel_id_cmd < 0) { @@ -35,6 +35,8 @@ #define SSI_MAX_GDD_LCH 8 #define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) +#define SSI_WAKE_EN 0 + /** * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context * @mode: Bit transmission mode @@ -71,13 +73,14 @@ struct omap_ssm_ctx { * @txqueue: TX message queues * @rxqueue: RX message queues * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @errqueue: Queue for failed messages + * @errqueue_work: Delayed Work for failed messages * @irq: IRQ number * @wake_irq: IRQ number for incoming wake line (-1 if none) * @wake_gpio: GPIO number for incoming wake line (-1 if none) - * @pio_tasklet: Bottom half for PIO transfers and events - * @wake_tasklet: Bottom half for incoming wake events - * @wkin_cken: Keep track of clock references due to the incoming wake line + * @flags: flags to keep track of states * @wk_refcount: Reference count for output wake line + * @work: worker for starting TX * @sys_mpu_enable: Context for the interrupt enable register for irq 0 * @sst: Context for the synchronous serial transmitter * @ssr: Context for the synchronous serial receiver @@ -95,14 +98,15 @@ struct omap_ssi_port { struct list_head txqueue[SSI_MAX_CHANNELS]; struct list_head rxqueue[SSI_MAX_CHANNELS]; struct list_head brkqueue; + struct list_head errqueue; + struct delayed_work errqueue_work; unsigned int irq; int wake_irq; struct gpio_desc *wake_gpio; - struct tasklet_struct pio_tasklet; - struct tasklet_struct wake_tasklet; bool wktest:1; /* FIXME: HACK to be removed */ - bool wkin_cken:1; /* Workaround */ + unsigned long flags; unsigned int wk_refcount; + struct work_struct work; /* OMAP SSI port context */ u32 sys_mpu_enable; /* We use only one irq */ struct omap_ssm_ctx sst; @@ -138,7 +142,6 @@ struct gdd_trn { * @fck_rate: clock rate * @loss_count: To follow if we need to restore context or not * @max_speed: Maximum TX speed (Kb/s) set by the clients. 
- * @sysconfig: SSI controller saved context * @gdd_gcr: SSI GDD saved context * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any * @port: Array of pointers of the ports of the controller @@ -158,7 +161,6 @@ struct omap_ssi_controller { u32 loss_count; u32 max_speed; /* OMAP SSI Controller context */ - u32 sysconfig; u32 gdd_gcr; int (*get_loss)(struct device *dev); struct omap_ssi_port **port; @@ -58,7 +58,7 @@ static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); return 0; } @@ -112,7 +112,7 @@ static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); } - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); return 0; } @@ -193,7 +193,7 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable) } else { writel_relaxed(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); } } EXPORT_SYMBOL_GPL(ssi_waketest); @@ -217,7 +217,7 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) if (msg->ttype == HSI_MSG_READ) { dir = DMA_FROM_DEVICE; val = SSI_DATAAVAILABLE(msg->channel); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(omap_port->pdev); } else { dir = DMA_TO_DEVICE; val = SSI_DATAACCEPT(msg->channel); @@ -235,7 +235,9 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) spin_lock(&omap_port->lock); list_del(&msg->link); /* Dequeue msg */ spin_unlock(&omap_port->lock); - msg->complete(msg); + + list_add_tail(&msg->link, &omap_port->errqueue); + schedule_delayed_work(&omap_port->errqueue_work, 0); return; } spin_lock(&omap_port->lock); @@ -255,7 +257,13 @@ static void ssi_gdd_tasklet(unsigned long dev) unsigned int lch; u32 status_reg; - pm_runtime_get_sync(ssi->device.parent); + pm_runtime_get(ssi->device.parent); + + if (!pm_runtime_active(ssi->device.parent)) { + dev_warn(ssi->device.parent, "ssi_gdd_tasklet called without runtime PM!\n"); + pm_runtime_put(ssi->device.parent); + return; + } status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { @@ -265,7 +273,7 @@ static void ssi_gdd_tasklet(unsigned long dev) writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); if (status_reg) tasklet_hi_schedule(&omap_ssi->gdd_tasklet); @@ -312,7 +320,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, continue; /* Workaround for SWBREAK + CAwake down race in CMT */ - tasklet_disable(&omap_port->wake_tasklet); + disable_irq(omap_port->wake_irq); /* stop all ssi communication */ pinctrl_pm_select_idle_state(omap_port->pdev); @@ -338,7 +346,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, /* resume ssi communication */ pinctrl_pm_select_default_state(omap_port->pdev); - tasklet_enable(&omap_port->wake_tasklet); + enable_irq(omap_port->wake_irq); } break; @@ -452,8 +460,6 @@ out_err: static int ssi_hw_init(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); - 
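/*
 * Instead of invoking msg->complete() directly from the GDD completion
 * path, failed messages are now parked on errqueue and completed from a
 * delayed work item, moving the callback out of atomic context. Hedged
 * sketch of the hand-off (locking elided):
 *
 *	list_add_tail(&msg->link, &omap_port->errqueue);
 *	schedule_delayed_work(&omap_port->errqueue_work, 0);
 *	// 0 jiffies: run as soon as possible, but in process context,
 *	// where ssi_process_errqueue() walks the list and completes each msg
 */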
unsigned int i; - u32 val; int err; err = pm_runtime_get_sync(ssi->device.parent); @@ -461,27 +467,12 @@ static int ssi_hw_init(struct hsi_controller *ssi) dev_err(&ssi->device, "runtime PM failed %d\n", err); return err; } - /* Reseting SSI controller */ - writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG); - val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); - for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) { - msleep(20); - val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); - } - if (!(val & SSI_RESETDONE)) { - dev_err(&ssi->device, "SSI HW reset failed\n"); - pm_runtime_put_sync(ssi->device.parent); - return -EIO; - } /* Reseting GDD */ writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); /* Get FCK rate in KHz */ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate); - /* Set default PM settings */ - val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART; - writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG); - omap_ssi->sysconfig = val; + writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; pm_runtime_put_sync(ssi->device.parent); @@ -552,7 +543,6 @@ static int ssi_probe(struct platform_device *pd) if (err < 0) goto out1; - pm_runtime_irq_safe(&pd->dev); pm_runtime_enable(&pd->dev); err = ssi_hw_init(ssi); @@ -126,7 +126,7 @@ static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, readl(base + SSI_SSR_BUFFER_CH_REG(ch))); } - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -150,7 +150,7 @@ static int ssi_div_get(void *data, u64 *val) pm_runtime_get_sync(omap_port->pdev); *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -166,7 +166,7 @@ static int ssi_div_set(void *data, u64 val) pm_runtime_get_sync(omap_port->pdev); writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); omap_port->sst.divisor = val; - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -193,6 +193,21 @@ static int ssi_debug_add_port(struct omap_ssi_port *omap_port, } #endif +static void ssi_process_errqueue(struct work_struct *work) +{ + struct omap_ssi_port *omap_port; + struct list_head *head, *tmp; + struct hsi_msg *msg; + + omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); + + list_for_each_safe(head, tmp, &omap_port->errqueue) { + msg = list_entry(head, struct hsi_msg, link); + msg->complete(msg); + list_del(head); + } +} + static int ssi_claim_lch(struct hsi_msg *msg) { @@ -225,11 +240,21 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) u32 d_addr; u32 tmp; + /* Hold clocks during the transfer */ + pm_runtime_get(omap_port->pdev); + + if (!pm_runtime_active(omap_port->pdev)) { + dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n"); + pm_runtime_put_autosuspend(omap_port->pdev); + return -EREMOTEIO; + } + if (msg->ttype == HSI_MSG_READ) { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_FROM_DEVICE); if (err < 0) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | @@ -246,6 +271,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) DMA_TO_DEVICE); if (err < 0) { dev_dbg(&ssi->device, 
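/*
 * With pm_runtime_irq_safe() dropped, atomic paths can no longer call
 * pm_runtime_get_sync(); the hunks above take an asynchronous reference
 * and refuse to touch the hardware if the device has not actually
 * resumed. The guard, sketched for a generic struct device:
 *
 *	pm_runtime_get(dev);		// async: does not wait for resume
 *	if (!pm_runtime_active(dev)) {
 *		pm_runtime_put(dev);	// not powered: give up
 *		return -EREMOTEIO;	// error code used by this driver
 *	}
 *	// ... program the hardware while the reference is held ...
 */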
"DMA map SG failed !\n"); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | @@ -261,9 +287,6 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr); - /* Hold clocks during the transfer */ - pm_runtime_get_sync(omap_port->pdev); - writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); @@ -290,11 +313,18 @@ static int ssi_start_pio(struct hsi_msg *msg) struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); u32 val; - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); + + if (!pm_runtime_active(omap_port->pdev)) { + dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n"); + pm_runtime_put_autosuspend(omap_port->pdev); + return -EREMOTEIO; + } + if (msg->ttype == HSI_MSG_WRITE) { val = SSI_DATAACCEPT(msg->channel); /* Hold clocks for pio writes */ - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); } else { val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; } @@ -302,7 +332,7 @@ static int ssi_start_pio(struct hsi_msg *msg) msg->ttype ? "write" : "read"); val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); msg->actual_len = 0; msg->status = HSI_STATUS_PROCEEDING; @@ -360,7 +390,8 @@ static int ssi_async_break(struct hsi_msg *msg) spin_unlock_bh(&omap_port->lock); } out: - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } @@ -388,6 +419,8 @@ static int ssi_async(struct hsi_msg *msg) queue = &omap_port->rxqueue[msg->channel]; } msg->status = HSI_STATUS_QUEUED; + + pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); list_add_tail(&msg->link, queue); err = ssi_start_transfer(queue); @@ -396,6 +429,8 @@ static int ssi_async(struct hsi_msg *msg) msg->status = HSI_STATUS_ERROR; } spin_unlock_bh(&omap_port->lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", msg->status, msg->ttype, msg->channel); @@ -497,7 +532,8 @@ static int ssi_setup(struct hsi_client *cl) omap_port->ssr.mode = cl->rx_cfg.mode; out: spin_unlock_bh(&omap_port->lock); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } @@ -528,7 +564,7 @@ static int ssi_flush(struct hsi_client *cl) continue; writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); if (msg->ttype == HSI_MSG_READ) - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); omap_ssi->gdd_trn[i].msg = NULL; } /* Flush all SST buffers */ @@ -552,7 +588,7 @@ static int ssi_flush(struct hsi_client *cl) for (i = 0; i < omap_port->channels; i++) { /* Release write clocks */ if (!list_empty(&omap_port->txqueue[i])) - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); ssi_flush_queue(&omap_port->txqueue[i], NULL); ssi_flush_queue(&omap_port->rxqueue[i], NULL); } @@ -562,17 +598,28 @@ static int ssi_flush(struct hsi_client *cl) pinctrl_pm_select_default_state(omap_port->pdev); 
spin_unlock_bh(&omap_port->lock); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } +static void start_tx_work(struct work_struct *work) +{ + struct omap_ssi_port *omap_port = + container_of(work, struct omap_ssi_port, work); + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ + writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); +} + static int ssi_start_tx(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); - struct hsi_controller *ssi = to_hsi_controller(port->device.parent); - struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); @@ -581,10 +628,10 @@ static int ssi_start_tx(struct hsi_client *cl) spin_unlock_bh(&omap_port->wk_lock); return 0; } - pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ - writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); spin_unlock_bh(&omap_port->wk_lock); + schedule_work(&omap_port->work); + return 0; } @@ -604,9 +651,12 @@ static int ssi_stop_tx(struct hsi_client *cl) return 0; } writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); - pm_runtime_put_sync(omap_port->pdev); /* Release clocks */ spin_unlock_bh(&omap_port->wk_lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ + + return 0; } @@ -616,6 +666,7 @@ static void ssi_transfer(struct omap_ssi_port *omap_port, struct hsi_msg *msg; int err = -1; + pm_runtime_get(omap_port->pdev); spin_lock_bh(&omap_port->lock); while (err < 0) { err = ssi_start_transfer(queue); @@ -630,6 +681,8 @@ static void ssi_transfer(struct omap_ssi_port *omap_port, } } spin_unlock_bh(&omap_port->lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } static void ssi_cleanup_queues(struct hsi_client *cl) @@ -658,7 +711,8 @@ static void ssi_cleanup_queues(struct hsi_client *cl) txbufstate |= (1 << i); status |= SSI_DATAACCEPT(i); /* Release the clocks writes, also GDD ones */ - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } ssi_flush_queue(&omap_port->txqueue[i], cl); } @@ -712,8 +766,10 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) * Clock references for write will be handled in * ssi_cleanup_queues */ - if (msg->ttype == HSI_MSG_READ) - pm_runtime_put_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_READ) { + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); + } omap_ssi->gdd_trn[i].msg = NULL; } tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); @@ -738,32 +794,30 @@ static int ssi_release(struct hsi_client *cl) struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); - spin_lock_bh(&omap_port->lock); pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); /* Stop all the pending DMA requests for that client */ ssi_cleanup_gdd(ssi, cl); /* Now cleanup all the queues */ ssi_cleanup_queues(cl); - pm_runtime_put_sync(omap_port->pdev); /* If it is the last client 
of the port, do extra checks and cleanup */ if (port->claimed <= 1) { /* * Drop the clock reference for the incoming wake line * if it is still kept high by the other side. */ - if (omap_port->wkin_cken) { + if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_put_sync(omap_port->pdev); - omap_port->wkin_cken = 0; - } - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); /* Stop any SSI TX/RX without a client */ ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); omap_port->sst.mode = SSI_MODE_SLEEP; omap_port->ssr.mode = SSI_MODE_SLEEP; - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put(omap_port->pdev); WARN_ON(omap_port->wk_refcount != 0); } spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); return 0; } @@ -868,7 +922,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) u32 reg; u32 val; - spin_lock(&omap_port->lock); + spin_lock_bh(&omap_port->lock); msg = list_first_entry(queue, struct hsi_msg, link); if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { msg->actual_len = 0; @@ -900,7 +954,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) (msg->ttype == HSI_MSG_WRITE))) { writel(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); - spin_unlock(&omap_port->lock); + spin_unlock_bh(&omap_port->lock); return; } @@ -910,18 +964,19 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); if (msg->ttype == HSI_MSG_WRITE) { /* Release clocks for write transfer */ - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } reg &= ~val; writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); list_del(&msg->link); - spin_unlock(&omap_port->lock); + spin_unlock_bh(&omap_port->lock); msg->complete(msg); ssi_transfer(omap_port, queue); } -static void ssi_pio_tasklet(unsigned long ssi_port) +static irqreturn_t ssi_pio_thread(int irq, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); @@ -932,41 +987,35 @@ static void ssi_pio_tasklet(unsigned long ssi_port) u32 status_reg; pm_runtime_get_sync(omap_port->pdev); - status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); - status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - for (ch = 0; ch < omap_port->channels; ch++) { - if (status_reg & SSI_DATAACCEPT(ch)) - ssi_pio_complete(port, &omap_port->txqueue[ch]); - if (status_reg & SSI_DATAAVAILABLE(ch)) - ssi_pio_complete(port, &omap_port->rxqueue[ch]); - } - if (status_reg & SSI_BREAKDETECTED) - ssi_break_complete(port); - if (status_reg & SSI_ERROROCCURED) - ssi_error(port); + do { + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); - status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - pm_runtime_put_sync(omap_port->pdev); + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } + if (status_reg & SSI_BREAKDETECTED) + ssi_break_complete(port); + if (status_reg & SSI_ERROROCCURED) + ssi_error(port); - if (status_reg) - 
tasklet_hi_schedule(&omap_port->pio_tasklet); - else - enable_irq(omap_port->irq); -} + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); -static irqreturn_t ssi_pio_isr(int irq, void *port) -{ - struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + /* TODO: sleep if we retry? */ + } while (status_reg); - tasklet_hi_schedule(&omap_port->pio_tasklet); - disable_irq_nosync(irq); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return IRQ_HANDLED; } -static void ssi_wake_tasklet(unsigned long ssi_port) +static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); @@ -981,12 +1030,8 @@ static void ssi_wake_tasklet(unsigned long ssi_port) * This workaround will avoid breaking the clock reference * count when such a situation ocurrs. */ - spin_lock(&omap_port->lock); - if (!omap_port->wkin_cken) { - omap_port->wkin_cken = 1; + if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_get_sync(omap_port->pdev); - } - spin_unlock(&omap_port->lock); dev_dbg(&ssi->device, "Wake in high\n"); if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ writel(SSI_WAKE(0), @@ -1000,26 +1045,16 @@ static void ssi_wake_tasklet(unsigned long ssi_port) omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); } hsi_event(port, HSI_EVENT_STOP_RX); - spin_lock(&omap_port->lock); - if (omap_port->wkin_cken) { - pm_runtime_put_sync(omap_port->pdev); - omap_port->wkin_cken = 0; + if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } - spin_unlock(&omap_port->lock); } -} - -static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port) -{ - struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port); - - tasklet_hi_schedule(&omap_port->wake_tasklet); return IRQ_HANDLED; } -static int ssi_port_irq(struct hsi_port *port, - struct platform_device *pd) +static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int err; @@ -1030,18 +1065,15 @@ static int ssi_port_irq(struct hsi_port *port, return err; } omap_port->irq = err; - tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, - (unsigned long)port); - err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr, - 0, "mpu_irq0", port); + err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, + ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port); if (err < 0) dev_err(&port->device, "Request IRQ %d failed (%d)\n", omap_port->irq, err); return err; } -static int ssi_wake_irq(struct hsi_port *port, - struct platform_device *pd) +static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int cawake_irq; @@ -1053,13 +1085,12 @@ static int ssi_wake_irq(struct hsi_port *port, } cawake_irq = gpiod_to_irq(omap_port->wake_gpio); - omap_port->wake_irq = cawake_irq; - tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, - (unsigned long)port); - err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "cawake", port); + + err = devm_request_threaded_irq(&port->device, cawake_irq, NULL, + ssi_wake_thread, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "SSI cawake", port); if 
(err < 0) dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", cawake_irq, err); @@ -1169,6 +1200,9 @@ static int ssi_port_probe(struct platform_device *pd) omap_port->pdev = &pd->dev; omap_port->port_id = port_id; + INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); + INIT_WORK(&omap_port->work, start_tx_work); + /* initialize HSI port */ port->async = ssi_async; port->setup = ssi_setup; @@ -1202,7 +1236,8 @@ static int ssi_port_probe(struct platform_device *pd) spin_lock_init(&omap_port->wk_lock); omap_port->dev = &port->device; - pm_runtime_irq_safe(omap_port->pdev); + pm_runtime_use_autosuspend(omap_port->pdev); + pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); pm_runtime_enable(omap_port->pdev); #ifdef CONFIG_DEBUG_FS @@ -1234,10 +1269,9 @@ static int ssi_port_remove(struct platform_device *pd) ssi_debug_remove_port(port); #endif - hsi_port_unregister_clients(port); + cancel_delayed_work_sync(&omap_port->errqueue_work); - tasklet_kill(&omap_port->wake_tasklet); - tasklet_kill(&omap_port->pio_tasklet); + hsi_port_unregister_clients(port); port->async = hsi_dummy_msg; port->setup = hsi_dummy_cl; @@ -1248,6 +1282,8 @@ static int ssi_port_remove(struct platform_device *pd) omap_ssi->port[omap_port->port_id] = NULL; platform_set_drvdata(pd, NULL); + + pm_runtime_dont_use_autosuspend(&pd->dev); pm_runtime_disable(&pd->dev); return 0; @@ -90,19 +90,19 @@ struct hsi_client *hsi_new_client(struct hsi_port *port, cl->tx_cfg = info->tx_cfg; if (cl->tx_cfg.channels) { size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); - cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL); + cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size, + GFP_KERNEL); if (!cl->tx_cfg.channels) goto err_tx; - memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size); } cl->rx_cfg = info->rx_cfg; if (cl->rx_cfg.channels) { size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); - cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL); + cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size, + GFP_KERNEL); if (!cl->rx_cfg.channels) goto err_rx; - memcpy(cl->rx_cfg.channels, info->rx_cfg.channels, size); } cl->device.bus = &hsi_bus_type; @@ -507,7 +507,7 @@ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) port[i]->stop_tx = hsi_dummy_cl; port[i]->release = hsi_dummy_cl; mutex_init(&port[i]->lock); - ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head); + BLOCKING_INIT_NOTIFIER_HEAD(&port[i]->n_head); dev_set_name(&port[i]->device, "port%d", i); hsi->port[i]->device.release = hsi_port_release; device_initialize(&hsi->port[i]->device); @@ -689,7 +689,7 @@ int hsi_register_port_event(struct hsi_client *cl, cl->ehandler = handler; cl->nb.notifier_call = hsi_event_notifier_call; - return atomic_notifier_chain_register(&port->n_head, &cl->nb); + return blocking_notifier_chain_register(&port->n_head, &cl->nb); } EXPORT_SYMBOL_GPL(hsi_register_port_event); @@ -709,7 +709,7 @@ int hsi_unregister_port_event(struct hsi_client *cl) WARN_ON(!hsi_port_claimed(cl)); - err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb); + err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb); if (!err) cl->ehandler = NULL; @@ -734,7 +734,7 @@ EXPORT_SYMBOL_GPL(hsi_unregister_port_event); */ int hsi_event(struct hsi_port *port, unsigned long event) { - return atomic_notifier_call_chain(&port->n_head, event, NULL); + return blocking_notifier_call_chain(&port->n_head, event, NULL); } EXPORT_SYMBOL_GPL(hsi_event); @@ -42,6 +42,7 @@ #include <linux/screen_info.h> 
#include <linux/kdebug.h> #include <linux/efi.h> +#include <linux/random.h> #include "hyperv_vmbus.h" static struct acpi_device *hv_acpi_dev; @@ -806,6 +807,8 @@ static void vmbus_isr(void) else tasklet_schedule(hv_context.msg_dpc[cpu]); } + + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); } @@ -486,6 +486,18 @@ config SENSORS_FSCHMD This driver can also be built as a module. If so, the module will be called fschmd. +config SENSORS_FTSTEUTATES + tristate "Fujitsu Technology Solutions sensor chip Teutates" + depends on I2C && WATCHDOG + select WATCHDOG_CORE + help + If you say yes here you get support for the Fujitsu Technology + Solutions (FTS) sensor chip "Teutates" including support for + the integrated watchdog. + + This driver can also be built as a module. If so, the module + will be called ftsteutates. + config SENSORS_GL518SM tristate "Genesys Logic GL518SM" depends on I2C @@ -645,8 +657,8 @@ config SENSORS_JC42 temperature sensors, which are used on many DDR3 memory modules for mobile devices and servers. Support will include, but not be limited to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805, - MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E), - STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001. + MCP9808, MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, + STTS424(E), STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001. This driver can also be built as a module. If so, the module will be called jc42. @@ -958,6 +970,7 @@ config SENSORS_LM75 tristate "National Semiconductor LM75 and compatibles" depends on I2C depends on THERMAL || !THERMAL_OF + select REGMAP_I2C help If you say yes here you get support for one common type of temperature sensor chip, with models including: @@ -1265,6 +1278,17 @@ config SENSORS_SHT21 This driver can also be built as a module. If so, the module will be called sht21. +config SENSORS_SHT3x + tristate "Sensiron humidity and temperature sensors. SHT3x and compat." + depends on I2C + select CRC8 + help + If you say yes here you get support for the Sensiron SHT30 and SHT31 + humidity and temperature sensors. + + This driver can also be built as a module. If so, the module + will be called sht3x. + config SENSORS_SHTC1 tristate "Sensiron humidity and temperature sensors. SHTC1 and compat." depends on I2C @@ -1514,6 +1538,17 @@ config SENSORS_INA2XX This driver can also be built as a module. If so, the module will be called ina2xx. +config SENSORS_INA3221 + tristate "Texas Instruments INA3221 Triple Power Monitor" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for the TI INA3221 Triple Power + Monitor. + + This driver can also be built as a module. If so, the module + will be called ina3221. + config SENSORS_TC74 tristate "Microchip TC74" depends on I2C @@ -1538,6 +1573,7 @@ config SENSORS_TMP102 tristate "Texas Instruments TMP102" depends on I2C depends on THERMAL || !THERMAL_OF + select REGMAP_I2C help If you say yes here you get support for Texas Instruments TMP102 sensor chips. @@ -1561,7 +1597,7 @@ config SENSORS_TMP401 depends on I2C help If you say yes here you get support for Texas Instruments TMP401, - TMP411, TMP431, TMP432 and TMP435 temperature sensor chips. + TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips. This driver can also be built as a module. If so, the module will be called tmp401. 
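The "select REGMAP_I2C" lines added to the LM75 and TMP102 entries track those drivers' conversion to the regmap API, and the new INA3221 driver further down is regmap-based from the start. As a rough sketch of the pattern behind these selections (a hypothetical sensor driver, not code from any patch in this series): the register layout is described once in a regmap_config, and all accesses then go through the map.

/*
 * Illustrative only: a minimal regmap-backed I2C client.
 * Every name here is invented for the example.
 */
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,		/* one-byte register addresses */
	.val_bits = 16,		/* 16-bit register values */
	.max_register = 0x03,	/* assumed extent of the register map */
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct regmap *regmap;
	unsigned int raw;
	int ret;

	regmap = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	ret = regmap_read(regmap, 0x00, &raw);	/* e.g. a temperature register */
	if (ret)
		return ret;

	dev_dbg(&client->dev, "raw reading: 0x%04x\n", raw);
	return 0;
}

Beyond uniform error handling, regmap brings optional register caching and debugfs register dumps for free, which is why even small sensor drivers keep being converted.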
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o obj-$(CONFIG_SENSORS_F75375S) += f75375s.o obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o +obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o obj-$(CONFIG_SENSORS_G760A) += g760a.o obj-$(CONFIG_SENSORS_G762) += g762.o obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o @@ -77,6 +78,7 @@ obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o obj-$(CONFIG_SENSORS_INA209) += ina209.o obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o +obj-$(CONFIG_SENSORS_INA3221) += ina3221.o obj-$(CONFIG_SENSORS_IT87) += it87.o obj-$(CONFIG_SENSORS_JC42) += jc42.o obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o @@ -138,6 +140,7 @@ obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SHT21) += sht21.o +obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o obj-$(CONFIG_SENSORS_SMM665) += smm665.o @@ -37,7 +37,6 @@ enum ad7314_variant { struct ad7314_data { struct spi_device *spi_dev; - struct device *hwmon_dev; u16 rx ____cacheline_aligned; }; @@ -88,62 +87,30 @@ static ssize_t ad7314_show_temperature(struct device *dev, } } -static ssize_t ad7314_show_name(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); -} - -static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ad7314_show_temperature, NULL, 0); -static struct attribute *ad7314_attributes[] = { - &dev_attr_name.attr, +static struct attribute *ad7314_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; -static const struct attribute_group ad7314_group = { - .attrs = ad7314_attributes, -}; +ATTRIBUTE_GROUPS(ad7314); static int ad7314_probe(struct spi_device *spi_dev) { - int ret; struct ad7314_data *chip; + struct device *hwmon_dev; chip = devm_kzalloc(&spi_dev->dev, sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; - spi_set_drvdata(spi_dev, chip); - - ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group); - if (ret < 0) - return ret; - - chip->hwmon_dev = hwmon_device_register(&spi_dev->dev); - if (IS_ERR(chip->hwmon_dev)) { - ret = PTR_ERR(chip->hwmon_dev); - goto error_remove_group; - } chip->spi_dev = spi_dev; - - return 0; -error_remove_group: - sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group); - return ret; -} - -static int ad7314_remove(struct spi_device *spi_dev) -{ - struct ad7314_data *chip = spi_get_drvdata(spi_dev); - - hwmon_device_unregister(chip->hwmon_dev); - sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group); - - return 0; + hwmon_dev = devm_hwmon_device_register_with_groups(&spi_dev->dev, + spi_dev->modalias, + chip, ad7314_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct spi_device_id ad7314_id[] = { @@ -159,7 +126,6 @@ static struct spi_driver ad7314_driver = { .name = "ad7314", }, .probe = ad7314_probe, - .remove = ad7314_remove, .id_table = ad7314_id, }; @@ -66,14 +66,12 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> -#include <linux/mutex.h> #include <linux/delay.h> #define DEVICE_NAME "ads7871" struct ads7871_data { - struct device *hwmon_dev; - struct mutex update_lock; + struct spi_device *spi; }; static int ads7871_read_reg8(struct spi_device *spi, int reg) @@ -101,7 +99,8 @@ static int ads7871_write_reg8(struct spi_device 
*spi, int reg, u8 val) static ssize_t show_voltage(struct device *dev, struct device_attribute *da, char *buf) { - struct spi_device *spi = to_spi_device(dev); + struct ads7871_data *pdata = dev_get_drvdata(dev); + struct spi_device *spi = pdata->spi; struct sensor_device_attribute *attr = to_sensor_dev_attr(da); int ret, val, i = 0; uint8_t channel, mux_cnv; @@ -139,12 +138,6 @@ static ssize_t show_voltage(struct device *dev, } } -static ssize_t ads7871_show_name(struct device *dev, - struct device_attribute *devattr, char *buf) -{ - return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); -} - static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); @@ -154,9 +147,7 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); -static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL); - -static struct attribute *ads7871_attributes[] = { +static struct attribute *ads7871_attrs[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, @@ -165,21 +156,18 @@ static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, - &dev_attr_name.attr, NULL }; -static const struct attribute_group ads7871_group = { - .attrs = ads7871_attributes, -}; +ATTRIBUTE_GROUPS(ads7871); static int ads7871_probe(struct spi_device *spi) { - int ret, err; + struct device *dev = &spi->dev; + int ret; uint8_t val; struct ads7871_data *pdata; - - dev_dbg(&spi->dev, "probe\n"); + struct device *hwmon_dev; /* Configure the SPI bus */ spi->mode = (SPI_MODE_0); @@ -193,7 +181,7 @@ static int ads7871_probe(struct spi_device *spi) ads7871_write_reg8(spi, REG_OSC_CONTROL, val); ret = ads7871_read_reg8(spi, REG_OSC_CONTROL); - dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret); + dev_dbg(dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret); /* * because there is no other error checking on an SPI bus * we need to make sure we really have a chip @@ -201,46 +189,23 @@ static int ads7871_probe(struct spi_device *spi) if (val != ret) return -ENODEV; - pdata = devm_kzalloc(&spi->dev, sizeof(struct ads7871_data), - GFP_KERNEL); + pdata = devm_kzalloc(dev, sizeof(struct ads7871_data), GFP_KERNEL); if (!pdata) return -ENOMEM; - err = sysfs_create_group(&spi->dev.kobj, &ads7871_group); - if (err < 0) - return err; - - spi_set_drvdata(spi, pdata); + pdata->spi = spi; - pdata->hwmon_dev = hwmon_device_register(&spi->dev); - if (IS_ERR(pdata->hwmon_dev)) { - err = PTR_ERR(pdata->hwmon_dev); - goto error_remove; - } - - return 0; - -error_remove: - sysfs_remove_group(&spi->dev.kobj, &ads7871_group); - return err; -} - -static int ads7871_remove(struct spi_device *spi) -{ - struct ads7871_data *pdata = spi_get_drvdata(spi); - - hwmon_device_unregister(pdata->hwmon_dev); - sysfs_remove_group(&spi->dev.kobj, &ads7871_group); - return 0; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, spi->modalias, + pdata, + ads7871_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); } static struct spi_driver ads7871_driver = { .driver = { .name = DEVICE_NAME, }, - .probe = ads7871_probe, - .remove = ads7871_remove, }; 
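Both SPI sensor conversions above (ad7314 and ads7871) reduce to the same devm pattern: ATTRIBUTE_GROUPS() expands the attribute list into a <name>_groups array, and devm_hwmon_device_register_with_groups() both creates the sysfs "name" attribute (so the hand-rolled show_name callbacks can go) and unregisters everything on unbind (so the remove() callbacks and error-unwinding labels can go). A condensed sketch of that shape, using an invented "foo" driver rather than code from either patch:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/spi/spi.h>

struct foo_data {
	struct spi_device *spi;
};

static ssize_t foo_show_temp(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);	/* placeholder reading */
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL, 0);

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);		/* emits the foo_groups[] used below */

static int foo_probe(struct spi_device *spi)
{
	struct foo_data *data;
	struct device *hwmon_dev;

	data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->spi = spi;

	/* the core adds the "name" attribute and tears down on unbind */
	hwmon_dev = devm_hwmon_device_register_with_groups(&spi->dev, "foo",
							   data, foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}

PTR_ERR_OR_ZERO() folds the IS_ERR() test and the success return into a single statement, which is why both converted probe functions now end the same way.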
module_spi_driver(ads7871_driver);
@@ -30,6 +30,7 @@
 #define ADT7411_REG_CFG1		0x18
 #define ADT7411_CFG1_START_MONITOR	(1 << 0)
+#define ADT7411_CFG1_RESERVED_BIT3	(1 << 3)
 #define ADT7411_REG_CFG2		0x19
 #define ADT7411_CFG2_DISABLE_AVG	(1 << 5)
@@ -296,8 +297,10 @@ static int adt7411_probe(struct i2c_client *client,
 	mutex_init(&data->device_lock);
 	mutex_init(&data->update_lock);
+	/* According to the datasheet, we must only write 1 to bit 3 */
 	ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
-				 ADT7411_CFG1_START_MONITOR, 1);
+				 ADT7411_CFG1_RESERVED_BIT3
+				 | ADT7411_CFG1_START_MONITOR, 1);
 	if (ret < 0)
 		return ret;
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 #include <linux/i8k.h>
@@ -66,11 +67,13 @@ static DEFINE_MUTEX(i8k_mutex);
 static char bios_version[4];
+static char bios_machineid[16];
 static struct device *i8k_hwmon_dev;
 static u32 i8k_hwmon_flags;
 static uint i8k_fan_mult = I8K_FAN_MULT;
 static uint i8k_pwm_mult;
 static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
 #define I8K_HWMON_HAVE_TEMP1	(1 << 0)
 #define I8K_HWMON_HAVE_TEMP2	(1 << 1)
@@ -78,6 +81,7 @@ static uint i8k_fan_max = I8K_FAN_HIGH;
 #define I8K_HWMON_HAVE_TEMP4	(1 << 3)
 #define I8K_HWMON_HAVE_FAN1	(1 << 4)
 #define I8K_HWMON_HAVE_FAN2	(1 << 5)
+#define I8K_HWMON_HAVE_FAN3	(1 << 6)
 MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -94,13 +98,13 @@ module_param(ignore_dmi, bool, 0);
 MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
 #if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
 module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
 static bool power_status;
 module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
 #endif
 static uint fan_mult;
@@ -136,6 +140,14 @@ static int i8k_smm(struct smm_regs *regs)
 	int eax = regs->eax;
 	cpumask_var_t old_mask;
+#ifdef DEBUG
+	int ebx = regs->ebx;
+	unsigned long duration;
+	ktime_t calltime, delta, rettime;
+
+	calltime = ktime_get();
+#endif
+
 	/* SMM requires CPU 0 */
 	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
 		return -ENOMEM;
@@ -207,6 +219,15 @@ static int i8k_smm(struct smm_regs *regs)
 out:
 	set_cpus_allowed_ptr(current, old_mask);
 	free_cpumask_var(old_mask);
+
+#ifdef DEBUG
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = ktime_to_ns(delta) >> 10;
+	pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x (took %7lu usecs)\n", eax, ebx,
+		 (rc ? 0xffff : regs->eax & 0xffff), duration);
+#endif
+
 	return rc;
 }
@@ -235,14 +256,28 @@ static int i8k_get_fan_speed(int fan)
 /*
  * Read the fan type.
  */
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
 {
 	struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
 
+	if (disallow_fan_type_call)
+		return -EINVAL;
+
 	regs.ebx = fan & 0xff;
 	return i8k_smm(&regs) ?
: regs.eax & 0xff; } +static int i8k_get_fan_type(int fan) +{ + /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */ + static int types[3] = { INT_MIN, INT_MIN, INT_MIN }; + + if (types[fan] == INT_MIN) + types[fan] = _i8k_get_fan_type(fan); + + return types[fan]; +} + /* * Read the fan nominal rpm for specific fan speed. */ @@ -387,14 +422,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) switch (cmd) { case I8K_BIOS_VERSION: + if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) || + !isdigit(bios_version[2])) + return -EINVAL; + val = (bios_version[0] << 16) | (bios_version[1] << 8) | bios_version[2]; break; case I8K_MACHINE_ID: - memset(buff, 0, 16); - strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), - sizeof(buff)); + if (restricted && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + memset(buff, 0, sizeof(buff)); + strlcpy(buff, bios_machineid, sizeof(buff)); break; case I8K_FN_STATUS: @@ -511,7 +552,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset) seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", I8K_PROC_FMT, bios_version, - i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid, cpu_temp, left_fan, right_fan, left_speed, right_speed, ac_power, fn_key); @@ -696,6 +737,12 @@ static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL, 1); static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, i8k_hwmon_set_pwm, 1); +static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, i8k_hwmon_show_fan, NULL, + 2); +static SENSOR_DEVICE_ATTR(fan3_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL, + 2); +static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, + i8k_hwmon_set_pwm, 2); static struct attribute *i8k_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */ @@ -712,12 +759,18 @@ static struct attribute *i8k_attrs[] = { &sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */ &sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */ + &sensor_dev_attr_fan3_input.dev_attr.attr, /* 14 */ + &sensor_dev_attr_fan3_label.dev_attr.attr, /* 15 */ + &sensor_dev_attr_pwm3.dev_attr.attr, /* 16 */ NULL }; static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, int index) { + if (disallow_fan_type_call && + (index == 9 || index == 12 || index == 15)) + return 0; if (index >= 0 && index <= 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) return 0; @@ -736,6 +789,9 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, if (index >= 11 && index <= 13 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2)) return 0; + if (index >= 14 && index <= 16 && + !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3)) + return 0; return attr->mode; } @@ -767,16 +823,27 @@ static int __init i8k_init_hwmon(void) if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; - /* First fan attributes, if fan type is OK */ - err = i8k_get_fan_type(0); + /* First fan attributes, if fan status or type is OK */ + err = i8k_get_fan_status(0); + if (err < 0) + err = i8k_get_fan_type(0); if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; - /* Second fan attributes, if fan type is OK */ - err = i8k_get_fan_type(1); + /* Second fan attributes, if fan status or type is OK */ + err = i8k_get_fan_status(1); + if (err < 0) + err = i8k_get_fan_type(1); if (err >= 0) i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; + /* Third fan attributes, if fan status or type is OK */ + err = i8k_get_fan_status(2); + if (err < 0) + err = 
i8k_get_fan_type(2); + if (err >= 0) + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN3; + i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm", NULL, i8k_groups); if (IS_ERR(i8k_hwmon_dev)) { @@ -929,12 +996,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); -static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { +/* + * On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed + * randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist + * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call. + * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 + */ +static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { { - /* - * CPU fan speed going up and down on Dell Studio XPS 8000 - * for unknown reasons. - */ .ident = "Dell Studio XPS 8000", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -942,16 +1011,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { }, }, { - /* - * CPU fan speed going up and down on Dell Studio XPS 8100 - * for unknown reasons. - */ .ident = "Dell Studio XPS 8100", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), }, }, + { + .ident = "Dell Inspiron 580", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "), + }, + }, { } }; @@ -966,8 +1038,7 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table) || - dmi_check_system(i8k_blacklist_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; @@ -978,8 +1049,13 @@ static int __init i8k_probe(void) i8k_get_dmi_data(DMI_BIOS_VERSION)); } + if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) + disallow_fan_type_call = true; + strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version)); + strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), + sizeof(bios_machineid)); /* * Get SMM Dell signature @@ -464,7 +464,7 @@ static int emc6w201_detect(struct i2c_client *client, if (verstep < 0 || (verstep & 0xF0) != 0xB0) return -ENODEV; if ((verstep & 0x0F) > 2) { - dev_dbg(&client->dev, "Unknwown EMC6W201 stepping %d\n", + dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n", verstep & 0x0F); return -ENODEV; } @@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data) */ static int read_registers(struct fam15h_power_data *data) { - int this_cpu, ret, cpu; int core, this_core; cpumask_var_t mask; + int ret, cpu; ret = zalloc_cpumask_var(&mask, GFP_KERNEL); if (!ret) @@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data) memset(data->cu_on, 0, sizeof(int) * MAX_CUS); get_online_cpus(); - this_cpu = smp_processor_id(); /* * Choose the first online core of each compute unit, and then @@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data) cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); } - if (cpumask_test_cpu(this_cpu, mask)) - do_read_registers_on_cu(data); + on_each_cpu_mask(mask, do_read_registers_on_cu, data, true); - smp_call_function_many(mask, do_read_registers_on_cu, data, true); put_online_cpus(); - free_cpumask_var(mask); return 0; diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c new file mode 100644 index 000000000000..2b2ff67026be --- /dev/null +++ b/ drivers/hwmon/ftsteutates.c@@ -0,0 +1,819 @@ 
+/* + * Support for the FTS Systemmonitoring Chip "Teutates" + * + * Copyright (C) 2016 Fujitsu Technology Solutions GmbH, + * Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/sysfs.h> +#include <linux/uaccess.h> +#include <linux/watchdog.h> + +#define FTS_DEVICE_ID_REG 0x0000 +#define FTS_DEVICE_REVISION_REG 0x0001 +#define FTS_DEVICE_STATUS_REG 0x0004 +#define FTS_SATELLITE_STATUS_REG 0x0005 +#define FTS_EVENT_STATUS_REG 0x0006 +#define FTS_GLOBAL_CONTROL_REG 0x0007 + +#define FTS_SENSOR_EVENT_REG 0x0010 + +#define FTS_FAN_EVENT_REG 0x0014 +#define FTS_FAN_PRESENT_REG 0x0015 + +#define FTS_POWER_ON_TIME_COUNTER_A 0x007A +#define FTS_POWER_ON_TIME_COUNTER_B 0x007B +#define FTS_POWER_ON_TIME_COUNTER_C 0x007C + +#define FTS_PAGE_SELECT_REG 0x007F + +#define FTS_WATCHDOG_TIME_PRESET 0x000B +#define FTS_WATCHDOG_CONTROL 0x5081 + +#define FTS_NO_FAN_SENSORS 0x08 +#define FTS_NO_TEMP_SENSORS 0x10 +#define FTS_NO_VOLT_SENSORS 0x04 + +static struct i2c_device_id fts_id[] = { + { "ftsteutates", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, fts_id); + +enum WATCHDOG_RESOLUTION { + seconds = 1, + minutes = 60 +}; + +struct fts_data { + struct i2c_client *client; + /* update sensor data lock */ + struct mutex update_lock; + /* read/write register lock */ + struct mutex access_lock; + unsigned long last_updated; /* in jiffies */ + struct watchdog_device wdd; + enum WATCHDOG_RESOLUTION resolution; + bool valid; /* false until following fields are valid */ + + u8 volt[FTS_NO_VOLT_SENSORS]; + + u8 temp_input[FTS_NO_TEMP_SENSORS]; + u8 temp_alarm; + + u8 fan_present; + u8 fan_input[FTS_NO_FAN_SENSORS]; /* in rps */ + u8 fan_source[FTS_NO_FAN_SENSORS]; + u8 fan_alarm; +}; + +#define FTS_REG_FAN_INPUT(idx) ((idx) + 0x20) +#define FTS_REG_FAN_SOURCE(idx) ((idx) + 0x30) +#define FTS_REG_FAN_CONTROL(idx) (((idx) << 16) + 0x4881) + +#define FTS_REG_TEMP_INPUT(idx) ((idx) + 0x40) +#define FTS_REG_TEMP_CONTROL(idx) (((idx) << 16) + 0x0681) + +#define FTS_REG_VOLT(idx) ((idx) + 0x18) + +/*****************************************************************************/ +/* I2C Helper functions */ +/*****************************************************************************/ +static int fts_read_byte(struct i2c_client *client, unsigned short reg) +{ + int ret; + unsigned char page = reg >> 8; + struct fts_data *data = dev_get_drvdata(&client->dev); + + mutex_lock(&data->access_lock); + + dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page); + ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page); + if (ret < 0) + goto error; + + reg &= 0xFF; + ret = i2c_smbus_read_byte_data(client, reg); + dev_dbg(&client->dev, "read - reg: 0x%.02x: val: 0x%.02x\n", reg, ret); + +error: + mutex_unlock(&data->access_lock); + return 
ret; +} + +static int fts_write_byte(struct i2c_client *client, unsigned short reg, + unsigned char value) +{ + int ret; + unsigned char page = reg >> 8; + struct fts_data *data = dev_get_drvdata(&client->dev); + + mutex_lock(&data->access_lock); + + dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page); + ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page); + if (ret < 0) + goto error; + + reg &= 0xFF; + dev_dbg(&client->dev, + "write - reg: 0x%.02x: val: 0x%.02x\n", reg, value); + ret = i2c_smbus_write_byte_data(client, reg, value); + +error: + mutex_unlock(&data->access_lock); + return ret; +} + +/*****************************************************************************/ +/* Data Updater Helper function */ +/*****************************************************************************/ +static int fts_update_device(struct fts_data *data) +{ + int i; + int err = 0; + + mutex_lock(&data->update_lock); + if (!time_after(jiffies, data->last_updated + 2 * HZ) && data->valid) + goto exit; + + err = fts_read_byte(data->client, FTS_DEVICE_STATUS_REG); + if (err < 0) + goto exit; + + data->valid = !!(err & 0x02); /* Data not ready yet */ + if (unlikely(!data->valid)) { + err = -EAGAIN; + goto exit; + } + + err = fts_read_byte(data->client, FTS_FAN_PRESENT_REG); + if (err < 0) + goto exit; + data->fan_present = err; + + err = fts_read_byte(data->client, FTS_FAN_EVENT_REG); + if (err < 0) + goto exit; + data->fan_alarm = err; + + for (i = 0; i < FTS_NO_FAN_SENSORS; i++) { + if (data->fan_present & BIT(i)) { + err = fts_read_byte(data->client, FTS_REG_FAN_INPUT(i)); + if (err < 0) + goto exit; + data->fan_input[i] = err; + + err = fts_read_byte(data->client, + FTS_REG_FAN_SOURCE(i)); + if (err < 0) + goto exit; + data->fan_source[i] = err; + } else { + data->fan_input[i] = 0; + data->fan_source[i] = 0; + } + } + + err = fts_read_byte(data->client, FTS_SENSOR_EVENT_REG); + if (err < 0) + goto exit; + data->temp_alarm = err; + + for (i = 0; i < FTS_NO_TEMP_SENSORS; i++) { + err = fts_read_byte(data->client, FTS_REG_TEMP_INPUT(i)); + if (err < 0) + goto exit; + data->temp_input[i] = err; + } + + for (i = 0; i < FTS_NO_VOLT_SENSORS; i++) { + err = fts_read_byte(data->client, FTS_REG_VOLT(i)); + if (err < 0) + goto exit; + data->volt[i] = err; + } + data->last_updated = jiffies; + err = 0; +exit: + mutex_unlock(&data->update_lock); + return err; +} + +/*****************************************************************************/ +/* Watchdog functions */ +/*****************************************************************************/ +static int fts_wd_set_resolution(struct fts_data *data, + enum WATCHDOG_RESOLUTION resolution) +{ + int ret; + + if (data->resolution == resolution) + return 0; + + ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL); + if (ret < 0) + return ret; + + if ((resolution == seconds && ret & BIT(1)) || + (resolution == minutes && (ret & BIT(1)) == 0)) { + data->resolution = resolution; + return 0; + } + + if (resolution == seconds) + set_bit(1, (unsigned long *)&ret); + else + ret &= ~BIT(1); + + ret = fts_write_byte(data->client, FTS_WATCHDOG_CONTROL, ret); + if (ret < 0) + return ret; + + data->resolution = resolution; + return ret; +} + +static int fts_wd_set_timeout(struct watchdog_device *wdd, unsigned int timeout) +{ + struct fts_data *data; + enum WATCHDOG_RESOLUTION resolution = seconds; + int ret; + + data = watchdog_get_drvdata(wdd); + /* switch watchdog resolution to minutes if timeout does not fit + * into a byte + */ + if (timeout > 
0xFF) { + timeout = DIV_ROUND_UP(timeout, 60) * 60; + resolution = minutes; + } + + ret = fts_wd_set_resolution(data, resolution); + if (ret < 0) + return ret; + + wdd->timeout = timeout; + return 0; +} + +static int fts_wd_start(struct watchdog_device *wdd) +{ + struct fts_data *data = watchdog_get_drvdata(wdd); + + return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET, + wdd->timeout / (u8)data->resolution); +} + +static int fts_wd_stop(struct watchdog_device *wdd) +{ + struct fts_data *data; + + data = watchdog_get_drvdata(wdd); + return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET, 0); +} + +static const struct watchdog_info fts_wd_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "FTS Teutates Hardware Watchdog", +}; + +static const struct watchdog_ops fts_wd_ops = { + .owner = THIS_MODULE, + .start = fts_wd_start, + .stop = fts_wd_stop, + .set_timeout = fts_wd_set_timeout, +}; + +static int fts_watchdog_init(struct fts_data *data) +{ + int timeout, ret; + + watchdog_set_drvdata(&data->wdd, data); + + timeout = fts_read_byte(data->client, FTS_WATCHDOG_TIME_PRESET); + if (timeout < 0) + return timeout; + + /* watchdog not running, set timeout to a default of 60 sec. */ + if (timeout == 0) { + ret = fts_wd_set_resolution(data, seconds); + if (ret < 0) + return ret; + data->wdd.timeout = 60; + } else { + ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL); + if (ret < 0) + return ret; + + data->resolution = ret & BIT(1) ? seconds : minutes; + data->wdd.timeout = timeout * (u8)data->resolution; + set_bit(WDOG_HW_RUNNING, &data->wdd.status); + } + + /* Register our watchdog part */ + data->wdd.info = &fts_wd_info; + data->wdd.ops = &fts_wd_ops; + data->wdd.parent = &data->client->dev; + data->wdd.min_timeout = 1; + + /* max timeout 255 minutes. 
*/ + data->wdd.max_hw_heartbeat_ms = 0xFF * 60 * MSEC_PER_SEC; + + return watchdog_register_device(&data->wdd); +} + +/*****************************************************************************/ +/* SysFS handler functions */ +/*****************************************************************************/ +static ssize_t show_in_value(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%u\n", data->volt[index]); +} + +static ssize_t show_temp_value(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%u\n", data->temp_input[index]); +} + +static ssize_t show_temp_fault(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + /* 00h Temperature = Sensor Error */ + return sprintf(buf, "%d\n", data->temp_input[index] == 0); +} + +static ssize_t show_temp_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%u\n", !!(data->temp_alarm & BIT(index))); +} + +static ssize_t +clear_temp_alarm(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + long ret; + + ret = fts_update_device(data); + if (ret < 0) + return ret; + + if (kstrtoul(buf, 10, &ret) || ret != 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(index)); + if (ret < 0) + goto error; + + ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(index), + ret | 0x1); + if (ret < 0) + goto error; + + data->valid = false; +error: + mutex_unlock(&data->update_lock); + return ret; +} + +static ssize_t show_fan_value(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%u\n", data->fan_input[index]); +} + +static ssize_t show_fan_source(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%u\n", data->fan_source[index]); +} + +static ssize_t show_fan_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + int err; + + err = fts_update_device(data); + if (err < 0) + return err; + + return sprintf(buf, "%d\n", !!(data->fan_alarm & BIT(index))); +} + +static ssize_t +clear_fan_alarm(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct 
fts_data *data = dev_get_drvdata(dev); + int index = to_sensor_dev_attr(devattr)->index; + long ret; + + ret = fts_update_device(data); + if (ret < 0) + return ret; + + if (kstrtoul(buf, 10, &ret) || ret != 0) + return -EINVAL; + + mutex_lock(&data->update_lock); + ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(index)); + if (ret < 0) + goto error; + + ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(index), + ret | 0x1); + if (ret < 0) + goto error; + + data->valid = false; +error: + mutex_unlock(&data->update_lock); + return ret; +} + +/*****************************************************************************/ +/* SysFS structs */ +/*****************************************************************************/ + +/* Temprature sensors */ +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_value, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_value, NULL, 5); +static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_value, NULL, 6); +static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_value, NULL, 7); +static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO, show_temp_value, NULL, 8); +static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO, show_temp_value, NULL, 9); +static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO, show_temp_value, NULL, 10); +static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO, show_temp_value, NULL, 11); +static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO, show_temp_value, NULL, 12); +static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO, show_temp_value, NULL, 13); +static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO, show_temp_value, NULL, 14); +static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO, show_temp_value, NULL, 15); + +static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5); +static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_temp_fault, NULL, 6); +static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_temp_fault, NULL, 7); +static SENSOR_DEVICE_ATTR(temp9_fault, S_IRUGO, show_temp_fault, NULL, 8); +static SENSOR_DEVICE_ATTR(temp10_fault, S_IRUGO, show_temp_fault, NULL, 9); +static SENSOR_DEVICE_ATTR(temp11_fault, S_IRUGO, show_temp_fault, NULL, 10); +static SENSOR_DEVICE_ATTR(temp12_fault, S_IRUGO, show_temp_fault, NULL, 11); +static SENSOR_DEVICE_ATTR(temp13_fault, S_IRUGO, show_temp_fault, NULL, 12); +static SENSOR_DEVICE_ATTR(temp14_fault, S_IRUGO, show_temp_fault, NULL, 13); +static SENSOR_DEVICE_ATTR(temp15_fault, S_IRUGO, show_temp_fault, NULL, 14); +static SENSOR_DEVICE_ATTR(temp16_fault, S_IRUGO, show_temp_fault, NULL, 15); + +static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 0); +static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 1); +static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 2); +static 
SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 3); +static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 4); +static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 5); +static SENSOR_DEVICE_ATTR(temp7_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 6); +static SENSOR_DEVICE_ATTR(temp8_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 7); +static SENSOR_DEVICE_ATTR(temp9_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 8); +static SENSOR_DEVICE_ATTR(temp10_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 9); +static SENSOR_DEVICE_ATTR(temp11_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 10); +static SENSOR_DEVICE_ATTR(temp12_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 11); +static SENSOR_DEVICE_ATTR(temp13_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 12); +static SENSOR_DEVICE_ATTR(temp14_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 13); +static SENSOR_DEVICE_ATTR(temp15_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 14); +static SENSOR_DEVICE_ATTR(temp16_alarm, S_IRUGO | S_IWUSR, show_temp_alarm, + clear_temp_alarm, 15); + +static struct attribute *fts_temp_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp6_input.dev_attr.attr, + &sensor_dev_attr_temp7_input.dev_attr.attr, + &sensor_dev_attr_temp8_input.dev_attr.attr, + &sensor_dev_attr_temp9_input.dev_attr.attr, + &sensor_dev_attr_temp10_input.dev_attr.attr, + &sensor_dev_attr_temp11_input.dev_attr.attr, + &sensor_dev_attr_temp12_input.dev_attr.attr, + &sensor_dev_attr_temp13_input.dev_attr.attr, + &sensor_dev_attr_temp14_input.dev_attr.attr, + &sensor_dev_attr_temp15_input.dev_attr.attr, + &sensor_dev_attr_temp16_input.dev_attr.attr, + + &sensor_dev_attr_temp1_fault.dev_attr.attr, + &sensor_dev_attr_temp2_fault.dev_attr.attr, + &sensor_dev_attr_temp3_fault.dev_attr.attr, + &sensor_dev_attr_temp4_fault.dev_attr.attr, + &sensor_dev_attr_temp5_fault.dev_attr.attr, + &sensor_dev_attr_temp6_fault.dev_attr.attr, + &sensor_dev_attr_temp7_fault.dev_attr.attr, + &sensor_dev_attr_temp8_fault.dev_attr.attr, + &sensor_dev_attr_temp9_fault.dev_attr.attr, + &sensor_dev_attr_temp10_fault.dev_attr.attr, + &sensor_dev_attr_temp11_fault.dev_attr.attr, + &sensor_dev_attr_temp12_fault.dev_attr.attr, + &sensor_dev_attr_temp13_fault.dev_attr.attr, + &sensor_dev_attr_temp14_fault.dev_attr.attr, + &sensor_dev_attr_temp15_fault.dev_attr.attr, + &sensor_dev_attr_temp16_fault.dev_attr.attr, + + &sensor_dev_attr_temp1_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_alarm.dev_attr.attr, + &sensor_dev_attr_temp6_alarm.dev_attr.attr, + &sensor_dev_attr_temp7_alarm.dev_attr.attr, + &sensor_dev_attr_temp8_alarm.dev_attr.attr, + &sensor_dev_attr_temp9_alarm.dev_attr.attr, + &sensor_dev_attr_temp10_alarm.dev_attr.attr, + &sensor_dev_attr_temp11_alarm.dev_attr.attr, + &sensor_dev_attr_temp12_alarm.dev_attr.attr, + &sensor_dev_attr_temp13_alarm.dev_attr.attr, + &sensor_dev_attr_temp14_alarm.dev_attr.attr, + &sensor_dev_attr_temp15_alarm.dev_attr.attr, + 
&sensor_dev_attr_temp16_alarm.dev_attr.attr, + NULL +}; + +/* Fans */ +static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_value, NULL, 0); +static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_value, NULL, 1); +static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_value, NULL, 2); +static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_value, NULL, 3); +static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_value, NULL, 4); +static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_value, NULL, 5); +static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_value, NULL, 6); +static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_value, NULL, 7); + +static SENSOR_DEVICE_ATTR(fan1_source, S_IRUGO, show_fan_source, NULL, 0); +static SENSOR_DEVICE_ATTR(fan2_source, S_IRUGO, show_fan_source, NULL, 1); +static SENSOR_DEVICE_ATTR(fan3_source, S_IRUGO, show_fan_source, NULL, 2); +static SENSOR_DEVICE_ATTR(fan4_source, S_IRUGO, show_fan_source, NULL, 3); +static SENSOR_DEVICE_ATTR(fan5_source, S_IRUGO, show_fan_source, NULL, 4); +static SENSOR_DEVICE_ATTR(fan6_source, S_IRUGO, show_fan_source, NULL, 5); +static SENSOR_DEVICE_ATTR(fan7_source, S_IRUGO, show_fan_source, NULL, 6); +static SENSOR_DEVICE_ATTR(fan8_source, S_IRUGO, show_fan_source, NULL, 7); + +static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 0); +static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 1); +static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 2); +static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 3); +static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 4); +static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 5); +static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 6); +static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO | S_IWUSR, + show_fan_alarm, clear_fan_alarm, 7); + +static struct attribute *fts_fan_attrs[] = { + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + &sensor_dev_attr_fan5_input.dev_attr.attr, + &sensor_dev_attr_fan6_input.dev_attr.attr, + &sensor_dev_attr_fan7_input.dev_attr.attr, + &sensor_dev_attr_fan8_input.dev_attr.attr, + + &sensor_dev_attr_fan1_source.dev_attr.attr, + &sensor_dev_attr_fan2_source.dev_attr.attr, + &sensor_dev_attr_fan3_source.dev_attr.attr, + &sensor_dev_attr_fan4_source.dev_attr.attr, + &sensor_dev_attr_fan5_source.dev_attr.attr, + &sensor_dev_attr_fan6_source.dev_attr.attr, + &sensor_dev_attr_fan7_source.dev_attr.attr, + &sensor_dev_attr_fan8_source.dev_attr.attr, + + &sensor_dev_attr_fan1_alarm.dev_attr.attr, + &sensor_dev_attr_fan2_alarm.dev_attr.attr, + &sensor_dev_attr_fan3_alarm.dev_attr.attr, + &sensor_dev_attr_fan4_alarm.dev_attr.attr, + &sensor_dev_attr_fan5_alarm.dev_attr.attr, + &sensor_dev_attr_fan6_alarm.dev_attr.attr, + &sensor_dev_attr_fan7_alarm.dev_attr.attr, + &sensor_dev_attr_fan8_alarm.dev_attr.attr, + NULL +}; + +/* Voltages */ +static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_value, NULL, 0); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_value, NULL, 1); +static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_value, NULL, 2); +static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_value, NULL, 3); +static struct attribute *fts_voltage_attrs[] = 
{ + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + NULL +}; + +static const struct attribute_group fts_voltage_attr_group = { + .attrs = fts_voltage_attrs +}; + +static const struct attribute_group fts_temp_attr_group = { + .attrs = fts_temp_attrs +}; + +static const struct attribute_group fts_fan_attr_group = { + .attrs = fts_fan_attrs +}; + +static const struct attribute_group *fts_attr_groups[] = { + &fts_voltage_attr_group, + &fts_temp_attr_group, + &fts_fan_attr_group, + NULL +}; + +/*****************************************************************************/ +/* Module initialization / remove functions */ +/*****************************************************************************/ +static int fts_remove(struct i2c_client *client) +{ + struct fts_data *data = dev_get_drvdata(&client->dev); + + watchdog_unregister_device(&data->wdd); + return 0; +} + +static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + u8 revision; + struct fts_data *data; + int err; + s8 deviceid; + struct device *hwmon_dev; + + if (client->addr != 0x73) + return -ENODEV; + + /* Baseboard Management Controller check */ + deviceid = i2c_smbus_read_byte_data(client, FTS_DEVICE_ID_REG); + if (deviceid > 0 && (deviceid & 0xF0) == 0x10) { + switch (deviceid & 0x0F) { + case 0x01: + break; + default: + dev_dbg(&client->dev, + "No Baseboard Management Controller\n"); + return -ENODEV; + } + } else { + dev_dbg(&client->dev, "No fujitsu board\n"); + return -ENODEV; + } + + data = devm_kzalloc(&client->dev, sizeof(struct fts_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->update_lock); + mutex_init(&data->access_lock); + data->client = client; + dev_set_drvdata(&client->dev, data); + + err = i2c_smbus_read_byte_data(client, FTS_DEVICE_REVISION_REG); + if (err < 0) + return err; + revision = err; + + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, + "ftsteutates", + data, + fts_attr_groups); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + err = fts_watchdog_init(data); + if (err) + return err; + + dev_info(&client->dev, "Detected FTS Teutates chip, revision: %d.%d\n", + (revision & 0xF0) >> 4, revision & 0x0F); + return 0; +} + +/*****************************************************************************/ +/* Module Details */ +/*****************************************************************************/ +static struct i2c_driver fts_driver = { + .driver = { + .name = "ftsteutates", + }, + .id_table = fts_id, + .probe = fts_probe, + .remove = fts_remove, +}; + +module_i2c_driver(fts_driver); + +MODULE_AUTHOR("Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>"); +MODULE_DESCRIPTION("FTS Teutates driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c new file mode 100644 index 000000000000..e6b49500c52a --- /dev/null +++ b/ drivers/hwmon/ina3221.c@@ -0,0 +1,445 @@ +/* + * INA3221 Triple Current/Voltage Monitor + * + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define INA3221_DRIVER_NAME		"ina3221"
+
+#define INA3221_CONFIG			0x00
+#define INA3221_SHUNT1			0x01
+#define INA3221_BUS1			0x02
+#define INA3221_SHUNT2			0x03
+#define INA3221_BUS2			0x04
+#define INA3221_SHUNT3			0x05
+#define INA3221_BUS3			0x06
+#define INA3221_CRIT1			0x07
+#define INA3221_WARN1			0x08
+#define INA3221_CRIT2			0x09
+#define INA3221_WARN2			0x0a
+#define INA3221_CRIT3			0x0b
+#define INA3221_WARN3			0x0c
+#define INA3221_MASK_ENABLE		0x0f
+
+#define INA3221_CONFIG_MODE_SHUNT	BIT(1)
+#define INA3221_CONFIG_MODE_BUS		BIT(2)
+#define INA3221_CONFIG_MODE_CONTINUOUS	BIT(3)
+
+#define INA3221_RSHUNT_DEFAULT		10000
+
+enum ina3221_fields {
+	/* Configuration */
+	F_RST,
+
+	/* Alert Flags */
+	F_WF3, F_WF2, F_WF1,
+	F_CF3, F_CF2, F_CF1,
+
+	/* sentinel */
+	F_MAX_FIELDS
+};
+
+static const struct reg_field ina3221_reg_fields[] = {
+	[F_RST] = REG_FIELD(INA3221_CONFIG, 15, 15),
+
+	[F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
+	[F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
+	[F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+	[F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
+	[F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
+	[F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
+};
+
+enum ina3221_channels {
+	INA3221_CHANNEL1,
+	INA3221_CHANNEL2,
+	INA3221_CHANNEL3,
+	INA3221_NUM_CHANNELS
+};
+
+static const unsigned int register_channel[] = {
+	[INA3221_SHUNT1] = INA3221_CHANNEL1,
+	[INA3221_SHUNT2] = INA3221_CHANNEL2,
+	[INA3221_SHUNT3] = INA3221_CHANNEL3,
+	[INA3221_CRIT1] = INA3221_CHANNEL1,
+	[INA3221_CRIT2] = INA3221_CHANNEL2,
+	[INA3221_CRIT3] = INA3221_CHANNEL3,
+	[INA3221_WARN1] = INA3221_CHANNEL1,
+	[INA3221_WARN2] = INA3221_CHANNEL2,
+	[INA3221_WARN3] = INA3221_CHANNEL3,
+};
+
+/**
+ * struct ina3221_data - device specific information
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @shunt_resistors: Array of resistor values per channel
+ */
+struct ina3221_data {
+	struct regmap *regmap;
+	struct regmap_field *fields[F_MAX_FIELDS];
+	int shunt_resistors[INA3221_NUM_CHANNELS];
+};
+
+static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
+			      int *val)
+{
+	unsigned int regval;
+	int ret;
+
+	ret = regmap_read(ina->regmap, reg, &regval);
+	if (ret)
+		return ret;
+
+	*val = sign_extend32(regval >> 3, 12);
+
+	return 0;
+}
+
+static ssize_t ina3221_show_bus_voltage(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int reg = sd_attr->index;
+	int val, voltage_mv, ret;
+
+	ret = ina3221_read_value(ina, reg, &val);
+	if (ret)
+		return ret;
+
+	voltage_mv = val * 8;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", voltage_mv);
+}
+
+static ssize_t ina3221_show_shunt_voltage(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int reg = sd_attr->index;
+	int val, voltage_uv, ret;
+
+	ret = ina3221_read_value(ina, reg, &val);
+	if (ret)
+		return ret;
+	voltage_uv = val * 40;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", voltage_uv);
+}
+
+static ssize_t ina3221_show_current(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int reg = sd_attr->index;
+	unsigned int channel = register_channel[reg];
+	int resistance_uo = ina->shunt_resistors[channel];
+	int val, current_ma, voltage_nv, ret;
+
+	ret = ina3221_read_value(ina, reg, &val);
+	if (ret)
+		return ret;
+	voltage_nv = val * 40000;
+
+	current_ma = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", current_ma);
+}
+
+static ssize_t ina3221_set_current(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int reg = sd_attr->index;
+	unsigned int channel = register_channel[reg];
+	int resistance_uo = ina->shunt_resistors[channel];
+	int val, current_ma, voltage_uv, ret;
+
+	ret = kstrtoint(buf, 0, &current_ma);
+	if (ret)
+		return ret;
+
+	/* clamp current */
+	current_ma = clamp_val(current_ma,
+			       INT_MIN / resistance_uo,
+			       INT_MAX / resistance_uo);
+
+	voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
+
+	/* clamp voltage */
+	voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+
+	/* 1 / 40uV(scale) << 3(register shift) = 5 */
+	val = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+
+	ret = regmap_write(ina->regmap, reg, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t ina3221_show_shunt(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int channel = sd_attr->index;
+	unsigned int resistance_uo;
+
+	resistance_uo = ina->shunt_resistors[channel];
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", resistance_uo);
+}
+
+static ssize_t ina3221_set_shunt(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int channel = sd_attr->index;
+	int val;
+	int ret;
+
+	ret = kstrtoint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val = clamp_val(val, 1, INT_MAX);
+
+	ina->shunt_resistors[channel] = val;
+
+	return count;
+}
+
+static ssize_t ina3221_show_alert(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+	struct ina3221_data *ina = dev_get_drvdata(dev);
+	unsigned int field = sd_attr->index;
+	unsigned int regval;
+	int ret;
+
+	ret = regmap_field_read(ina->fields[field], &regval);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", regval);
+}
+
+/* bus voltage */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
+		ina3221_show_bus_voltage, NULL, INA3221_BUS1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
+		ina3221_show_bus_voltage, NULL, INA3221_BUS2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO,
+		ina3221_show_bus_voltage, NULL, INA3221_BUS3);
+
+/* calculated current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
+		ina3221_show_current, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO,
+		ina3221_show_current, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO,
+
ina3221_show_current, NULL, INA3221_SHUNT3); + +/* shunt resistance */ +static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR, + ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1); +static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR, + ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL2); +static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR, + ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3); + +/* critical current */ +static SENSOR_DEVICE_ATTR(curr1_crit, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_CRIT1); +static SENSOR_DEVICE_ATTR(curr2_crit, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_CRIT2); +static SENSOR_DEVICE_ATTR(curr3_crit, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_CRIT3); + +/* critical current alert */ +static SENSOR_DEVICE_ATTR(curr1_crit_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_CF1); +static SENSOR_DEVICE_ATTR(curr2_crit_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_CF2); +static SENSOR_DEVICE_ATTR(curr3_crit_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_CF3); + +/* warning current */ +static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_WARN1); +static SENSOR_DEVICE_ATTR(curr2_max, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_WARN2); +static SENSOR_DEVICE_ATTR(curr3_max, S_IRUGO | S_IWUSR, + ina3221_show_current, ina3221_set_current, INA3221_WARN3); + +/* warning current alert */ +static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_WF1); +static SENSOR_DEVICE_ATTR(curr2_max_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_WF2); +static SENSOR_DEVICE_ATTR(curr3_max_alarm, S_IRUGO, + ina3221_show_alert, NULL, F_WF3); + +/* shunt voltage */ +static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, + ina3221_show_shunt_voltage, NULL, INA3221_SHUNT1); +static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, + ina3221_show_shunt_voltage, NULL, INA3221_SHUNT2); +static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, + ina3221_show_shunt_voltage, NULL, INA3221_SHUNT3); + +static struct attribute *ina3221_attrs[] = { + /* channel 1 */ + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_curr1_input.dev_attr.attr, + &sensor_dev_attr_shunt1_resistor.dev_attr.attr, + &sensor_dev_attr_curr1_crit.dev_attr.attr, + &sensor_dev_attr_curr1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_curr1_max.dev_attr.attr, + &sensor_dev_attr_curr1_max_alarm.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + + /* channel 2 */ + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_curr2_input.dev_attr.attr, + &sensor_dev_attr_shunt2_resistor.dev_attr.attr, + &sensor_dev_attr_curr2_crit.dev_attr.attr, + &sensor_dev_attr_curr2_crit_alarm.dev_attr.attr, + &sensor_dev_attr_curr2_max.dev_attr.attr, + &sensor_dev_attr_curr2_max_alarm.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + + /* channel 3 */ + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_curr3_input.dev_attr.attr, + &sensor_dev_attr_shunt3_resistor.dev_attr.attr, + &sensor_dev_attr_curr3_crit.dev_attr.attr, + &sensor_dev_attr_curr3_crit_alarm.dev_attr.attr, + &sensor_dev_attr_curr3_max.dev_attr.attr, + &sensor_dev_attr_curr3_max_alarm.dev_attr.attr, + &sensor_dev_attr_in6_input.dev_attr.attr, + + NULL, +}; +ATTRIBUTE_GROUPS(ina3221); + +static const struct regmap_range ina3221_yes_ranges[] = { + regmap_reg_range(INA3221_SHUNT1, INA3221_BUS3), + regmap_reg_range(INA3221_MASK_ENABLE, 
INA3221_MASK_ENABLE), +}; + +static const struct regmap_access_table ina3221_volatile_table = { + .yes_ranges = ina3221_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(ina3221_yes_ranges), +}; + +static const struct regmap_config ina3221_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + + .cache_type = REGCACHE_RBTREE, + .volatile_table = &ina3221_volatile_table, +}; + +static int ina3221_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ina3221_data *ina; + struct device *hwmon_dev; + int i, ret; + + ina = devm_kzalloc(dev, sizeof(*ina), GFP_KERNEL); + if (!ina) + return -ENOMEM; + + ina->regmap = devm_regmap_init_i2c(client, &ina3221_regmap_config); + if (IS_ERR(ina->regmap)) { + dev_err(dev, "Unable to allocate register map\n"); + return PTR_ERR(ina->regmap); + } + + for (i = 0; i < F_MAX_FIELDS; i++) { + ina->fields[i] = devm_regmap_field_alloc(dev, + ina->regmap, + ina3221_reg_fields[i]); + if (IS_ERR(ina->fields[i])) { + dev_err(dev, "Unable to allocate regmap fields\n"); + return PTR_ERR(ina->fields[i]); + } + } + + for (i = 0; i < INA3221_NUM_CHANNELS; i++) + ina->shunt_resistors[i] = INA3221_RSHUNT_DEFAULT; + + ret = regmap_field_write(ina->fields[F_RST], true); + if (ret) { + dev_err(dev, "Unable to reset device\n"); + return ret; + } + + hwmon_dev = devm_hwmon_device_register_with_groups(dev, + client->name, + ina, ina3221_groups); + if (IS_ERR(hwmon_dev)) { + dev_err(dev, "Unable to register hwmon device\n"); + return PTR_ERR(hwmon_dev); + } + + return 0; +} + +static const struct of_device_id ina3221_of_match_table[] = { + { .compatible = "ti,ina3221", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ina3221_of_match_table); + +static const struct i2c_device_id ina3221_ids[] = { + { "ina3221", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, ina3221_ids); + +static struct i2c_driver ina3221_i2c_driver = { + .probe = ina3221_probe, + .driver = { + .name = INA3221_DRIVER_NAME, + .of_match_table = ina3221_of_match_table, + }, + .id_table = ina3221_ids, +}; +module_i2c_driver(ina3221_i2c_driver); + +MODULE_AUTHOR("Andrew F. 
Davis <afd@ti.com>"); +MODULE_DESCRIPTION("Texas Instruments INA3221 HWMon Driver"); +MODULE_LICENSE("GPL v2"); @@ -31,6 +31,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> +#include <linux/of.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { @@ -104,6 +105,9 @@ static const unsigned short normal_i2c[] = { #define MCP9804_DEVID 0x0200 #define MCP9804_DEVID_MASK 0xfffc +#define MCP9808_DEVID 0x0400 +#define MCP9808_DEVID_MASK 0xfffc + #define MCP98242_DEVID 0x2000 #define MCP98242_DEVID_MASK 0xfffc @@ -160,6 +164,7 @@ static struct jc42_chips jc42_chips[] = { { IDT_MANID, TS3001_DEVID, TS3001_DEVID_MASK }, { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK }, { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK }, + { MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK }, { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK }, { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK }, { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK }, @@ -537,11 +542,20 @@ static const struct i2c_device_id jc42_id[] = { }; MODULE_DEVICE_TABLE(i2c, jc42_id); +#ifdef CONFIG_OF +static const struct of_device_id jc42_of_ids[] = { + { .compatible = "jedec,jc-42.4-temp", }, + { } +}; +MODULE_DEVICE_TABLE(of, jc42_of_ids); +#endif + static struct i2c_driver jc42_driver = { - .class = I2C_CLASS_SPD, + .class = I2C_CLASS_SPD | I2C_CLASS_HWMON, .driver = { .name = "jc42", .pm = JC42_DEV_PM_OPS, + .of_match_table = of_match_ptr(jc42_of_ids), }, .probe = jc42_probe, .remove = jc42_remove, @@ -29,23 +29,13 @@ struct jz4740_hwmon { void __iomem *base; - int irq; - const struct mfd_cell *cell; - struct device *hwmon; - + struct platform_device *pdev; struct completion read_completion; - struct mutex lock; }; -static ssize_t jz4740_hwmon_show_name(struct device *dev, - struct device_attribute *dev_attr, char *buf) -{ - return sprintf(buf, "jz4740\n"); -} - static irqreturn_t jz4740_hwmon_irq(int irq, void *data) { struct jz4740_hwmon *hwmon = data; @@ -58,6 +48,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct jz4740_hwmon *hwmon = dev_get_drvdata(dev); + struct platform_device *pdev = hwmon->pdev; struct completion *completion = &hwmon->read_completion; long t; unsigned long val; @@ -68,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev, reinit_completion(completion); enable_irq(hwmon->irq); - hwmon->cell->enable(to_platform_device(dev)); + hwmon->cell->enable(pdev); t = wait_for_completion_interruptible_timeout(completion, HZ); @@ -80,7 +71,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev, ret = t ? 
t : -ETIMEDOUT; } - hwmon->cell->disable(to_platform_device(dev)); + hwmon->cell->disable(pdev); disable_irq(hwmon->irq); mutex_unlock(&hwmon->lock); @@ -88,26 +79,24 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev, return ret; } -static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL); static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL); -static struct attribute *jz4740_hwmon_attributes[] = { - &dev_attr_name.attr, +static struct attribute *jz4740_attrs[] = { &dev_attr_in0_input.attr, NULL }; -static const struct attribute_group jz4740_hwmon_attr_group = { - .attrs = jz4740_hwmon_attributes, -}; +ATTRIBUTE_GROUPS(jz4740); static int jz4740_hwmon_probe(struct platform_device *pdev) { int ret; + struct device *dev = &pdev->dev; struct jz4740_hwmon *hwmon; + struct device *hwmon_dev; struct resource *mem; - hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL); + hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL); if (!hwmon) return -ENOMEM; @@ -125,12 +114,11 @@ static int jz4740_hwmon_probe(struct platform_device *pdev) if (IS_ERR(hwmon->base)) return PTR_ERR(hwmon->base); + hwmon->pdev = pdev; init_completion(&hwmon->read_completion); mutex_init(&hwmon->lock); - platform_set_drvdata(pdev, hwmon); - - ret = devm_request_irq(&pdev->dev, hwmon->irq, jz4740_hwmon_irq, 0, + ret = devm_request_irq(dev, hwmon->irq, jz4740_hwmon_irq, 0, pdev->name, hwmon); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); @@ -138,38 +126,13 @@ static int jz4740_hwmon_probe(struct platform_device *pdev) } disable_irq(hwmon->irq); - ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); - if (ret) { - dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret); - return ret; - } - - hwmon->hwmon = hwmon_device_register(&pdev->dev); - if (IS_ERR(hwmon->hwmon)) { - ret = PTR_ERR(hwmon->hwmon); - goto err_remove_file; - } - - return 0; - -err_remove_file: - sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); - return ret; -} - -static int jz4740_hwmon_remove(struct platform_device *pdev) -{ - struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev); - - hwmon_device_unregister(hwmon->hwmon); - sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); - - return 0; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "jz4740", hwmon, + jz4740_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); } static struct platform_driver jz4740_hwmon_driver = { .probe = jz4740_hwmon_probe, - .remove = jz4740_hwmon_remove, .driver = { .name = "jz4740-hwmon", }, @@ -26,8 +26,8 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> -#include <linux/mutex.h> #include <linux/of.h> +#include <linux/regmap.h> #include <linux/thermal.h> #include "lm75.h" @@ -66,35 +66,21 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, /* The LM75 registers */ +#define LM75_REG_TEMP 0x00 #define LM75_REG_CONF 0x01 -static const u8 LM75_REG_TEMP[3] = { - 0x00, /* input */ - 0x03, /* max */ - 0x02, /* hyst */ -}; +#define LM75_REG_HYST 0x02 +#define LM75_REG_MAX 0x03 /* Each client has this additional data */ struct lm75_data { struct i2c_client *client; - struct device *hwmon_dev; - struct mutex update_lock; + struct regmap *regmap; u8 orig_conf; u8 resolution; /* In bits, between 9 and 12 */ u8 resolution_limits; - char valid; /* !=0 if registers are valid */ - unsigned long last_updated; /* In jiffies */ - unsigned long sample_time; /* In jiffies */ - s16 temp[3]; /* Register values, - 0 = input - 1 = 
max - 2 = hyst */ + unsigned int sample_time; /* In ms */ }; -static int lm75_read_value(struct i2c_client *client, u8 reg); -static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value); -static struct lm75_data *lm75_update_device(struct device *dev); - - /*-----------------------------------------------------------------------*/ static inline long lm75_reg_to_mc(s16 temp, u8 resolution) @@ -106,12 +92,15 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution) static int lm75_read_temp(void *dev, int *temp) { - struct lm75_data *data = lm75_update_device(dev); + struct lm75_data *data = dev_get_drvdata(dev); + unsigned int _temp; + int err; - if (IS_ERR(data)) - return PTR_ERR(data); + err = regmap_read(data->regmap, LM75_REG_TEMP, &_temp); + if (err < 0) + return err; - *temp = lm75_reg_to_mc(data->temp[0], data->resolution); + *temp = lm75_reg_to_mc(_temp, data->resolution); return 0; } @@ -120,13 +109,15 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); - struct lm75_data *data = lm75_update_device(dev); + struct lm75_data *data = dev_get_drvdata(dev); + unsigned int temp = 0; + int err; - if (IS_ERR(data)) - return PTR_ERR(data); + err = regmap_read(data->regmap, attr->index, &temp); + if (err < 0) + return err; - return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index], - data->resolution)); + return sprintf(buf, "%ld\n", lm75_reg_to_mc(temp, data->resolution)); } static ssize_t set_temp(struct device *dev, struct device_attribute *da, @@ -134,8 +125,6 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct lm75_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - int nr = attr->index; long temp; int error; u8 resolution; @@ -153,25 +142,36 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, else resolution = data->resolution; - mutex_lock(&data->update_lock); temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX); - data->temp[nr] = DIV_ROUND_CLOSEST(temp << (resolution - 8), - 1000) << (16 - resolution); - lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]); - mutex_unlock(&data->update_lock); + temp = DIV_ROUND_CLOSEST(temp << (resolution - 8), + 1000) << (16 - resolution); + error = regmap_write(data->regmap, attr->index, temp); + if (error < 0) + return error; + return count; } +static ssize_t show_update_interval(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct lm75_data *data = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", data->sample_time); +} + static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, - show_temp, set_temp, 1); + show_temp, set_temp, LM75_REG_MAX); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, - show_temp, set_temp, 2); -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); + show_temp, set_temp, LM75_REG_HYST); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, LM75_REG_TEMP); +static DEVICE_ATTR(update_interval, S_IRUGO, show_update_interval, NULL); static struct attribute *lm75_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &dev_attr_update_interval.attr, NULL }; @@ -185,10 +185,40 @@ static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = { /* device probe and removal */ +static bool 
lm75_is_writeable_reg(struct device *dev, unsigned int reg) +{ + return reg != LM75_REG_TEMP; +} + +static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg) +{ + return reg == LM75_REG_TEMP; +} + +static const struct regmap_config lm75_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = LM75_REG_MAX, + .writeable_reg = lm75_is_writeable_reg, + .volatile_reg = lm75_is_volatile_reg, + .val_format_endian = REGMAP_ENDIAN_BIG, + .cache_type = REGCACHE_RBTREE, + .use_single_rw = true, +}; + +static void lm75_remove(void *data) +{ + struct lm75_data *lm75 = data; + struct i2c_client *client = lm75->client; + + i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf); +} + static int lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; + struct device *hwmon_dev; struct lm75_data *data; int status; u8 set_mask, clr_mask; @@ -204,8 +234,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) return -ENOMEM; data->client = client; - i2c_set_clientdata(client, data); - mutex_init(&data->update_lock); + + data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); /* Set to LM75 resolution (9 bits, 1/2 degree C) and range. * Then tweak to be more precise when appropriate. @@ -217,7 +249,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) case adt75: clr_mask |= 1 << 5; /* not one-shot mode */ data->resolution = 12; - data->sample_time = HZ / 8; + data->sample_time = MSEC_PER_SEC / 8; break; case ds1775: case ds75: @@ -225,35 +257,35 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) clr_mask |= 3 << 5; set_mask |= 2 << 5; /* 11-bit mode */ data->resolution = 11; - data->sample_time = HZ; + data->sample_time = MSEC_PER_SEC; break; case ds7505: set_mask |= 3 << 5; /* 12-bit mode */ data->resolution = 12; - data->sample_time = HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; case g751: case lm75: case lm75a: data->resolution = 9; - data->sample_time = HZ / 2; + data->sample_time = MSEC_PER_SEC / 2; break; case lm75b: data->resolution = 11; - data->sample_time = HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; case max6625: data->resolution = 9; - data->sample_time = HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; case max6626: data->resolution = 12; data->resolution_limits = 9; - data->sample_time = HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; case tcn75: data->resolution = 9; - data->sample_time = HZ / 8; + data->sample_time = MSEC_PER_SEC / 8; break; case mcp980x: data->resolution_limits = 9; @@ -262,14 +294,14 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) case tmp101: set_mask |= 3 << 5; /* 12-bit mode */ data->resolution = 12; - data->sample_time = HZ; + data->sample_time = MSEC_PER_SEC; clr_mask |= 1 << 7; /* not one-shot mode */ break; case tmp112: set_mask |= 3 << 5; /* 12-bit mode */ clr_mask |= 1 << 7; /* not one-shot mode */ data->resolution = 12; - data->sample_time = HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; case tmp105: case tmp175: @@ -278,17 +310,17 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) set_mask |= 3 << 5; /* 12-bit mode */ clr_mask |= 1 << 7; /* not one-shot mode */ data->resolution = 12; - data->sample_time = HZ / 2; + data->sample_time = MSEC_PER_SEC / 2; break; case tmp75c: clr_mask |= 1 << 5; /* not one-shot mode */ data->resolution = 12; - data->sample_time = 
HZ / 4; + data->sample_time = MSEC_PER_SEC / 4; break; } /* configure as specified */ - status = lm75_read_value(client, LM75_REG_CONF); + status = i2c_smbus_read_byte_data(client, LM75_REG_CONF); if (status < 0) { dev_dbg(dev, "Can't read config? %d\n", status); return status; @@ -297,30 +329,23 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) new = status & ~clr_mask; new |= set_mask; if (status != new) - lm75_write_value(client, LM75_REG_CONF, new); - dev_dbg(dev, "Config %02x\n", new); + i2c_smbus_write_byte_data(client, LM75_REG_CONF, new); - data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name, - data, lm75_groups); - if (IS_ERR(data->hwmon_dev)) - return PTR_ERR(data->hwmon_dev); + devm_add_action(dev, lm75_remove, data); - devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0, - data->hwmon_dev, - &lm75_of_thermal_ops); + dev_dbg(dev, "Config %02x\n", new); - dev_info(dev, "%s: sensor '%s'\n", - dev_name(data->hwmon_dev), client->name); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, + data, lm75_groups); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); - return 0; -} + devm_thermal_zone_of_sensor_register(hwmon_dev, 0, + hwmon_dev, + &lm75_of_thermal_ops); -static int lm75_remove(struct i2c_client *client) -{ - struct lm75_data *data = i2c_get_clientdata(client); + dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name); - hwmon_device_unregister(data->hwmon_dev); - lm75_write_value(client, LM75_REG_CONF, data->orig_conf); return 0; } @@ -449,13 +474,13 @@ static int lm75_suspend(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); - status = lm75_read_value(client, LM75_REG_CONF); + status = i2c_smbus_read_byte_data(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? %d\n", status); return status; } status = status | LM75_SHUTDOWN; - lm75_write_value(client, LM75_REG_CONF, status); + i2c_smbus_write_byte_data(client, LM75_REG_CONF, status); return 0; } @@ -463,13 +488,13 @@ static int lm75_resume(struct device *dev) { int status; struct i2c_client *client = to_i2c_client(dev); - status = lm75_read_value(client, LM75_REG_CONF); + status = i2c_smbus_read_byte_data(client, LM75_REG_CONF); if (status < 0) { dev_dbg(&client->dev, "Can't read config? %d\n", status); return status; } status = status & ~LM75_SHUTDOWN; - lm75_write_value(client, LM75_REG_CONF, status); + i2c_smbus_write_byte_data(client, LM75_REG_CONF, status); return 0; } @@ -489,73 +514,11 @@ static struct i2c_driver lm75_driver = { .pm = LM75_DEV_PM_OPS, }, .probe = lm75_probe, - .remove = lm75_remove, .id_table = lm75_ids, .detect = lm75_detect, .address_list = normal_i2c, }; -/*-----------------------------------------------------------------------*/ - -/* register access */ - -/* - * All registers are word-sized, except for the configuration register. - * LM75 uses a high-byte first convention, which is exactly opposite to - * the SMBus standard. 
- */ -static int lm75_read_value(struct i2c_client *client, u8 reg) -{ - if (reg == LM75_REG_CONF) - return i2c_smbus_read_byte_data(client, reg); - else - return i2c_smbus_read_word_swapped(client, reg); -} - -static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value) -{ - if (reg == LM75_REG_CONF) - return i2c_smbus_write_byte_data(client, reg, value); - else - return i2c_smbus_write_word_swapped(client, reg, value); -} - -static struct lm75_data *lm75_update_device(struct device *dev) -{ - struct lm75_data *data = dev_get_drvdata(dev); - struct i2c_client *client = data->client; - struct lm75_data *ret = data; - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, data->last_updated + data->sample_time) - || !data->valid) { - int i; - dev_dbg(&client->dev, "Starting lm75 update\n"); - - for (i = 0; i < ARRAY_SIZE(data->temp); i++) { - int status; - - status = lm75_read_value(client, LM75_REG_TEMP[i]); - if (unlikely(status < 0)) { - dev_dbg(dev, - "LM75: Failed to read value: reg %d, error %d\n", - LM75_REG_TEMP[i], status); - ret = ERR_PTR(status); - data->valid = 0; - goto abort; - } - data->temp[i] = status; - } - data->last_updated = jiffies; - data->valid = 1; - } - -abort: - mutex_unlock(&data->update_lock); - return ret; -} - module_i2c_driver(lm75_driver); MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); @@ -171,7 +171,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, #define SA56004_REG_R_LOCAL_TEMPL 0x22 -#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */ #define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */ /* TMP451 registers */ @@ -366,16 +365,14 @@ enum lm90_temp11_reg_index { struct lm90_data { struct i2c_client *client; - struct device *hwmon_dev; const struct attribute_group *groups[6]; struct mutex update_lock; - struct regulator *regulator; - char valid; /* zero until following fields are valid */ + bool valid; /* true if register values are valid */ unsigned long last_updated; /* in jiffies */ int kind; u32 flags; - int update_interval; /* in milliseconds */ + unsigned int update_interval; /* in milliseconds */ u8 config_orig; /* Original configuration register value */ u8 convrate_orig; /* Original conversion rate register value */ @@ -412,7 +409,7 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value) * because we don't want the address pointer to change between the write * byte and the read byte transactions. */ -static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value) +static int lm90_read_reg(struct i2c_client *client, u8 reg) { int err; @@ -423,20 +420,12 @@ static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value) } else err = i2c_smbus_read_byte_data(client, reg); - if (err < 0) { - dev_warn(&client->dev, "Register %#02x read failed (%d)\n", - reg, err); - return err; - } - *value = err; - - return 0; + return err; } -static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value) +static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl) { - int err; - u8 oldh, newh, l; + int oldh, newh, l; /* * There is a trick here. We have to read two registers to have the @@ -451,18 +440,21 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value) * we have to read the low byte again, and now we believe we have a * correct reading. 
 */
-	if ((err = lm90_read_reg(client, regh, &oldh))
-	 || (err = lm90_read_reg(client, regl, &l))
-	 || (err = lm90_read_reg(client, regh, &newh)))
-		return err;
+	oldh = lm90_read_reg(client, regh);
+	if (oldh < 0)
+		return oldh;
+	l = lm90_read_reg(client, regl);
+	if (l < 0)
+		return l;
+	newh = lm90_read_reg(client, regh);
+	if (newh < 0)
+		return newh;
 	if (oldh != newh) {
-		err = lm90_read_reg(client, regl, &l);
-		if (err)
-			return err;
+		l = lm90_read_reg(client, regl);
+		if (l < 0)
+			return l;
 	}
-	*value = (newh << 8) | l;
-
-	return 0;
+	return (newh << 8) | l;
 }
 
 /*
@@ -473,20 +465,23 @@
  * various registers have different meanings as a result of selecting a
  * non-default remote channel.
  */
-static inline void lm90_select_remote_channel(struct i2c_client *client,
-					      struct lm90_data *data,
-					      int channel)
+static inline int lm90_select_remote_channel(struct i2c_client *client,
+					     struct lm90_data *data,
+					     int channel)
 {
-	u8 config;
+	int config;
 
 	if (data->kind == max6696) {
-		lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+		config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+		if (config < 0)
+			return config;
 		config &= ~0x08;
 		if (channel)
 			config |= 0x08;
 		i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
 	}
+	return 0;
 }
 
 /*
@@ -513,118 +508,204 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
 	data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
 }
 
+static int lm90_update_limits(struct device *dev)
+{
+	struct lm90_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	int val;
+
+	val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+	if (val < 0)
+		return val;
+	data->temp8[LOCAL_CRIT] = val;
+
+	val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+	if (val < 0)
+		return val;
+	data->temp8[REMOTE_CRIT] = val;
+
+	val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+	if (val < 0)
+		return val;
+	data->temp_hyst = val;
+
+	val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+	if (val < 0)
+		return val;
+	data->temp11[REMOTE_LOW] = val << 8;
+
+	if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+		val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL);
+		if (val < 0)
+			return val;
+		data->temp11[REMOTE_LOW] |= val;
+	}
+
+	val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+	if (val < 0)
+		return val;
+	data->temp11[REMOTE_HIGH] = val << 8;
+
+	if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+		val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL);
+		if (val < 0)
+			return val;
+		data->temp11[REMOTE_HIGH] |= val;
+	}
+
+	if (data->flags & LM90_HAVE_OFFSET) {
+		val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH,
+				  LM90_REG_R_REMOTE_OFFSL);
+		if (val < 0)
+			return val;
+		data->temp11[REMOTE_OFFSET] = val;
+	}
+
+	if (data->flags & LM90_HAVE_EMERGENCY) {
+		val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG);
+		if (val < 0)
+			return val;
+		data->temp8[LOCAL_EMERG] = val;
+
+		val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+		if (val < 0)
+			return val;
+		data->temp8[REMOTE_EMERG] = val;
+	}
+
+	if (data->kind == max6696) {
+		val = lm90_select_remote_channel(client, data, 1);
+		if (val < 0)
+			return val;
+
+		val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+		if (val < 0)
+			return val;
+		data->temp8[REMOTE2_CRIT] = val;
+
+		val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+		if (val < 0)
+			return val;
+		data->temp8[REMOTE2_EMERG] = val;
+
+		val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+		if (val < 0)
+			return val;
+
data->temp11[REMOTE2_LOW] = val << 8; + + val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH); + if (val < 0) + return val; + data->temp11[REMOTE2_HIGH] = val << 8; + + lm90_select_remote_channel(client, data, 0); + } + + return 0; +} + static struct lm90_data *lm90_update_device(struct device *dev) { struct lm90_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; unsigned long next_update; + int val = 0; mutex_lock(&data->update_lock); + if (!data->valid) { + val = lm90_update_limits(dev); + if (val < 0) + goto error; + } + next_update = data->last_updated + msecs_to_jiffies(data->update_interval); if (time_after(jiffies, next_update) || !data->valid) { - u8 h, l; - u8 alarms; - dev_dbg(&client->dev, "Updating lm90 data.\n"); - lm90_read_reg(client, LM90_REG_R_LOCAL_LOW, - &data->temp8[LOCAL_LOW]); - lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH, - &data->temp8[LOCAL_HIGH]); - lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT, - &data->temp8[LOCAL_CRIT]); - lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, - &data->temp8[REMOTE_CRIT]); - lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst); + + data->valid = false; + + val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW); + if (val < 0) + goto error; + data->temp8[LOCAL_LOW] = val; + + val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH); + if (val < 0) + goto error; + data->temp8[LOCAL_HIGH] = val; if (data->reg_local_ext) { - lm90_read16(client, LM90_REG_R_LOCAL_TEMP, - data->reg_local_ext, - &data->temp11[LOCAL_TEMP]); + val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP, + data->reg_local_ext); + if (val < 0) + goto error; + data->temp11[LOCAL_TEMP] = val; } else { - if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP, - &h) == 0) - data->temp11[LOCAL_TEMP] = h << 8; - } - lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL, - &data->temp11[REMOTE_TEMP]); - - if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) { - data->temp11[REMOTE_LOW] = h << 8; - if ((data->flags & LM90_HAVE_REM_LIMIT_EXT) - && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL, - &l) == 0) - data->temp11[REMOTE_LOW] |= l; - } - if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) { - data->temp11[REMOTE_HIGH] = h << 8; - if ((data->flags & LM90_HAVE_REM_LIMIT_EXT) - && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL, - &l) == 0) - data->temp11[REMOTE_HIGH] |= l; + val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP); + if (val < 0) + goto error; + data->temp11[LOCAL_TEMP] = val << 8; } + val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, + LM90_REG_R_REMOTE_TEMPL); + if (val < 0) + goto error; + data->temp11[REMOTE_TEMP] = val; - if (data->flags & LM90_HAVE_OFFSET) { - if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH, - &h) == 0 - && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL, - &l) == 0) - data->temp11[REMOTE_OFFSET] = (h << 8) | l; - } - if (data->flags & LM90_HAVE_EMERGENCY) { - lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG, - &data->temp8[LOCAL_EMERG]); - lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG, - &data->temp8[REMOTE_EMERG]); - } - lm90_read_reg(client, LM90_REG_R_STATUS, &alarms); - data->alarms = alarms; /* save as 16 bit value */ + val = lm90_read_reg(client, LM90_REG_R_STATUS); + if (val < 0) + goto error; + data->alarms = val; /* lower 8 bit of alarms */ if (data->kind == max6696) { - lm90_select_remote_channel(client, data, 1); - lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, - &data->temp8[REMOTE2_CRIT]); - lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG, - &data->temp8[REMOTE2_EMERG]); - 
lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, - LM90_REG_R_REMOTE_TEMPL, - &data->temp11[REMOTE2_TEMP]); - if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h)) - data->temp11[REMOTE2_LOW] = h << 8; - if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h)) - data->temp11[REMOTE2_HIGH] = h << 8; + val = lm90_select_remote_channel(client, data, 1); + if (val < 0) + goto error; + + val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH, + LM90_REG_R_REMOTE_TEMPL); + if (val < 0) + goto error; + data->temp11[REMOTE2_TEMP] = val; + lm90_select_remote_channel(client, data, 0); - if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2, - &alarms)) - data->alarms |= alarms << 8; + val = lm90_read_reg(client, MAX6696_REG_R_STATUS2); + if (val < 0) + goto error; + data->alarms |= val << 8; } /* * Re-enable ALERT# output if it was originally enabled and * relevant alarms are all clear */ - if ((data->config_orig & 0x80) == 0 - && (data->alarms & data->alert_alarms) == 0) { - u8 config; + if (!(data->config_orig & 0x80) && + !(data->alarms & data->alert_alarms)) { + val = lm90_read_reg(client, LM90_REG_R_CONFIG1); + if (val < 0) + goto error; - lm90_read_reg(client, LM90_REG_R_CONFIG1, &config); - if (config & 0x80) { + if (val & 0x80) { dev_dbg(&client->dev, "Re-enabling ALERT#\n"); i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, - config & ~0x80); + val & ~0x80); } } data->last_updated = jiffies; - data->valid = 1; + data->valid = true; } +error: mutex_unlock(&data->update_lock); + if (val < 0) + return ERR_PTR(val); + return data; } @@ -709,16 +790,14 @@ static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val) { if (data->flags & LM90_FLAG_ADT7461_EXT) return (val - 64) * 1000; - else - return temp_from_s8(val); + return temp_from_s8(val); } static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val) { if (data->flags & LM90_FLAG_ADT7461_EXT) return (val - 0x4000) / 64 * 250; - else - return temp_from_s16(val); + return temp_from_s16(val); } static u8 temp_to_u8_adt7461(struct lm90_data *data, long val) @@ -729,13 +808,12 @@ static u8 temp_to_u8_adt7461(struct lm90_data *data, long val) if (val >= 191000) return 0xFF; return (val + 500 + 64000) / 1000; - } else { - if (val <= 0) - return 0; - if (val >= 127000) - return 127; - return (val + 500) / 1000; } + if (val <= 0) + return 0; + if (val >= 127000) + return 127; + return (val + 500) / 1000; } static u16 temp_to_u16_adt7461(struct lm90_data *data, long val) @@ -746,13 +824,12 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val) if (val >= 191750) return 0xFFC0; return (val + 64000 + 125) / 250 * 64; - } else { - if (val <= 0) - return 0; - if (val >= 127750) - return 0x7FC0; - return (val + 125) / 250 * 64; } + if (val <= 0) + return 0; + if (val >= 127750) + return 0x7FC0; + return (val + 125) / 250 * 64; } /* @@ -766,6 +843,9 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, struct lm90_data *data = lm90_update_device(dev); int temp; + if (IS_ERR(data)) + return PTR_ERR(data); + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u8_adt7461(data, data->temp8[attr->index]); else if (data->kind == max6646) @@ -832,6 +912,9 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr, struct lm90_data *data = lm90_update_device(dev); int temp; + if (IS_ERR(data)) + return PTR_ERR(data); + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u16_adt7461(data, data->temp11[attr->index]); else if (data->kind == max6646) @@ 
-907,6 +990,9 @@ static ssize_t show_temphyst(struct device *dev, struct lm90_data *data = lm90_update_device(dev); int temp; + if (IS_ERR(data)) + return PTR_ERR(data); + if (data->kind == adt7461 || data->kind == tmp451) temp = temp_from_u8_adt7461(data, data->temp8[attr->index]); else if (data->kind == max6646) @@ -953,6 +1039,10 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy, char *buf) { struct lm90_data *data = lm90_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", data->alarms); } @@ -963,6 +1053,9 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute struct lm90_data *data = lm90_update_device(dev); int bitnr = attr->index; + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); } @@ -1404,8 +1497,11 @@ static int lm90_detect(struct i2c_client *client, return 0; } -static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data) +static void lm90_restore_conf(void *_data) { + struct lm90_data *data = _data; + struct i2c_client *client = data->client; + /* Restore initial configuration */ i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, data->convrate_orig); @@ -1413,24 +1509,22 @@ static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data) data->config_orig); } -static void lm90_init_client(struct i2c_client *client, struct lm90_data *data) +static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) { - u8 config, convrate; + int config, convrate; - if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) { - dev_warn(&client->dev, "Failed to read convrate register!\n"); - convrate = LM90_DEF_CONVRATE_RVAL; - } + convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE); + if (convrate < 0) + return convrate; data->convrate_orig = convrate; /* * Start the conversions. 
*/ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */ - if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) { - dev_warn(&client->dev, "Initialization failed!\n"); - return; - } + config = lm90_read_reg(client, LM90_REG_R_CONFIG1); + if (config < 0) + return config; data->config_orig = config; /* Check Temperature Range Select */ @@ -1456,17 +1550,26 @@ static void lm90_init_client(struct i2c_client *client, struct lm90_data *data) config &= 0xBF; /* run */ if (config != data->config_orig) /* Only write if changed */ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config); + + devm_add_action(&client->dev, lm90_restore_conf, data); + + return 0; } static bool lm90_is_tripped(struct i2c_client *client, u16 *status) { struct lm90_data *data = i2c_get_clientdata(client); - u8 st, st2 = 0; + int st, st2 = 0; - lm90_read_reg(client, LM90_REG_R_STATUS, &st); + st = lm90_read_reg(client, LM90_REG_R_STATUS); + if (st < 0) + return false; - if (data->kind == max6696) - lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2); + if (data->kind == max6696) { + st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2); + if (st2 < 0) + return false; + } *status = st | (st2 << 8); @@ -1506,6 +1609,16 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id) return IRQ_NONE; } +static void lm90_remove_pec(void *dev) +{ + device_remove_file(dev, &dev_attr_pec); +} + +static void lm90_regulator_disable(void *regulator) +{ + regulator_disable(regulator); +} + static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1513,6 +1626,7 @@ static int lm90_probe(struct i2c_client *client, struct i2c_adapter *adapter = to_i2c_adapter(dev->parent); struct lm90_data *data; struct regulator *regulator; + struct device *hwmon_dev; int groups = 0; int err; @@ -1526,6 +1640,8 @@ static int lm90_probe(struct i2c_client *client, return err; } + devm_add_action(dev, lm90_regulator_disable, regulator); + data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1534,8 +1650,6 @@ static int lm90_probe(struct i2c_client *client, i2c_set_clientdata(client, data); mutex_init(&data->update_lock); - data->regulator = regulator; - /* Set the device type */ data->kind = id->driver_data; if (data->kind == adm1032) { @@ -1557,7 +1671,11 @@ static int lm90_probe(struct i2c_client *client, data->max_convrate = lm90_params[data->kind].max_convrate; /* Initialize the LM90 chip */ - lm90_init_client(client, data); + err = lm90_init_client(client, data); + if (err < 0) { + dev_err(dev, "Failed to initialize device\n"); + return err; + } /* Register sysfs hooks */ data->groups[groups++] = &lm90_group; @@ -1577,15 +1695,14 @@ static int lm90_probe(struct i2c_client *client, if (client->flags & I2C_CLIENT_PEC) { err = device_create_file(dev, &dev_attr_pec); if (err) - goto exit_restore; + return err; + devm_add_action(dev, lm90_remove_pec, dev); } - data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name, - data, data->groups); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove_pec; - } + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, + data, data->groups); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); if (client->irq) { dev_dbg(dev, "IRQ: %d\n", client->irq); @@ -1595,39 +1712,21 @@ static int lm90_probe(struct i2c_client *client, "lm90", client); if (err < 0) { dev_err(dev, "cannot request IRQ %d\n", client->irq); - goto exit_unregister; + return err; } } return 0; - 
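
The error-path labels and the lm90_remove() callback deleted just below are what this conversion makes redundant: lm90_restore_conf(), lm90_regulator_disable() and lm90_remove_pec() are registered with devm_add_action(), and the driver core runs such actions in reverse order of registration whenever probe fails or the device is unbound. A minimal sketch of the pattern, using a hypothetical example_probe() and assuming only the stock devm_add_action() API:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Undo callback; receives the data pointer given to devm_add_action(). */
static void example_regulator_disable(void *regulator)
{
	regulator_disable(regulator);
}

/* Hypothetical probe fragment: enable a regulator and arm its cleanup. */
static int example_probe(struct device *dev, struct regulator *reg)
{
	int err;

	err = regulator_enable(reg);
	if (err)
		return err;

	/*
	 * Once the action is registered, the core disables the regulator
	 * automatically, in reverse order relative to other devm resources,
	 * so later error paths can simply return.
	 */
	err = devm_add_action(dev, example_regulator_disable, reg);
	if (err) {
		regulator_disable(reg);
		return err;
	}

	return 0;
}

Note that the patch itself ignores the (unlikely) -ENOMEM return of devm_add_action(); the sketch checks it and releases the resource by hand, which is the behaviour the devm_add_action_or_reset() helper wraps up.
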
-exit_unregister:
-	hwmon_device_unregister(data->hwmon_dev);
-exit_remove_pec:
-	device_remove_file(dev, &dev_attr_pec);
-exit_restore:
-	lm90_restore_conf(client, data);
-	regulator_disable(data->regulator);
-
-	return err;
-}
-
-static int lm90_remove(struct i2c_client *client)
-{
-	struct lm90_data *data = i2c_get_clientdata(client);
-
-	hwmon_device_unregister(data->hwmon_dev);
-	device_remove_file(&client->dev, &dev_attr_pec);
-	lm90_restore_conf(client, data);
-	regulator_disable(data->regulator);
-
-	return 0;
 }
 
-static void lm90_alert(struct i2c_client *client, unsigned int flag)
+static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+		       unsigned int flag)
 {
 	u16 alarms;
 
+	if (type != I2C_PROTOCOL_SMBUS_ALERT)
+		return;
+
 	if (lm90_is_tripped(client, &alarms)) {
 		/*
 		 * Disable ALERT# output, because these chips don't implement
@@ -1636,13 +1735,16 @@
 		 */
 		struct lm90_data *data = i2c_get_clientdata(client);
 
-		if ((data->flags & LM90_HAVE_BROKEN_ALERT)
-		    && (alarms & data->alert_alarms)) {
-			u8 config;
+		if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
+		    (alarms & data->alert_alarms)) {
+			int config;
+
 			dev_dbg(&client->dev, "Disabling ALERT#\n");
-			lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
-			i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
-						  config | 0x80);
+			config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+			if (config >= 0)
+				i2c_smbus_write_byte_data(client,
+							  LM90_REG_W_CONFIG1,
+							  config | 0x80);
 		}
 	} else {
 		dev_info(&client->dev, "Everything OK\n");
@@ -1655,7 +1757,6 @@ static struct i2c_driver lm90_driver = {
 		.name	= "lm90",
 	},
 	.probe		= lm90_probe,
-	.remove		= lm90_remove,
 	.alert		= lm90_alert,
 	.id_table	= lm90_id,
 	.detect		= lm90_detect,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
new file mode 100644
index 000000000000..b73a48832732
--- /dev/null
+++ b/drivers/hwmon/sht3x.c
@@ -0,0 +1,775 @@
+/* Sensirion SHT3x-DIS humidity and temperature sensor driver.
+ * The SHT3x comes in many different versions, this driver is for the
+ * I2C version only.
+ *
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + */ + +#include <asm/page.h> +#include <linux/crc8.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/platform_data/sht3x.h> + +/* commands (high precision mode) */ +static const unsigned char sht3x_cmd_measure_blocking_hpm[] = { 0x2c, 0x06 }; +static const unsigned char sht3x_cmd_measure_nonblocking_hpm[] = { 0x24, 0x00 }; + +/* commands (low power mode) */ +static const unsigned char sht3x_cmd_measure_blocking_lpm[] = { 0x2c, 0x10 }; +static const unsigned char sht3x_cmd_measure_nonblocking_lpm[] = { 0x24, 0x16 }; + +/* commands for periodic mode */ +static const unsigned char sht3x_cmd_measure_periodic_mode[] = { 0xe0, 0x00 }; +static const unsigned char sht3x_cmd_break[] = { 0x30, 0x93 }; + +/* commands for heater control */ +static const unsigned char sht3x_cmd_heater_on[] = { 0x30, 0x6d }; +static const unsigned char sht3x_cmd_heater_off[] = { 0x30, 0x66 }; + +/* other commands */ +static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d }; +static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 }; + +/* delays for non-blocking i2c commands, both in us */ +#define SHT3X_NONBLOCKING_WAIT_TIME_HPM 15000 +#define SHT3X_NONBLOCKING_WAIT_TIME_LPM 4000 + +#define SHT3X_WORD_LEN 2 +#define SHT3X_CMD_LENGTH 2 +#define SHT3X_CRC8_LEN 1 +#define SHT3X_RESPONSE_LENGTH 6 +#define SHT3X_CRC8_POLYNOMIAL 0x31 +#define SHT3X_CRC8_INIT 0xFF +#define SHT3X_MIN_TEMPERATURE -45000 +#define SHT3X_MAX_TEMPERATURE 130000 +#define SHT3X_MIN_HUMIDITY 0 +#define SHT3X_MAX_HUMIDITY 100000 + +enum sht3x_chips { + sht3x, + sts3x, +}; + +enum sht3x_limits { + limit_max = 0, + limit_max_hyst, + limit_min, + limit_min_hyst, +}; + +DECLARE_CRC8_TABLE(sht3x_crc8_table); + +/* periodic measure commands (high precision mode) */ +static const char periodic_measure_commands_hpm[][SHT3X_CMD_LENGTH] = { + /* 0.5 measurements per second */ + {0x20, 0x32}, + /* 1 measurements per second */ + {0x21, 0x30}, + /* 2 measurements per second */ + {0x22, 0x36}, + /* 4 measurements per second */ + {0x23, 0x34}, + /* 10 measurements per second */ + {0x27, 0x37}, +}; + +/* periodic measure commands (low power mode) */ +static const char periodic_measure_commands_lpm[][SHT3X_CMD_LENGTH] = { + /* 0.5 measurements per second */ + {0x20, 0x2f}, + /* 1 measurements per second */ + {0x21, 0x2d}, + /* 2 measurements per second */ + {0x22, 0x2b}, + /* 4 measurements per second */ + {0x23, 0x29}, + /* 10 measurements per second */ + {0x27, 0x2a}, +}; + +struct sht3x_limit_commands { + const char read_command[SHT3X_CMD_LENGTH]; + const char write_command[SHT3X_CMD_LENGTH]; +}; + +static const struct sht3x_limit_commands limit_commands[] = { + /* temp1_max, humidity1_max */ + [limit_max] = { {0xe1, 0x1f}, {0x61, 0x1d} }, + /* temp_1_max_hyst, humidity1_max_hyst */ + [limit_max_hyst] = { {0xe1, 0x14}, {0x61, 0x16} }, + /* temp1_min, humidity1_min */ + [limit_min] = { {0xe1, 0x02}, {0x61, 0x00} }, + /* temp_1_min_hyst, humidity1_min_hyst */ + [limit_min_hyst] = { {0xe1, 0x09}, {0x61, 0x0B} }, +}; + +#define SHT3X_NUM_LIMIT_CMD ARRAY_SIZE(limit_commands) + +static const u16 mode_to_update_interval[] = { + 0, + 2000, + 1000, + 500, + 250, + 100, +}; + +struct sht3x_data { + struct i2c_client *client; + struct mutex i2c_lock; /* lock for sending i2c commands */ + struct mutex data_lock; 
/* lock for updating driver data */ + + u8 mode; + const unsigned char *command; + u32 wait_time; /* in us*/ + unsigned long last_update; /* last update in periodic mode*/ + + struct sht3x_platform_data setup; + + /* + * cached values for temperature and humidity and limits + * the limits arrays have the following order: + * max, max_hyst, min, min_hyst + */ + int temperature; + int temperature_limits[SHT3X_NUM_LIMIT_CMD]; + u32 humidity; + u32 humidity_limits[SHT3X_NUM_LIMIT_CMD]; +}; + +static u8 get_mode_from_update_interval(u16 value) +{ + size_t index; + u8 number_of_modes = ARRAY_SIZE(mode_to_update_interval); + + if (value == 0) + return 0; + + /* find next faster update interval */ + for (index = 1; index < number_of_modes; index++) { + if (mode_to_update_interval[index] <= value) + return index; + } + + return number_of_modes - 1; +} + +static int sht3x_read_from_command(struct i2c_client *client, + struct sht3x_data *data, + const char *command, + char *buf, int length, u32 wait_time) +{ + int ret; + + mutex_lock(&data->i2c_lock); + ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH); + + if (ret != SHT3X_CMD_LENGTH) { + ret = ret < 0 ? ret : -EIO; + goto out; + } + + if (wait_time) + usleep_range(wait_time, wait_time + 1000); + + ret = i2c_master_recv(client, buf, length); + if (ret != length) { + ret = ret < 0 ? ret : -EIO; + goto out; + } + + ret = 0; +out: + mutex_unlock(&data->i2c_lock); + return ret; +} + +static int sht3x_extract_temperature(u16 raw) +{ + /* + * From datasheet: + * T = -45 + 175 * ST / 2^16 + * Adapted for integer fixed point (3 digit) arithmetic. + */ + return ((21875 * (int)raw) >> 13) - 45000; +} + +static u32 sht3x_extract_humidity(u16 raw) +{ + /* + * From datasheet: + * RH = 100 * SRH / 2^16 + * Adapted for integer fixed point (3 digit) arithmetic. + */ + return (12500 * (u32)raw) >> 13; +} + +static struct sht3x_data *sht3x_update_client(struct device *dev) +{ + struct sht3x_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + u16 interval_ms = mode_to_update_interval[data->mode]; + unsigned long interval_jiffies = msecs_to_jiffies(interval_ms); + unsigned char buf[SHT3X_RESPONSE_LENGTH]; + u16 val; + int ret = 0; + + mutex_lock(&data->data_lock); + /* + * Only update cached readings once per update interval in periodic + * mode. In single shot mode the sensor measures values on demand, so + * every time the sysfs interface is called, a measurement is triggered. + * In periodic mode however, the measurement process is handled + * internally by the sensor and reading out sensor values only makes + * sense if a new reading is available. 
+ */ + if (time_after(jiffies, data->last_update + interval_jiffies)) { + ret = sht3x_read_from_command(client, data, data->command, buf, + sizeof(buf), data->wait_time); + if (ret) + goto out; + + val = be16_to_cpup((__be16 *)buf); + data->temperature = sht3x_extract_temperature(val); + val = be16_to_cpup((__be16 *)(buf + 3)); + data->humidity = sht3x_extract_humidity(val); + data->last_update = jiffies; + } + +out: + mutex_unlock(&data->data_lock); + if (ret) + return ERR_PTR(ret); + + return data; +} + +/* sysfs attributes */ +static ssize_t temp1_input_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sht3x_data *data = sht3x_update_client(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return sprintf(buf, "%d\n", data->temperature); +} + +static ssize_t humidity1_input_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct sht3x_data *data = sht3x_update_client(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return sprintf(buf, "%u\n", data->humidity); +} + +/* + * limits_update must only be called from probe or with data_lock held + */ +static int limits_update(struct sht3x_data *data) +{ + int ret; + u8 index; + int temperature; + u32 humidity; + u16 raw; + char buffer[SHT3X_RESPONSE_LENGTH]; + const struct sht3x_limit_commands *commands; + struct i2c_client *client = data->client; + + for (index = 0; index < SHT3X_NUM_LIMIT_CMD; index++) { + commands = &limit_commands[index]; + ret = sht3x_read_from_command(client, data, + commands->read_command, buffer, + SHT3X_RESPONSE_LENGTH, 0); + + if (ret) + return ret; + + raw = be16_to_cpup((__be16 *)buffer); + temperature = sht3x_extract_temperature((raw & 0x01ff) << 7); + humidity = sht3x_extract_humidity(raw & 0xfe00); + data->temperature_limits[index] = temperature; + data->humidity_limits[index] = humidity; + } + + return ret; +} + +static ssize_t temp1_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sht3x_data *data = dev_get_drvdata(dev); + u8 index = to_sensor_dev_attr(attr)->index; + int temperature_limit = data->temperature_limits[index]; + + return scnprintf(buf, PAGE_SIZE, "%d\n", temperature_limit); +} + +static ssize_t humidity1_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sht3x_data *data = dev_get_drvdata(dev); + u8 index = to_sensor_dev_attr(attr)->index; + u32 humidity_limit = data->humidity_limits[index]; + + return scnprintf(buf, PAGE_SIZE, "%u\n", humidity_limit); +} + +/* + * limit_store must only be called with data_lock held + */ +static size_t limit_store(struct device *dev, + size_t count, + u8 index, + int temperature, + u32 humidity) +{ + char buffer[SHT3X_CMD_LENGTH + SHT3X_WORD_LEN + SHT3X_CRC8_LEN]; + char *position = buffer; + int ret; + u16 raw; + struct sht3x_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + const struct sht3x_limit_commands *commands; + + commands = &limit_commands[index]; + + memcpy(position, commands->write_command, SHT3X_CMD_LENGTH); + position += SHT3X_CMD_LENGTH; + /* + * ST = (T + 45) / 175 * 2^16 + * SRH = RH / 100 * 2^16 + * adapted for fixed point arithmetic and packed the same as + * in limit_show() + */ + raw = ((u32)(temperature + 45000) * 24543) >> (16 + 7); + raw |= ((humidity * 42950) >> 16) & 0xfe00; + + *((__be16 *)position) = cpu_to_be16(raw); + position += SHT3X_WORD_LEN; + *position = crc8(sht3x_crc8_table, + position - SHT3X_WORD_LEN, + SHT3X_WORD_LEN, + SHT3X_CRC8_INIT); + + 
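The byte appended by the crc8() call above is Sensirion's CRC-8: polynomial 0x31, initialization 0xFF, most-significant bit first, no final XOR, exactly the parameters the driver hands to crc8_populate_msb() at probe time. A table-free sketch of the same checksum, handy for validating bus traffic in a capture; 0xBE 0xEF -> 0x92 is the test vector given in the sensor datasheet:

#include <stddef.h>
#include <stdint.h>

static uint8_t sht3x_crc8_bitwise(const uint8_t *data, size_t len)
{
        uint8_t crc = 0xff;                     /* SHT3X_CRC8_INIT */

        while (len--) {
                crc ^= *data++;
                for (int bit = 0; bit < 8; bit++)
                        crc = (crc & 0x80) ? (crc << 1) ^ 0x31 /* polynomial */
                                           : crc << 1;
        }
        return crc;
}

/* sht3x_crc8_bitwise((uint8_t[]){ 0xbe, 0xef }, 2) == 0x92 */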
mutex_lock(&data->i2c_lock); + ret = i2c_master_send(client, buffer, sizeof(buffer)); + mutex_unlock(&data->i2c_lock); + + if (ret != sizeof(buffer)) + return ret < 0 ? ret : -EIO; + + data->temperature_limits[index] = temperature; + data->humidity_limits[index] = humidity; + return count; +} + +static ssize_t temp1_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int temperature; + int ret; + struct sht3x_data *data = dev_get_drvdata(dev); + u8 index = to_sensor_dev_attr(attr)->index; + + ret = kstrtoint(buf, 0, &temperature); + if (ret) + return ret; + + temperature = clamp_val(temperature, SHT3X_MIN_TEMPERATURE, + SHT3X_MAX_TEMPERATURE); + mutex_lock(&data->data_lock); + ret = limit_store(dev, count, index, temperature, + data->humidity_limits[index]); + mutex_unlock(&data->data_lock); + + return ret; +} + +static ssize_t humidity1_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + u32 humidity; + int ret; + struct sht3x_data *data = dev_get_drvdata(dev); + u8 index = to_sensor_dev_attr(attr)->index; + + ret = kstrtou32(buf, 0, &humidity); + if (ret) + return ret; + + humidity = clamp_val(humidity, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY); + mutex_lock(&data->data_lock); + ret = limit_store(dev, count, index, data->temperature_limits[index], + humidity); + mutex_unlock(&data->data_lock); + + return ret; +} + +static void sht3x_select_command(struct sht3x_data *data) +{ + /* + * In blocking mode (clock stretching mode) the I2C bus + * is blocked for other traffic, thus the call to i2c_master_recv() + * will wait until the data is ready. For non blocking mode, we + * have to wait ourselves. + */ + if (data->mode > 0) { + data->command = sht3x_cmd_measure_periodic_mode; + data->wait_time = 0; + } else if (data->setup.blocking_io) { + data->command = data->setup.high_precision ? 
+ sht3x_cmd_measure_blocking_hpm : + sht3x_cmd_measure_blocking_lpm; + data->wait_time = 0; + } else { + if (data->setup.high_precision) { + data->command = sht3x_cmd_measure_nonblocking_hpm; + data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_HPM; + } else { + data->command = sht3x_cmd_measure_nonblocking_lpm; + data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_LPM; + } + } +} + +static int status_register_read(struct device *dev, + struct device_attribute *attr, + char *buffer, int length) +{ + int ret; + struct sht3x_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + + ret = sht3x_read_from_command(client, data, sht3x_cmd_read_status_reg, + buffer, length, 0); + + return ret; +} + +static ssize_t temp1_alarm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN]; + int ret; + + ret = status_register_read(dev, attr, buffer, + SHT3X_WORD_LEN + SHT3X_CRC8_LEN); + if (ret) + return ret; + + return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x04)); +} + +static ssize_t humidity1_alarm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN]; + int ret; + + ret = status_register_read(dev, attr, buffer, + SHT3X_WORD_LEN + SHT3X_CRC8_LEN); + if (ret) + return ret; + + return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x08)); +} + +static ssize_t heater_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN]; + int ret; + + ret = status_register_read(dev, attr, buffer, + SHT3X_WORD_LEN + SHT3X_CRC8_LEN); + if (ret) + return ret; + + return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x20)); +} + +static ssize_t heater_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct sht3x_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + int ret; + bool status; + + ret = kstrtobool(buf, &status); + if (ret) + return ret; + + mutex_lock(&data->i2c_lock); + + if (status) + ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_on, + SHT3X_CMD_LENGTH); + else + ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_off, + SHT3X_CMD_LENGTH); + + mutex_unlock(&data->i2c_lock); + + return ret; +} + +static ssize_t update_interval_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sht3x_data *data = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + mode_to_update_interval[data->mode]); +} + +static ssize_t update_interval_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + u16 update_interval; + u8 mode; + int ret; + const char *command; + struct sht3x_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + + ret = kstrtou16(buf, 0, &update_interval); + if (ret) + return ret; + + mode = get_mode_from_update_interval(update_interval); + + mutex_lock(&data->data_lock); + /* mode did not change */ + if (mode == data->mode) { + mutex_unlock(&data->data_lock); + return count; + } + + mutex_lock(&data->i2c_lock); + /* + * Abort periodic measure mode. + * To do any changes to the configuration while in periodic mode, we + * have to send a break command to the sensor, which then falls back + * to single shot (mode = 0). 
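The temp1_alarm, humidity1_alarm and heater_enable attributes above all test the first (most significant) byte of the 16-bit status word returned by command 0xf3 0x2d, so the masks 0x04, 0x08 and 0x20 correspond to status-word bits 10, 11 and 13. Per the SHT3x datasheet's status-register layout (an assumption here, the driver itself only uses the raw masks) those are the temperature tracking alert, humidity tracking alert and heater-on flags; a compact decoder under that assumption:

#include <stdbool.h>
#include <stdint.h>

struct sht3x_status {
        bool temp_alert;        /* status word bit 10 */
        bool humidity_alert;    /* status word bit 11 */
        bool heater_on;         /* status word bit 13 */
};

static struct sht3x_status sht3x_decode_status_msb(uint8_t msb)
{
        return (struct sht3x_status){
                .temp_alert     = msb & 0x04,
                .humidity_alert = msb & 0x08,
                .heater_on      = msb & 0x20,
        };
}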
+ */ + if (data->mode > 0) { + ret = i2c_master_send(client, sht3x_cmd_break, + SHT3X_CMD_LENGTH); + if (ret != SHT3X_CMD_LENGTH) + goto out; + data->mode = 0; + } + + if (mode > 0) { + if (data->setup.high_precision) + command = periodic_measure_commands_hpm[mode - 1]; + else + command = periodic_measure_commands_lpm[mode - 1]; + + /* select mode */ + ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH); + if (ret != SHT3X_CMD_LENGTH) + goto out; + } + + /* select mode and command */ + data->mode = mode; + sht3x_select_command(data); + +out: + mutex_unlock(&data->i2c_lock); + mutex_unlock(&data->data_lock); + if (ret != SHT3X_CMD_LENGTH) + return ret < 0 ? ret : -EIO; + + return count; +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp1_input_show, NULL, 0); +static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, humidity1_input_show, + NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, + temp1_limit_show, temp1_limit_store, + limit_max); +static SENSOR_DEVICE_ATTR(humidity1_max, S_IRUGO | S_IWUSR, + humidity1_limit_show, humidity1_limit_store, + limit_max); +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, + temp1_limit_show, temp1_limit_store, + limit_max_hyst); +static SENSOR_DEVICE_ATTR(humidity1_max_hyst, S_IRUGO | S_IWUSR, + humidity1_limit_show, humidity1_limit_store, + limit_max_hyst); +static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, + temp1_limit_show, temp1_limit_store, + limit_min); +static SENSOR_DEVICE_ATTR(humidity1_min, S_IRUGO | S_IWUSR, + humidity1_limit_show, humidity1_limit_store, + limit_min); +static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO | S_IWUSR, + temp1_limit_show, temp1_limit_store, + limit_min_hyst); +static SENSOR_DEVICE_ATTR(humidity1_min_hyst, S_IRUGO | S_IWUSR, + humidity1_limit_show, humidity1_limit_store, + limit_min_hyst); +static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, temp1_alarm_show, NULL, 0); +static SENSOR_DEVICE_ATTR(humidity1_alarm, S_IRUGO, humidity1_alarm_show, + NULL, 0); +static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR, + heater_enable_show, heater_enable_store, 0); +static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, + update_interval_show, update_interval_store, 0); + +static struct attribute *sht3x_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_humidity1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_humidity1_max.dev_attr.attr, + &sensor_dev_attr_humidity1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_min_hyst.dev_attr.attr, + &sensor_dev_attr_humidity1_min.dev_attr.attr, + &sensor_dev_attr_humidity1_min_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_alarm.dev_attr.attr, + &sensor_dev_attr_humidity1_alarm.dev_attr.attr, + &sensor_dev_attr_heater_enable.dev_attr.attr, + &sensor_dev_attr_update_interval.dev_attr.attr, + NULL +}; + +static struct attribute *sts3x_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(sht3x); +ATTRIBUTE_GROUPS(sts3x); + +static int sht3x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct sht3x_data *data; + struct device *hwmon_dev; + struct i2c_adapter *adap = client->adapter; + struct device *dev = &client->dev; + const struct attribute_group **attribute_groups; + + /* + * we require full i2c support since the sht3x uses multi-byte read and + * writes as well as multi-byte commands which are not supported by 
+ * the smbus protocol + */ + if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) + return -ENODEV; + + ret = i2c_master_send(client, sht3x_cmd_clear_status_reg, + SHT3X_CMD_LENGTH); + if (ret != SHT3X_CMD_LENGTH) + return ret < 0 ? ret : -ENODEV; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->setup.blocking_io = false; + data->setup.high_precision = true; + data->mode = 0; + data->last_update = 0; + data->client = client; + crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL); + + if (client->dev.platform_data) + data->setup = *(struct sht3x_platform_data *)dev->platform_data; + + sht3x_select_command(data); + + mutex_init(&data->i2c_lock); + mutex_init(&data->data_lock); + + ret = limits_update(data); + if (ret) + return ret; + + if (id->driver_data == sts3x) + attribute_groups = sts3x_groups; + else + attribute_groups = sht3x_groups; + + hwmon_dev = devm_hwmon_device_register_with_groups(dev, + client->name, + data, + attribute_groups); + + if (IS_ERR(hwmon_dev)) + dev_dbg(dev, "unable to register hwmon device\n"); + + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +/* device ID table */ +static const struct i2c_device_id sht3x_ids[] = { + {"sht3x", sht3x}, + {"sts3x", sts3x}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, sht3x_ids); + +static struct i2c_driver sht3x_i2c_driver = { + .driver.name = "sht3x", + .probe = sht3x_probe, + .id_table = sht3x_ids, +}; + +module_i2c_driver(sht3x_i2c_driver); + +MODULE_AUTHOR("David Frey <david.frey@sensirion.com>"); +MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>"); +MODULE_DESCRIPTION("Sensirion SHT3x humidity and temperature sensor driver"); +MODULE_LICENSE("GPL"); @@ -11,12 +11,9 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
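The sht3x driver above registers itself with module_i2c_driver(), which generates the init/exit boilerplate that older drivers spell out by hand. Roughly, the macro expands to the following (a sketch; module_i2c_driver() is module_driver() specialized with i2c_add_driver()/i2c_del_driver()):

#include <linux/i2c.h>
#include <linux/module.h>

/* what module_i2c_driver(sht3x_i2c_driver) generates, approximately: */
static int __init sht3x_i2c_driver_init(void)
{
        return i2c_add_driver(&sht3x_i2c_driver);
}
module_init(sht3x_i2c_driver_init);

static void __exit sht3x_i2c_driver_exit(void)
{
        i2c_del_driver(&sht3x_i2c_driver);
}
module_exit(sht3x_i2c_driver_exit);

The i2c-elektor patch later in this series drops exactly this kind of hand-written pair in favor of module_isa_driver().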
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
  */

+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -27,6 +24,7 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/jiffies.h>
+#include <linux/regmap.h>
 #include <linux/thermal.h>
 #include <linux/of.h>
@@ -50,14 +48,23 @@
 #define TMP102_TLOW_REG		0x02
 #define TMP102_THIGH_REG	0x03

+#define TMP102_CONFREG_MASK	(TMP102_CONF_SD | TMP102_CONF_TM | \
+				 TMP102_CONF_POL | TMP102_CONF_F0 | \
+				 TMP102_CONF_F1 | TMP102_CONF_OS | \
+				 TMP102_CONF_EM | TMP102_CONF_AL | \
+				 TMP102_CONF_CR0 | TMP102_CONF_CR1)
+
+#define TMP102_CONFIG_CLEAR	(TMP102_CONF_SD | TMP102_CONF_OS | \
+				 TMP102_CONF_CR0)
+#define TMP102_CONFIG_SET	(TMP102_CONF_TM | TMP102_CONF_EM | \
+				 TMP102_CONF_CR1)
+
+#define CONVERSION_TIME_MS	35	/* in milli-seconds */
+
 struct tmp102 {
-	struct i2c_client *client;
-	struct device *hwmon_dev;
-	struct mutex lock;
+	struct regmap *regmap;
 	u16 config_orig;
-	unsigned long last_update;
-	int temp[3];
-	bool first_time;
+	unsigned long ready_time;
 };

 /* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -72,44 +79,22 @@ static inline u16 tmp102_mC_to_reg(int val)
 	return (val * 128) / 1000;
 }

-static const u8 tmp102_reg[] = {
-	TMP102_TEMP_REG,
-	TMP102_TLOW_REG,
-	TMP102_THIGH_REG,
-};
-
-static struct tmp102 *tmp102_update_device(struct device *dev)
-{
-	struct tmp102 *tmp102 = dev_get_drvdata(dev);
-	struct i2c_client *client = tmp102->client;
-
-	mutex_lock(&tmp102->lock);
-	if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
-		int i;
-		for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
-			int status = i2c_smbus_read_word_swapped(client,
-								 tmp102_reg[i]);
-			if (status > -1)
-				tmp102->temp[i] = tmp102_reg_to_mC(status);
-		}
-		tmp102->last_update = jiffies;
-		tmp102->first_time = false;
-	}
-	mutex_unlock(&tmp102->lock);
-	return tmp102;
-}
-
 static int tmp102_read_temp(void *dev, int *temp)
 {
-	struct tmp102 *tmp102 = tmp102_update_device(dev);
+	struct tmp102 *tmp102 = dev_get_drvdata(dev);
+	unsigned int reg;
+	int ret;

-	/* Is it too early even to return a conversion? */
-	if (tmp102->first_time) {
+	if (time_before(jiffies, tmp102->ready_time)) {
 		dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
 		return -EAGAIN;
 	}

-	*temp = tmp102->temp[0];
+	ret = regmap_read(tmp102->regmap, TMP102_TEMP_REG, &reg);
+	if (ret < 0)
+		return ret;
+
+	*temp = tmp102_reg_to_mC(reg);

 	return 0;
 }
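With TMP102_CONFIG_SET forcing extended mode (TMP102_CONF_EM), the temperature register is a left-justified two's-complement word at 62.5 m°C per 13-bit LSB, which is what the /128 and *128 scale factors implement (the 13-bit payload occupies bits 15:3, and reg * 1000 / 128 equals (reg / 8) * 62.5). A round-trip sketch of the conversion pair; masking off bit 0, which flags extended mode in readings, is an assumption here since the hunk above does not show tmp102_reg_to_mC()'s body:

#include <assert.h>
#include <stdint.h>

static inline int tmp102_reg_to_mC(int16_t val)
{
        return ((val & ~0x01) * 1000) / 128;    /* bit 0 assumed to be the EM flag */
}

static inline uint16_t tmp102_mC_to_reg(int val)
{
        return (val * 128) / 1000;
}

int main(void)
{
        assert(tmp102_mC_to_reg(25000) == 3200);        /* 25 °C -> 0x0c80 */
        assert(tmp102_reg_to_mC(3200) == 25000);
        assert(tmp102_reg_to_mC(8) == 62);              /* one 13-bit LSB, 62.5 truncated */
        return 0;
}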
@@ -119,13 +104,20 @@ static ssize_t tmp102_show_temp(struct device *dev,
 				char *buf)
 {
 	struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-	struct tmp102 *tmp102 = tmp102_update_device(dev);
+	struct tmp102 *tmp102 = dev_get_drvdata(dev);
+	int regaddr = sda->index;
+	unsigned int reg;
+	int err;

-	/* Is it too early even to return a read? */
-	if (tmp102->first_time)
+	if (regaddr == TMP102_TEMP_REG &&
+	    time_before(jiffies, tmp102->ready_time))
 		return -EAGAIN;

-	return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+	err = regmap_read(tmp102->regmap, regaddr, &reg);
+	if (err < 0)
+		return err;
+
+	return sprintf(buf, "%d\n", tmp102_reg_to_mC(reg));
 }

 static ssize_t tmp102_set_temp(struct device *dev,
@@ -134,29 +126,26 @@ static ssize_t tmp102_set_temp(struct device *dev,
 {
 	struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
 	struct tmp102 *tmp102 = dev_get_drvdata(dev);
-	struct i2c_client *client = tmp102->client;
+	int reg = sda->index;
 	long val;
-	int status;
+	int err;

 	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 	val = clamp_val(val, -256000, 255000);

-	mutex_lock(&tmp102->lock);
-	tmp102->temp[sda->index] = val;
-	status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index],
-					      tmp102_mC_to_reg(val));
-	mutex_unlock(&tmp102->lock);
-	return status ? : count;
+	err = regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(val));
+	return err ? : count;
 }

-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL,
+			  TMP102_TEMP_REG);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
-			  tmp102_set_temp, 1);
+			  tmp102_set_temp, TMP102_TLOW_REG);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
-			  tmp102_set_temp, 2);
+			  tmp102_set_temp, TMP102_THIGH_REG);

 static struct attribute *tmp102_attrs[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -166,20 +155,46 @@ static struct attribute *tmp102_attrs[] = {
 };
 ATTRIBUTE_GROUPS(tmp102);

-#define TMP102_CONFIG		(TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
-#define TMP102_CONFIG_RD_ONLY	(TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
-
 static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
 	.get_temp = tmp102_read_temp,
 };

+static void tmp102_restore_config(void *data)
+{
+	struct tmp102 *tmp102 = data;
+
+	regmap_write(tmp102->regmap, TMP102_CONF_REG, tmp102->config_orig);
+}
+
+static bool tmp102_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return reg != TMP102_TEMP_REG;
+}
+
+static bool tmp102_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	return reg == TMP102_TEMP_REG;
+}
+
+static const struct regmap_config tmp102_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = TMP102_THIGH_REG,
+	.writeable_reg = tmp102_is_writeable_reg,
+	.volatile_reg = tmp102_is_volatile_reg,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+	.cache_type = REGCACHE_RBTREE,
+	.use_single_rw = true,
+};
+
 static int tmp102_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct device *dev = &client->dev;
 	struct device *hwmon_dev;
 	struct tmp102 *tmp102;
-	int status;
+	unsigned int regval;
+	int err;

 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -193,101 +208,82 @@ static int tmp102_probe(struct i2c_client *client,
 		return -ENOMEM;

 	i2c_set_clientdata(client, tmp102);
-	tmp102->client = client;

-	status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-	if (status < 0) {
+	tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
+	if (IS_ERR(tmp102->regmap))
+		return PTR_ERR(tmp102->regmap);
+
+	err = regmap_read(tmp102->regmap, TMP102_CONF_REG, &regval);
+	if (err < 0) {
 		dev_err(dev, "error reading config register\n");
-		return status;
+		return err;
 	}
-	tmp102->config_orig = status;
-	status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-					      TMP102_CONFIG);
-	if (status < 0) {
-		dev_err(dev, "error writing config register\n");
-		goto fail_restore_config;
+
+	if ((regval & ~TMP102_CONFREG_MASK) !=
+	    (TMP102_CONF_R0 | TMP102_CONF_R1)) {
+		dev_err(dev, "unexpected config register value\n");
+		return -ENODEV;
 	}
-	status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-	if (status < 0) {
-		dev_err(dev, "error reading config register\n");
-		goto fail_restore_config;
+
+	tmp102->config_orig = regval;
+
+	devm_add_action(dev, tmp102_restore_config, tmp102);
+
+	regval &= ~TMP102_CONFIG_CLEAR;
+	regval |= TMP102_CONFIG_SET;
+
+	err = regmap_write(tmp102->regmap, TMP102_CONF_REG, regval);
+	if (err < 0) {
+		dev_err(dev, "error writing config register\n");
+		return err;
 	}
-	status &= ~TMP102_CONFIG_RD_ONLY;
-	if (status != TMP102_CONFIG) {
-		dev_err(dev, "config settings did not stick\n");
-		status = -ENODEV;
-		goto fail_restore_config;
+
+	tmp102->ready_time = jiffies;
+	if (tmp102->config_orig & TMP102_CONF_SD) {
+		/*
+		 * Mark that we are not ready with data until the first
+		 * conversion is complete
+		 */
+		tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
 	}
-	tmp102->last_update = jiffies;
-	/* Mark that we are not ready with data until conversion is complete */
-	tmp102->first_time = true;
-	mutex_init(&tmp102->lock);

-	hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
-						      tmp102, tmp102_groups);
+	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+							   tmp102,
+							   tmp102_groups);
 	if (IS_ERR(hwmon_dev)) {
 		dev_dbg(dev, "unable to register hwmon device\n");
-		status = PTR_ERR(hwmon_dev);
-		goto fail_restore_config;
+		return PTR_ERR(hwmon_dev);
 	}
-	tmp102->hwmon_dev = hwmon_dev;
 	devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
 					     &tmp102_of_thermal_ops);

 	dev_info(dev, "initialized\n");

 	return 0;
-
-fail_restore_config:
-	i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-				     tmp102->config_orig);
-	return status;
-}
-
-static int tmp102_remove(struct i2c_client *client)
-{
-	struct tmp102 *tmp102 = i2c_get_clientdata(client);
-
-	hwmon_device_unregister(tmp102->hwmon_dev);
-
-	/* Stop monitoring if device was stopped originally */
-	if (tmp102->config_orig & TMP102_CONF_SD) {
-		int config;
-
-		config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-		if (config >= 0)
-			i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-						     config | TMP102_CONF_SD);
-	}
-
-	return 0;
 }

 #ifdef CONFIG_PM_SLEEP
 static int tmp102_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	int config;
-
-	config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-	if (config < 0)
-		return config;
+	struct tmp102 *tmp102 = i2c_get_clientdata(client);

-	config |= TMP102_CONF_SD;
-	return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+	return regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+				  TMP102_CONF_SD, TMP102_CONF_SD);
 }

 static int tmp102_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	int config;
+	struct tmp102 *tmp102 = i2c_get_clientdata(client);
+	int err;
+
+	err = regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+				 TMP102_CONF_SD, 0);

-	config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-	if (config < 0)
-		return config;
+	tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);

-	config &= ~TMP102_CONF_SD;
-	return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+	return err;
 }
 #endif /* CONFIG_PM */

@@ -303,7
+299,6 @@ static struct i2c_driver tmp102_driver = { .driver.name = DRIVER_NAME, .driver.pm = &tmp102_dev_pm_ops, .probe = tmp102_probe, - .remove = tmp102_remove, .id_table = tmp102_id, }; @@ -47,7 +47,7 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; -enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; +enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 }; /* * The TMP401 registers, note some registers have different addresses for @@ -62,31 +62,34 @@ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; #define TMP401_MANUFACTURER_ID_REG 0xFE #define TMP401_DEVICE_ID_REG 0xFF -static const u8 TMP401_TEMP_MSB_READ[6][2] = { +static const u8 TMP401_TEMP_MSB_READ[7][2] = { { 0x00, 0x01 }, /* temp */ { 0x06, 0x08 }, /* low limit */ { 0x05, 0x07 }, /* high limit */ { 0x20, 0x19 }, /* therm (crit) limit */ { 0x30, 0x34 }, /* lowest */ { 0x32, 0x36 }, /* highest */ + { 0, 0x11 }, /* offset */ }; -static const u8 TMP401_TEMP_MSB_WRITE[6][2] = { +static const u8 TMP401_TEMP_MSB_WRITE[7][2] = { { 0, 0 }, /* temp (unused) */ { 0x0C, 0x0E }, /* low limit */ { 0x0B, 0x0D }, /* high limit */ { 0x20, 0x19 }, /* therm (crit) limit */ { 0x30, 0x34 }, /* lowest */ { 0x32, 0x36 }, /* highest */ + { 0, 0x11 }, /* offset */ }; -static const u8 TMP401_TEMP_LSB[6][2] = { +static const u8 TMP401_TEMP_LSB[7][2] = { { 0x15, 0x10 }, /* temp */ { 0x17, 0x14 }, /* low limit */ { 0x16, 0x13 }, /* high limit */ { 0, 0 }, /* therm (crit) limit (unused) */ { 0x31, 0x35 }, /* lowest */ { 0x33, 0x37 }, /* highest */ + { 0, 0x12 }, /* offset */ }; static const u8 TMP432_TEMP_MSB_READ[4][3] = { @@ -149,6 +152,7 @@ static const struct i2c_device_id tmp401_id[] = { { "tmp431", tmp431 }, { "tmp432", tmp432 }, { "tmp435", tmp435 }, + { "tmp461", tmp461 }, { } }; MODULE_DEVICE_TABLE(i2c, tmp401_id); @@ -170,7 +174,7 @@ struct tmp401_data { /* register values */ u8 status[4]; u8 config; - u16 temp[6][3]; + u16 temp[7][3]; u8 temp_crit_hyst; }; @@ -613,6 +617,22 @@ static const struct attribute_group tmp432_group = { }; /* + * Additional features of the TMP461 chip. + * The TMP461 temperature offset for the remote channel. 
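The added table rows plug into the driver's existing show_temp()/store_temp() callbacks: SENSOR_DEVICE_ATTR_2() stores two small integers per attribute, and the callbacks use them to index the register tables, row 6 being the new offset entry and column 1 the remote channel, as the attribute declared just below does. A read-only sketch of the convention, with foo_show_temp standing in for the driver's real callback:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static ssize_t foo_show_temp(struct device *dev,
                             struct device_attribute *devattr, char *buf)
{
        int nr = to_sensor_dev_attr_2(devattr)->nr;       /* table row: 6 == offset */
        int index = to_sensor_dev_attr_2(devattr)->index; /* channel: 1 == remote */

        /* the real callback reads TMP401_TEMP_MSB_READ[nr][index] et al. */
        return sprintf(buf, "%d %d\n", nr, index);
}

/* temp2_offset -> row 6 (offset registers), channel 1 (remote sensor) */
static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO, foo_show_temp, NULL, 6, 1);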
+ */ +static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp, + store_temp, 6, 1); + +static struct attribute *tmp461_attributes[] = { + &sensor_dev_attr_temp2_offset.dev_attr.attr, + NULL +}; + +static const struct attribute_group tmp461_group = { + .attrs = tmp461_attributes, +}; + +/* * Begin non sysfs callback code (aka Real code) */ @@ -714,7 +734,7 @@ static int tmp401_probe(struct i2c_client *client, const struct i2c_device_id *id) { static const char * const names[] = { - "TMP401", "TMP411", "TMP431", "TMP432", "TMP435" + "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461" }; struct device *dev = &client->dev; struct device *hwmon_dev; @@ -745,6 +765,9 @@ static int tmp401_probe(struct i2c_client *client, if (data->kind == tmp432) data->groups[groups++] = &tmp432_group; + if (data->kind == tmp461) + data->groups[groups++] = &tmp461_group; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, data->groups); if (IS_ERR(hwmon_dev)) @@ -98,6 +98,7 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev) } regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(regmap)) return PTR_ERR(regmap); @@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { /* * The trace run will continue with the same allocated trace - * buffer. As such zero-out the buffer so that we don't end - * up with stale data. - * - * Since the tracer is still enabled drvdata::buf - * can't be NULL. + * buffer. The trace buffer is cleared in tmc_etr_enable_hw(), + * so we don't have to explicitly clear it. Also, since the + * tracer is still enabled drvdata::buf can't be NULL. */ - memset(drvdata->buf, 0, drvdata->size); tmc_etr_enable_hw(drvdata); } else { /* @@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) */ vaddr = drvdata->vaddr; paddr = drvdata->paddr; - drvdata->buf = NULL; + drvdata->buf = drvdata->vaddr = NULL; } drvdata->reading = false; @@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev, int i; bool found = false; struct coresight_node *node; - struct coresight_connection *conn; /* An activated sink has been found. 
Enqueue the element */ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || @@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev, /* Not a sink - recursively explore each port found on this element */ for (i = 0; i < csdev->nr_outport; i++) { - conn = &csdev->conns[i]; - if (_coresight_build_path(conn->child_dev, path) == 0) { + struct coresight_device *child_dev = csdev->conns[i].child_dev; + + if (child_dev && _coresight_build_path(child_dev, path) == 0) { found = true; break; } @@ -425,6 +425,7 @@ out: struct list_head *coresight_build_path(struct coresight_device *csdev) { struct list_head *path; + int rc; path = kzalloc(sizeof(struct list_head), GFP_KERNEL); if (!path) @@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev) INIT_LIST_HEAD(path); - if (_coresight_build_path(csdev, path)) { + rc = _coresight_build_path(csdev, path); + if (rc) { kfree(path); - path = NULL; + return ERR_PTR(rc); } return path; @@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev) goto out; path = coresight_build_path(csdev); - if (!path) { + if (IS_ERR(path)) { pr_err("building path(s) failed\n"); + ret = PTR_ERR(path); goto out; } @@ -23,6 +23,7 @@ #include <linux/debugfs.h> #include <linux/idr.h> #include <linux/pci.h> +#include <linux/pm_runtime.h> #include <linux/dma-mapping.h> #include "intel_th.h" @@ -67,23 +68,33 @@ static int intel_th_probe(struct device *dev) hubdrv = to_intel_th_driver(hub->dev.driver); + pm_runtime_set_active(dev); + pm_runtime_no_callbacks(dev); + pm_runtime_enable(dev); + ret = thdrv->probe(to_intel_th_device(dev)); if (ret) - return ret; + goto out_pm; if (thdrv->attr_group) { ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group); - if (ret) { - thdrv->remove(thdev); - - return ret; - } + if (ret) + goto out; } if (thdev->type == INTEL_TH_OUTPUT && !intel_th_output_assigned(thdev)) + /* does not talk to hardware */ ret = hubdrv->assign(hub, thdev); +out: + if (ret) + thdrv->remove(thdev); + +out_pm: + if (ret) + pm_runtime_disable(dev); + return ret; } @@ -103,6 +114,8 @@ static int intel_th_remove(struct device *dev) if (thdrv->attr_group) sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group); + pm_runtime_get_sync(dev); + thdrv->remove(thdev); if (intel_th_output_assigned(thdev)) { @@ -110,9 +123,14 @@ static int intel_th_remove(struct device *dev) to_intel_th_driver(dev->parent->driver); if (hub->dev.driver) + /* does not talk to hardware */ hubdrv->unassign(hub, thdev); } + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + return 0; } @@ -185,6 +203,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev) { struct intel_th_driver *thdrv = to_intel_th_driver_or_null(thdev->dev.driver); + int ret = 0; if (!thdrv) return -ENODEV; @@ -192,12 +211,17 @@ static int intel_th_output_activate(struct intel_th_device *thdev) if (!try_module_get(thdrv->driver.owner)) return -ENODEV; + pm_runtime_get_sync(&thdev->dev); + if (thdrv->activate) - return thdrv->activate(thdev); + ret = thdrv->activate(thdev); + else + intel_th_trace_enable(thdev); - intel_th_trace_enable(thdev); + if (ret) + pm_runtime_put(&thdev->dev); - return 0; + return ret; } static void intel_th_output_deactivate(struct intel_th_device *thdev) @@ -213,6 +237,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev) else intel_th_trace_disable(thdev); + pm_runtime_put(&thdev->dev); module_put(thdrv->driver.owner); } @@ -465,6 +490,38 @@ static struct 
intel_th_subdevice {
 	},
 };

+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+	struct intel_th *th = container_of(work, struct intel_th,
+					   request_module_work);
+
+	request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+	INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+	schedule_work(&th->request_module_work);
+
+	return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+	flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+	return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
 static int intel_th_populate(struct intel_th *th, struct resource *devres,
 			     unsigned int ndevres, int irq)
 {
@@ -535,7 +592,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 		/* need switch driver to be loaded to enumerate the rest */
 		if (subdev->type == INTEL_TH_SWITCH && !req) {
 			th->hub = thdev;
-			err = request_module("intel_th_%s", subdev->name);
+			err = intel_th_request_hub_module(th);
 			if (!err)
 				req++;
 		}
@@ -628,6 +685,10 @@ intel_th_alloc(struct device *dev, struct resource *devres,

 	dev_set_drvdata(dev, th);

+	pm_runtime_no_callbacks(dev);
+	pm_runtime_put(dev);
+	pm_runtime_allow(dev);
+
 	err = intel_th_populate(th, devres, ndevres, irq);
 	if (err)
 		goto err_chrdev;
@@ -635,6 +696,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
 	return th;

 err_chrdev:
+	pm_runtime_forbid(dev);
+
 	__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
 			    "intel_th/output");

@@ -652,12 +715,16 @@ void intel_th_free(struct intel_th *th)
 {
 	int i;

+	intel_th_request_hub_module_flush(th);
 	for (i = 0; i < TH_SUBDEVICE_MAX; i++)
 		if (th->thdev[i] != th->hub)
 			intel_th_device_remove(th->thdev[i]);

 	intel_th_device_remove(th->hub);

+	pm_runtime_get_sync(th->dev);
+	pm_runtime_forbid(th->dev);
+
 	__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
 			    "intel_th/output");

@@ -682,6 +749,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
 	if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
 		return -EINVAL;

+	pm_runtime_get_sync(&thdev->dev);
 	hubdrv->enable(hub, &thdev->output);

 	return 0;
@@ -702,6 +770,7 @@ int intel_th_trace_disable(struct intel_th_device *thdev)
 		return -EINVAL;

 	hubdrv->disable(hub, &thdev->output);
+	pm_runtime_put(&thdev->dev);

 	return 0;
 }
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
+#include <linux/pm_runtime.h>

 #include "intel_th.h"
 #include "gth.h"
@@ -190,6 +191,11 @@ static ssize_t master_attr_store(struct device *dev,
 	if (old_port >= 0) {
 		gth->master[ma->master] = -1;
 		clear_bit(ma->master, gth->output[old_port].master);
+
+		/*
+		 * if the port is active, program this setting,
+		 * implies that runtime PM is on
+		 */
 		if (gth->output[old_port].output->active)
 			gth_master_set(gth, ma->master, -1);
 	}
@@ -204,7 +210,7 @@ static ssize_t master_attr_store(struct device *dev,

 		set_bit(ma->master, gth->output[port].master);

-		/* if the port is active, program this setting */
+		/* if the port is active, program this setting, see above */
 		if (gth->output[port].output->active)
 			gth_master_set(gth, ma->master, port);
 	}
@@ -326,11 +332,15 @@ static ssize_t output_attr_show(struct device *dev,
 	struct gth_device *gth = oa->gth;
 	size_t count;

+	pm_runtime_get_sync(dev);
+
 	spin_lock(&gth->gth_lock);
 	count = snprintf(buf, PAGE_SIZE, "%x\n",
			 gth_output_parm_get(gth, oa->port, oa->parm));
 	spin_unlock(&gth->gth_lock);

+	pm_runtime_put(dev);
+
 	return count;
 }
@@ -346,10 +356,14 @@ static ssize_t output_attr_store(struct device *dev,
 	if (kstrtouint(buf, 16, &config) < 0)
 		return -EINVAL;

+	pm_runtime_get_sync(dev);
+
 	spin_lock(&gth->gth_lock);
 	gth_output_parm_set(gth, oa->port, oa->parm, config);
 	spin_unlock(&gth->gth_lock);

+	pm_runtime_put(dev);
+
 	return count;
 }
@@ -451,7 +465,7 @@ static int intel_th_output_attributes(struct gth_device *gth)
 }

 /**
- * intel_th_gth_disable() - enable tracing to an output device
+ * intel_th_gth_disable() - disable tracing to an output device
  * @thdev:	GTH device
  * @output:	output device's descriptor
  *
@@ -114,6 +114,9 @@ intel_th_output_assigned(struct intel_th_device *thdev)
  * @unassign:	deassociate an output type device from an output port
  * @enable:	enable tracing for a given output device
  * @disable:	disable tracing for a given output device
+ * @irq:	interrupt callback
+ * @activate:	enable tracing on the output's side
+ * @deactivate:	disable tracing on the output's side
  * @fops:	file operations for device nodes
  * @attr_group:	attributes provided by the driver
  *
@@ -205,6 +208,9 @@ struct intel_th {
 	int			id;
 	int			major;
+#ifdef CONFIG_MODULES
+	struct work_struct	request_module_work;
+#endif /* CONFIG_MODULES */
 #ifdef CONFIG_INTEL_TH_DEBUG
 	struct dentry		*dbg;
 #endif
@@ -80,6 +80,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Kaby Lake PCH-H */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };

@@ -15,6 +15,7 @@
  * as defined in MIPI STPv2 specification.
  */

+#include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -482,14 +483,40 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
 		return -EFAULT;
 	}

+	pm_runtime_get_sync(&stm->dev);
+
 	count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
 			  kbuf, count);

+	pm_runtime_mark_last_busy(&stm->dev);
+	pm_runtime_put_autosuspend(&stm->dev);
 	kfree(kbuf);

 	return count;
 }

+static void stm_mmap_open(struct vm_area_struct *vma)
+{
+	struct stm_file *stmf = vma->vm_file->private_data;
+	struct stm_device *stm = stmf->stm;
+
+	pm_runtime_get(&stm->dev);
+}
+
+static void stm_mmap_close(struct vm_area_struct *vma)
+{
+	struct stm_file *stmf = vma->vm_file->private_data;
+	struct stm_device *stm = stmf->stm;
+
+	pm_runtime_mark_last_busy(&stm->dev);
+	pm_runtime_put_autosuspend(&stm->dev);
+}
+
+static const struct vm_operations_struct stm_mmap_vmops = {
+	.open	= stm_mmap_open,
+	.close	= stm_mmap_close,
+};
+
 static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct stm_file *stmf = file->private_data;
@@ -514,8 +541,11 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!phys)
 		return -EINVAL;

+	pm_runtime_get_sync(&stm->dev);
+
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = &stm_mmap_vmops;
 	vm_iomap_memory(vma, phys, size);

 	return 0;
@@ -701,6 +731,17 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
 	if (err)
 		goto err_device;

+	/*
+	 * Use delayed autosuspend to avoid bouncing back and forth
+	 * on recurring character device writes, with the initial
+	 * delay time of 2 seconds.
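This is the stock autosuspend recipe: declare the delay once at registration time, then bracket every hardware access with a get on entry and mark_last_busy()/put_autosuspend() on exit, so the device is only suspended after the idle window expires. A generic sketch of both halves, with foo_* names standing in for a real driver:

#include <linux/pm_runtime.h>

static void foo_setup_autosuspend(struct device *dev)
{
        pm_runtime_no_callbacks(dev);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s idle window */
        pm_runtime_set_suspended(dev);
        pm_runtime_enable(dev);
}

static void foo_do_io(struct device *dev)
{
        pm_runtime_get_sync(dev);               /* resume if suspended */

        /* ... talk to the hardware ... */

        pm_runtime_mark_last_busy(dev);         /* restart the idle clock */
        pm_runtime_put_autosuspend(dev);        /* idle-suspend later */
}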
+ */ + pm_runtime_no_callbacks(&stm->dev); + pm_runtime_use_autosuspend(&stm->dev); + pm_runtime_set_autosuspend_delay(&stm->dev, 2000); + pm_runtime_set_suspended(&stm->dev); + pm_runtime_enable(&stm->dev); + return 0; err_device: @@ -724,6 +765,9 @@ void stm_unregister_device(struct stm_data *stm_data) struct stm_source_device *src, *iter; int i, ret; + pm_runtime_dont_use_autosuspend(&stm->dev); + pm_runtime_disable(&stm->dev); + mutex_lock(&stm->link_mutex); list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { ret = __stm_source_link_drop(src, stm); @@ -878,6 +922,8 @@ static int __stm_source_link_drop(struct stm_source_device *src, stm_output_free(link, &src->output); list_del_init(&src->link_entry); + pm_runtime_mark_last_busy(&link->dev); + pm_runtime_put_autosuspend(&link->dev); /* matches stm_find_device() from stm_source_link_store() */ stm_put_device(link); rcu_assign_pointer(src->link, NULL); @@ -971,8 +1017,11 @@ static ssize_t stm_source_link_store(struct device *dev, if (!link) return -EINVAL; + pm_runtime_get(&link->dev); + err = stm_source_link_add(src, link); if (err) { + pm_runtime_put_autosuspend(&link->dev); /* matches the stm_find_device() above */ stm_put_device(link); } @@ -1033,6 +1082,9 @@ int stm_source_register_device(struct device *parent, if (err) goto err; + pm_runtime_no_callbacks(&src->dev); + pm_runtime_forbid(&src->dev); + err = device_add(&src->dev); if (err) goto err; @@ -88,8 +88,8 @@ config I2C_SMBUS tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO help Say Y here if you want support for SMBus extensions to the I2C - specification. At the moment, the only supported extension is - the SMBus alert protocol. + specification. At the moment, two extensions are supported: + the SMBus Alert protocol and the SMBus Host Notify protocol. This support is also available as a module. If so, the module will be called i2c-smbus. @@ -91,6 +91,7 @@ config I2C_I801 tristate "Intel 82801 (ICH/PCH)" depends on PCI select CHECK_SIGNATURE if X86 && DMI + select I2C_SMBUS help If you say yes to this option, support will be included for the Intel 801 family of mainboard I2C interfaces. Specifically, the following @@ -397,7 +398,7 @@ config I2C_BCM_KONA config I2C_BRCMSTB tristate "BRCM Settop I2C controller" - depends on ARCH_BRCMSTB || COMPILE_TEST + depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST default y help If you say yes to this option, support will be included for the @@ -490,7 +491,9 @@ config I2C_DESIGNWARE_PCI config I2C_DESIGNWARE_BAYTRAIL bool "Intel Baytrail I2C semaphore support" - depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI + depends on ACPI + depends on (I2C_DESIGNWARE_PLATFORM=m && IOSF_MBI) || \ + (I2C_DESIGNWARE_PLATFORM=y && IOSF_MBI=y) help This driver enables managed host access to the PMIC I2C bus on select Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows @@ -635,7 +638,7 @@ config I2C_LPC2K config I2C_MESON tristate "Amlogic Meson I2C controller" - depends on ARCH_MESON + depends on ARCH_MESON || COMPILE_TEST help If you say yes to this option, support will be included for the I2C interface on the Amlogic Meson family of SoCs. 
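On the consumer side, a client driver of this vintage receives the Host Notify events that the updated I2C_SMBUS help text above advertises through the extended .alert() callback, which carries a protocol discriminator so the same hook serves both SMBus Alert and Host Notify. A minimal sketch, assuming a hypothetical foo client and the .alert() signature introduced alongside this Host Notify work:

#include <linux/i2c.h>

static void foo_alert(struct i2c_client *client,
                      enum i2c_alert_protocol protocol, unsigned int data)
{
        if (protocol != I2C_PROTOCOL_SMBUS_HOST_NOTIFY)
                return;

        /* "data" is the 16-bit status word the device pushed to the host */
        dev_info(&client->dev, "Host Notify event, data %#x\n", data);
}

static struct i2c_driver foo_driver = {
        .driver.name	= "foo",
        .alert		= foo_alert,
};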
@@ -924,7 +927,7 @@ config I2C_UNIPHIER_F config I2C_VERSATILE tristate "ARM Versatile/Realview I2C bus support" - depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS + depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST select I2C_ALGOBIT help Say yes if you want to support the I2C serial bus on ARMs Versatile @@ -253,7 +253,8 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) i2c_dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_dev->clk)) { - dev_err(&pdev->dev, "Could not get clock\n"); + if (PTR_ERR(i2c_dev->clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get clock\n"); return PTR_ERR(i2c_dev->clk); } @@ -343,10 +343,9 @@ static int brcmstb_i2c_xfer_bsc_data(struct brcmstb_i2c_dev *dev, struct bsc_regs *pi2creg = dev->bsc_regmap; int no_ack = pmsg->flags & I2C_M_IGNORE_NAK; int data_regsz = brcmstb_i2c_get_data_regsz(dev); - int xfersz = brcmstb_i2c_get_xfersz(dev); /* see if the transaction needs to check NACK conditions */ - if (no_ack || len <= xfersz) { + if (no_ack) { cmd = (pmsg->flags & I2C_M_RD) ? CMD_RD_NOACK : CMD_WR_NOACK; pi2creg->ctlhi_reg |= BSC_CTLHI_REG_IGNORE_ACK_MASK; @@ -663,7 +663,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) i2c_dw_xfer_init(dev); /* wait for tx to complete */ - if (!wait_for_completion_timeout(&dev->cmd_complete, HZ)) { + if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) { dev_err(dev->dev, "controller timed out\n"); /* i2c_dw_init implicitly disables the adapter */ i2c_dw_init(dev); @@ -26,6 +26,7 @@ #define DW_IC_CON_MASTER 0x1 #define DW_IC_CON_SPEED_STD 0x2 #define DW_IC_CON_SPEED_FAST 0x4 +#define DW_IC_CON_SPEED_MASK 0x6 #define DW_IC_CON_10BITADDR_MASTER 0x10 #define DW_IC_CON_RESTART_EN 0x20 #define DW_IC_CON_SLAVE_DISABLE 0x40 @@ -6,7 +6,7 @@ * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. - * Copyright (C) 2011, 2015 Intel Corporation. + * Copyright (C) 2011, 2015, 2016 Intel Corporation. 
* * ---------------------------------------------------------------------------- * @@ -23,31 +23,27 @@ * */ -#include <linux/kernel.h> -#include <linux/module.h> +#include <linux/acpi.h> #include <linux/delay.h> -#include <linux/i2c.h> -#include <linux/errno.h> -#include <linux/sched.h> #include <linux/err.h> +#include <linux/errno.h> +#include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> -#include <linux/acpi.h> +#include <linux/sched.h> +#include <linux/slab.h> + #include "i2c-designware-core.h" #define DRIVER_NAME "i2c-designware-pci" enum dw_pci_ctl_id_t { - medfield_0, - medfield_1, - medfield_2, - medfield_3, - medfield_4, - medfield_5, - + medfield, + merrifield, baytrail, haswell, }; @@ -68,6 +64,7 @@ struct dw_pci_controller { u32 clk_khz; u32 functionality; struct dw_scl_sda_cfg *scl_sda_cfg; + int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); }; #define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \ @@ -80,6 +77,14 @@ struct dw_pci_controller { I2C_FUNC_SMBUS_WORD_DATA | \ I2C_FUNC_SMBUS_I2C_BLOCK) +/* Merrifield HCNT/LCNT/SDA hold time */ +static struct dw_scl_sda_cfg mrfld_config = { + .ss_hcnt = 0x2f8, + .fs_hcnt = 0x87, + .ss_lcnt = 0x37b, + .fs_lcnt = 0x10a, +}; + /* BayTrail HCNT/LCNT/SDA hold time */ static struct dw_scl_sda_cfg byt_config = { .ss_hcnt = 0x200, @@ -98,48 +103,60 @@ static struct dw_scl_sda_cfg hsw_config = { .sda_hold = 0x9, }; +static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) +{ + switch (pdev->device) { + case 0x0817: + c->bus_cfg &= ~DW_IC_CON_SPEED_MASK; + c->bus_cfg |= DW_IC_CON_SPEED_STD; + case 0x0818: + case 0x0819: + c->bus_num = pdev->device - 0x817 + 3; + return 0; + case 0x082C: + case 0x082D: + case 0x082E: + c->bus_num = pdev->device - 0x82C + 0; + return 0; + } + return -ENODEV; +} + +static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) +{ + /* + * On Intel Merrifield the user visible i2c busses are enumerated + * [1..7]. So, we add 1 to shift the default range. Besides that the + * first PCI slot provides 4 functions, that's why we have to add 0 to + * the first slot and 4 to the next one. 
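Concretely, slot 8 carries functions 0 through 3 and slot 9 functions 0 through 2, so the two cases below cover adapters i2c1 through i2c7 with no gaps. A table check of that mapping, with the standard devfn encodings from <linux/pci.h> reproduced so the sketch builds standalone:

#include <assert.h>

#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)         ((devfn) & 0x07)

static int mrfld_bus_num(unsigned int devfn)
{
        switch (PCI_SLOT(devfn)) {
        case 8:
                return PCI_FUNC(devfn) + 0 + 1; /* i2c1..i2c4 */
        case 9:
                return PCI_FUNC(devfn) + 4 + 1; /* i2c5..i2c7 */
        }
        return -1;
}

int main(void)
{
        assert(mrfld_bus_num(PCI_DEVFN(8, 0)) == 1);
        assert(mrfld_bus_num(PCI_DEVFN(8, 3)) == 4);
        assert(mrfld_bus_num(PCI_DEVFN(9, 0)) == 5);
        assert(mrfld_bus_num(PCI_DEVFN(9, 2)) == 7);
        return 0;
}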
+ */ + switch (PCI_SLOT(pdev->devfn)) { + case 8: + c->bus_num = PCI_FUNC(pdev->devfn) + 0 + 1; + return 0; + case 9: + c->bus_num = PCI_FUNC(pdev->devfn) + 4 + 1; + return 0; + } + return -ENODEV; +} + static struct dw_pci_controller dw_pci_controllers[] = { - [medfield_0] = { - .bus_num = 0, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [medfield_1] = { - .bus_num = 1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [medfield_2] = { - .bus_num = 2, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [medfield_3] = { - .bus_num = 3, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [medfield_4] = { - .bus_num = 4, + [medfield] = { + .bus_num = -1, .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, .clk_khz = 25000, + .setup = mfld_setup, }, - [medfield_5] = { - .bus_num = 5, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, + [merrifield] = { + .bus_num = -1, + .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, + .tx_fifo_depth = 64, + .rx_fifo_depth = 64, + .scl_sda_cfg = &mrfld_config, + .setup = mrfld_setup, }, [baytrail] = { .bus_num = -1, @@ -190,7 +207,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, struct dw_i2c_dev *dev; struct i2c_adapter *adap; int r; - struct dw_pci_controller *controller; + struct dw_pci_controller *controller; struct dw_scl_sda_cfg *cfg; if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) { @@ -224,6 +241,13 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->base = pcim_iomap_table(pdev)[0]; dev->dev = &pdev->dev; dev->irq = pdev->irq; + + if (controller->setup) { + r = controller->setup(pdev, controller); + if (r) + return r; + } + dev->functionality = controller->functionality | DW_DEFAULT_FUNCTIONALITY; @@ -276,12 +300,15 @@ MODULE_ALIAS("i2c_designware-pci"); static const struct pci_device_id i2_designware_pci_ids[] = { /* Medfield */ - { PCI_VDEVICE(INTEL, 0x0817), medfield_3 }, - { PCI_VDEVICE(INTEL, 0x0818), medfield_4 }, - { PCI_VDEVICE(INTEL, 0x0819), medfield_5 }, - { PCI_VDEVICE(INTEL, 0x082C), medfield_0 }, - { PCI_VDEVICE(INTEL, 0x082D), medfield_1 }, - { PCI_VDEVICE(INTEL, 0x082E), medfield_2 }, + { PCI_VDEVICE(INTEL, 0x0817), medfield }, + { PCI_VDEVICE(INTEL, 0x0818), medfield }, + { PCI_VDEVICE(INTEL, 0x0819), medfield }, + { PCI_VDEVICE(INTEL, 0x082C), medfield }, + { PCI_VDEVICE(INTEL, 0x082D), medfield }, + { PCI_VDEVICE(INTEL, 0x082E), medfield }, + /* Merrifield */ + { PCI_VDEVICE(INTEL, 0x1195), merrifield }, + { PCI_VDEVICE(INTEL, 0x1196), merrifield }, /* Baytrail */ { PCI_VDEVICE(INTEL, 0x0F41), baytrail }, { PCI_VDEVICE(INTEL, 0x0F42), baytrail }, @@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev) ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata); if (ret < 0) { dev_err(&pdev->dev, "failed to request irq (%d)\n", ret); - return ret; + goto err_disable_clk; } ret = i2c_add_adapter(&ddata->adapter); @@ -319,16 +319,6 @@ static struct isa_driver i2c_elektor_driver = { }, }; -static int __init i2c_pcfisa_init(void) -{ - return isa_register_driver(&i2c_elektor_driver, 1); -} - -static void __exit i2c_pcfisa_exit(void) -{ - 
isa_unregister_driver(&i2c_elektor_driver); -} - MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>"); MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 ISA bus adapter"); MODULE_LICENSE("GPL"); @@ -338,6 +328,4 @@ module_param(irq, int, 0); module_param(clock, int, 0); module_param(own, int, 0); module_param(mmapped, int, 0); - -module_init(i2c_pcfisa_init); -module_exit(i2c_pcfisa_exit); +module_isa_driver(i2c_elektor_driver, 1); @@ -72,6 +72,7 @@ * Block process call transaction no * I2C block read transaction yes (doesn't use the block buffer) * Slave mode no + * SMBus Host Notify yes * Interrupt processing yes * * See the file Documentation/i2c/busses/i2c-i801 for details. @@ -86,6 +87,7 @@ #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> +#include <linux/i2c-smbus.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/dmi.h> @@ -96,8 +98,7 @@ #include <linux/platform_data/itco_wdt.h> #include <linux/pm_runtime.h> -#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ - defined CONFIG_DMI +#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI #include <linux/gpio.h> #include <linux/i2c-mux-gpio.h> #endif @@ -113,6 +114,10 @@ #define SMBPEC(p) (8 + (p)->smba) /* ICH3 and later */ #define SMBAUXSTS(p) (12 + (p)->smba) /* ICH4 and later */ #define SMBAUXCTL(p) (13 + (p)->smba) /* ICH4 and later */ +#define SMBSLVSTS(p) (16 + (p)->smba) /* ICH3 and later */ +#define SMBSLVCMD(p) (17 + (p)->smba) /* ICH3 and later */ +#define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */ +#define SMBNTFDDAT(p) (22 + (p)->smba) /* ICH3 and later */ /* PCI Address Constants */ #define SMBBAR 4 @@ -144,6 +149,10 @@ /* TCO configuration bits for TCOCTL */ #define TCOCTL_EN 0x0100 +/* Auxiliary status register bits, ICH4+ only */ +#define SMBAUXSTS_CRCE 1 +#define SMBAUXSTS_STCO 2 + /* Auxiliary control register bits, ICH4+ only */ #define SMBAUXCTL_CRC 1 #define SMBAUXCTL_E32B 2 @@ -177,6 +186,12 @@ #define SMBHSTSTS_INTR 0x02 #define SMBHSTSTS_HOST_BUSY 0x01 +/* Host Notify Status registers bits */ +#define SMBSLVSTS_HST_NTFY_STS 1 + +/* Host Notify Command registers bits */ +#define SMBSLVCMD_HST_NTFY_INTREN 0x01 + #define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \ SMBHSTSTS_DEV_ERR) @@ -239,19 +254,29 @@ struct i801_priv { int len; u8 *data; -#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ - defined CONFIG_DMI +#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI const struct i801_mux_config *mux_drvdata; struct platform_device *mux_pdev; #endif struct platform_device *tco_pdev; + + /* + * If set to true the host controller registers are reserved for + * ACPI AML use. Protected by acpi_lock. 
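The SMBSLVSTS/SMBSLVCMD/SMBNTFDADD/SMBNTFDDAT macros added earlier in this patch expose the Host Notify register bank 16 bytes into the SMBus I/O window. The address shadow register keeps the notifying client's 7-bit address left-justified, which is why the interrupt handler below shifts it right by one; a decode helper under that layout assumption:

#include <stdint.h>

struct host_notify_event {
        uint8_t  addr;  /* 7-bit client address from SMBNTFDADD[7:1] */
        uint16_t data;  /* 16-bit status word from SMBNTFDDAT */
};

static struct host_notify_event decode_host_notify(uint8_t ntfdadd,
                                                   uint16_t ntfddat)
{
        return (struct host_notify_event){
                .addr = ntfdadd >> 1,
                .data = ntfddat,
        };
}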
+ */ + bool acpi_reserved; + struct mutex acpi_lock; + struct smbus_host_notify *host_notify; }; +#define SMBHSTNTFY_SIZE 8 + #define FEATURE_SMBUS_PEC (1 << 0) #define FEATURE_BLOCK_BUFFER (1 << 1) #define FEATURE_BLOCK_PROC (1 << 2) #define FEATURE_I2C_BLOCK_READ (1 << 3) #define FEATURE_IRQ (1 << 4) +#define FEATURE_HOST_NOTIFY (1 << 5) /* Not really a feature, but it's convenient to handle it as such */ #define FEATURE_IDF (1 << 15) #define FEATURE_TCO (1 << 16) @@ -262,6 +287,7 @@ static const char *i801_feature_names[] = { "Block process call", "I2C block read", "Interrupt", + "SMBus Host Notify", }; static unsigned int disable_features; @@ -270,7 +296,8 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n" "\t\t 0x01 disable SMBus PEC\n" "\t\t 0x02 disable the block buffer\n" "\t\t 0x08 disable the I2C block read functionality\n" - "\t\t 0x10 don't use interrupts "); + "\t\t 0x10 don't use interrupts\n" + "\t\t 0x20 disable SMBus Host Notify "); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. */ @@ -298,6 +325,29 @@ static int i801_check_pre(struct i801_priv *priv) } } + /* + * Clear CRC status if needed. + * During normal operation, i801_check_post() takes care + * of it after every operation. We do it here only in case + * the hardware was already in this state when the driver + * started. + */ + if (priv->features & FEATURE_SMBUS_PEC) { + status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE; + if (status) { + dev_dbg(&priv->pci_dev->dev, + "Clearing aux status flags (%02x)\n", status); + outb_p(status, SMBAUXSTS(priv)); + status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE; + if (status) { + dev_err(&priv->pci_dev->dev, + "Failed clearing aux status flags (%02x)\n", + status); + return -EBUSY; + } + } + } + return 0; } @@ -341,8 +391,30 @@ static int i801_check_post(struct i801_priv *priv, int status) dev_err(&priv->pci_dev->dev, "Transaction failed\n"); } if (status & SMBHSTSTS_DEV_ERR) { - result = -ENXIO; - dev_dbg(&priv->pci_dev->dev, "No response\n"); + /* + * This may be a PEC error, check and clear it. + * + * AUXSTS is handled differently from HSTSTS. + * For HSTSTS, i801_isr() or i801_wait_intr() + * has already cleared the error bits in hardware, + * and we are passed a copy of the original value + * in "status". + * For AUXSTS, the hardware register is left + * for us to handle here. + * This is asymmetric, slightly iffy, but safe, + * since all this code is serialized and the CRCE + * bit is harmless as long as it's cleared before + * the next operation. 
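Both AUXSTS paths above rely on the same hardware convention: the error flags are write-one-to-clear, so the value just read is written straight back to acknowledge exactly the bits that were observed. A generic sketch of the idiom (inb_p()/outb_p() are the real port accessors; the helper name and factoring are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/* read, acknowledge and report W1C error flags at I/O port "port" */
static u8 ack_w1c_flags(unsigned long port, u8 mask)
{
        u8 status = inb_p(port) & mask;

        if (status)
                outb_p(status, port);   /* writing 1s clears those bits */

        return status;
}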
+ */ + if ((priv->features & FEATURE_SMBUS_PEC) && + (inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE)) { + outb_p(SMBAUXSTS_CRCE, SMBAUXSTS(priv)); + result = -EBADMSG; + dev_dbg(&priv->pci_dev->dev, "PEC error\n"); + } else { + result = -ENXIO; + dev_dbg(&priv->pci_dev->dev, "No response\n"); + } } if (status & SMBHSTSTS_BUS_ERR) { result = -EAGAIN; @@ -504,8 +576,23 @@ static void i801_isr_byte_done(struct i801_priv *priv) outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv)); } +static irqreturn_t i801_host_notify_isr(struct i801_priv *priv) +{ + unsigned short addr; + unsigned int data; + + addr = inb_p(SMBNTFDADD(priv)) >> 1; + data = inw_p(SMBNTFDDAT(priv)); + + i2c_handle_smbus_host_notify(priv->host_notify, addr, data); + + /* clear Host Notify bit and return */ + outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); + return IRQ_HANDLED; +} + /* - * There are two kinds of interrupts: + * There are three kinds of interrupts: * * 1) i801 signals transaction completion with one of these interrupts: * INTR - Success @@ -517,6 +604,8 @@ static void i801_isr_byte_done(struct i801_priv *priv) * * 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt * occurs for each byte of a byte-by-byte to prepare the next byte. + * + * 3) Host Notify interrupts */ static irqreturn_t i801_isr(int irq, void *dev_id) { @@ -529,6 +618,12 @@ static irqreturn_t i801_isr(int irq, void *dev_id) if (!(pcists & SMBPCISTS_INTS)) return IRQ_NONE; + if (priv->features & FEATURE_HOST_NOTIFY) { + status = inb_p(SMBSLVSTS(priv)); + if (status & SMBSLVSTS_HST_NTFY_STS) + return i801_host_notify_isr(priv); + } + status = inb_p(SMBHSTSTS(priv)); if (status & SMBHSTSTS_BYTE_DONE) i801_isr_byte_done(priv); @@ -540,7 +635,7 @@ static irqreturn_t i801_isr(int irq, void *dev_id) status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS; if (status) { outb_p(status, SMBHSTSTS(priv)); - priv->status |= status; + priv->status = status; wake_up(&priv->waitq); } @@ -718,6 +813,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, int ret = 0, xact = 0; struct i801_priv *priv = i2c_get_adapdata(adap); + mutex_lock(&priv->acpi_lock); + if (priv->acpi_reserved) { + mutex_unlock(&priv->acpi_lock); + return -EBUSY; + } + pm_runtime_get_sync(&priv->pci_dev->dev); hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) @@ -820,6 +921,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, out: pm_runtime_mark_last_busy(&priv->pci_dev->dev); pm_runtime_put_autosuspend(&priv->pci_dev->dev); + mutex_unlock(&priv->acpi_lock); return ret; } @@ -833,7 +935,28 @@ static u32 i801_func(struct i2c_adapter *adapter) I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | ((priv->features & FEATURE_I2C_BLOCK_READ) ? - I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0); + I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | + ((priv->features & FEATURE_HOST_NOTIFY) ? 
+ I2C_FUNC_SMBUS_HOST_NOTIFY : 0); +} + +static int i801_enable_host_notify(struct i2c_adapter *adapter) +{ + struct i801_priv *priv = i2c_get_adapdata(adapter); + + if (!(priv->features & FEATURE_HOST_NOTIFY)) + return -ENOTSUPP; + + if (!priv->host_notify) + priv->host_notify = i2c_setup_smbus_host_notify(adapter); + if (!priv->host_notify) + return -ENOMEM; + + outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv)); + /* clear Host Notify bit to allow a new notification */ + outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); + + return 0; } static const struct i2c_algorithm smbus_algorithm = { @@ -1008,8 +1131,7 @@ static void __init input_apanel_init(void) {} static void i801_probe_optional_slaves(struct i801_priv *priv) {} #endif /* CONFIG_X86 && CONFIG_DMI */ -#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \ - defined CONFIG_DMI +#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI static struct i801_mux_config i801_mux_config_asus_z8_d12 = { .gpio_chip = "gpio_ich", .values = { 0x02, 0x03 }, @@ -1257,6 +1379,83 @@ static void i801_add_tco(struct i801_priv *priv) priv->tco_pdev = pdev; } +#ifdef CONFIG_ACPI +static acpi_status +i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, + u64 *value, void *handler_context, void *region_context) +{ + struct i801_priv *priv = handler_context; + struct pci_dev *pdev = priv->pci_dev; + acpi_status status; + + /* + * Once BIOS AML code touches the OpRegion we warn and inhibit any + * further access from the driver itself. This device is now owned + * by the system firmware. + */ + mutex_lock(&priv->acpi_lock); + + if (!priv->acpi_reserved) { + priv->acpi_reserved = true; + + dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); + dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); + + /* + * BIOS is accessing the host controller so prevent it from + * suspending automatically from now on. 
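+		 *
+		 * The matching pm_runtime_put() is issued from
+		 * i801_acpi_remove() below, once the driver is unbound.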
+		 */
+		pm_runtime_get_sync(&pdev->dev);
+	}
+
+	if ((function & ACPI_IO_MASK) == ACPI_READ)
+		status = acpi_os_read_port(address, (u32 *)value, bits);
+	else
+		status = acpi_os_write_port(address, (u32)*value, bits);
+
+	mutex_unlock(&priv->acpi_lock);
+
+	return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+	struct acpi_device *adev;
+	acpi_status status;
+
+	adev = ACPI_COMPANION(&priv->pci_dev->dev);
+	if (adev) {
+		status = acpi_install_address_space_handler(adev->handle,
+				ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+				NULL, priv);
+		if (ACPI_SUCCESS(status))
+			return 0;
+	}
+
+	return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+	struct acpi_device *adev;
+
+	adev = ACPI_COMPANION(&priv->pci_dev->dev);
+	if (!adev)
+		return;
+
+	acpi_remove_address_space_handler(adev->handle,
+		ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+	mutex_lock(&priv->acpi_lock);
+	if (priv->acpi_reserved)
+		pm_runtime_put(&priv->pci_dev->dev);
+	mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	unsigned char temp;
@@ -1274,6 +1473,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	priv->adapter.dev.parent = &dev->dev;
 	ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
 	priv->adapter.retries = 3;
+	mutex_init(&priv->acpi_lock);
 	priv->pci_dev = dev;
 
 	switch (dev->device) {
@@ -1287,6 +1487,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		priv->features |= FEATURE_SMBUS_PEC;
 		priv->features |= FEATURE_BLOCK_BUFFER;
 		priv->features |= FEATURE_TCO;
+		priv->features |= FEATURE_HOST_NOTIFY;
 		break;
 
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
@@ -1306,6 +1507,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		priv->features |= FEATURE_BLOCK_BUFFER;
 		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801CA_3:
+		priv->features |= FEATURE_HOST_NOTIFY;
+		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801BA_2:
 	case PCI_DEVICE_ID_INTEL_82801AB_3:
 	case PCI_DEVICE_ID_INTEL_82801AA_3:
@@ -1336,10 +1539,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENODEV;
 	}
 
-	err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
-	if (err) {
+	if (i801_acpi_probe(priv))
 		return -ENODEV;
-	}
 
 	err = pcim_iomap_regions(dev, 1 << SMBBAR,
 				 dev_driver_string(&dev->dev));
@@ -1348,6 +1549,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 			"Failed to request SMBus region 0x%lx-0x%Lx\n",
 			priv->smba,
 			(unsigned long long)pci_resource_end(dev, SMBBAR));
+		i801_acpi_remove(priv);
 		return err;
 	}
 
@@ -1412,9 +1614,19 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	err = i2c_add_adapter(&priv->adapter);
 	if (err) {
 		dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+		i801_acpi_remove(priv);
 		return err;
 	}
 
+	/*
+	 * Enable Host Notify for chips that support it.
+	 * It is done after i2c_add_adapter() so that we are sure the work queue
+	 * is not used if i2c_add_adapter() fails.
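+	 * The same helper is called again from i801_resume(), as seen further
+	 * down, so Host Notify keeps working across a suspend/resume cycle.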
+ */ + err = i801_enable_host_notify(&priv->adapter); + if (err && err != -ENOTSUPP) + dev_warn(&dev->dev, "Unable to enable SMBus Host Notify\n"); + i801_probe_optional_slaves(priv); /* We ignore errors - multiplexing is optional */ i801_add_mux(priv); @@ -1438,6 +1650,7 @@ static void i801_remove(struct pci_dev *dev) i801_del_mux(priv); i2c_del_adapter(&priv->adapter); + i801_acpi_remove(priv); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); platform_device_unregister(priv->tco_pdev); @@ -1460,6 +1673,14 @@ static int i801_suspend(struct device *dev) static int i801_resume(struct device *dev) { + struct pci_dev *pci_dev = to_pci_dev(dev); + struct i801_priv *priv = pci_get_drvdata(pci_dev); + int err; + + err = i801_enable_host_notify(&priv->adapter); + if (err && err != -ENOTSUPP) + dev_warn(dev, "Unable to enable SMBus Host Notify\n"); + return 0; } #endif @@ -791,10 +791,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev) jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); - i2c->cmd = 0; - memset(i2c->cmd_buf, 0, BUFSIZE); - memset(i2c->data_buf, 0, BUFSIZE); - i2c->irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); @@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, return result; for (i = 0; i < length; i++) { - /* for the last byte TWSI_CTL_AAK must not be set */ - if (i + 1 == length) + /* + * For the last byte to receive TWSI_CTL_AAK must not be set. + * + * A special case is I2C_M_RECV_LEN where we don't know the + * additional length yet. If recv_len is set we assume we're + * not reading the final byte and therefore need to set + * TWSI_CTL_AAK. + */ + if ((i + 1 == length) && !(recv_len && i == 0)) final_read = true; /* clear iflg to allow next event */ @@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, data[i] = octeon_i2c_data_read(i2c); if (recv_len && i == 0) { - if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { - dev_err(i2c->dev, - "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n", - __func__, data[i]); + if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) return -EPROTO; - } length += data[i]; } @@ -193,23 +193,12 @@ static struct isa_driver pca_isa_driver = { } }; -static int __init pca_isa_init(void) -{ - return isa_register_driver(&pca_isa_driver, 1); -} - -static void __exit pca_isa_exit(void) -{ - isa_unregister_driver(&pca_isa_driver); -} - MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); MODULE_DESCRIPTION("ISA base PCA9564/PCA9665 driver"); MODULE_LICENSE("GPL"); module_param(base, ulong, 0); MODULE_PARM_DESC(base, "I/O base address"); - module_param(irq, int, 0); MODULE_PARM_DESC(irq, "IRQ"); module_param(clock, int, 0); @@ -220,6 +209,4 @@ MODULE_PARM_DESC(clock, "Clock rate in hertz.\n\t\t" "\t\t\t\tFast: 100100 - 400099\n" "\t\t\t\tFast+: 400100 - 10000099\n" "\t\t\t\tTurbo: Up to 1265800"); - -module_init(pca_isa_init); -module_exit(pca_isa_exit); +module_isa_driver(pca_isa_driver, 1); @@ -213,14 +213,16 @@ static irqreturn_t qup_i2c_interrupt(int irq, void *dev) bus_err &= I2C_STATUS_ERROR_MASK; qup_err &= QUP_STATUS_ERROR_FLAGS; - if (qup_err) { - /* Clear Error interrupt */ + /* Clear the error bits in QUP_ERROR_FLAGS */ + if (qup_err) writel(qup_err, qup->base + QUP_ERROR_FLAGS); - goto done; - } - if (bus_err) { - /* Clear Error interrupt */ + /* Clear the error bits in QUP_I2C_STATUS */ + if (bus_err) + writel(bus_err, qup->base + QUP_I2C_STATUS); + + /* Reset the QUP State in case of error */ + if (qup_err || bus_err) 
{ writel(QUP_RESET_STATE, qup->base + QUP_STATE); goto done; } @@ -310,6 +312,7 @@ static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val, u32 opflags; u32 status; u32 shift = __ffs(op); + int ret = 0; len *= qup->one_byte_t; /* timeout after a wait of twice the max time */ @@ -321,18 +324,28 @@ static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val, if (((opflags & op) >> shift) == val) { if ((op == QUP_OUT_NOT_EMPTY) && qup->is_last) { - if (!(status & I2C_STATUS_BUS_ACTIVE)) - return 0; + if (!(status & I2C_STATUS_BUS_ACTIVE)) { + ret = 0; + goto done; + } } else { - return 0; + ret = 0; + goto done; } } - if (time_after(jiffies, timeout)) - return -ETIMEDOUT; - + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto done; + } usleep_range(len, len * 2); } + +done: + if (qup->bus_err || qup->qup_err) + ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO; + + return ret; } static void qup_i2c_set_write_mode_v2(struct qup_i2c_dev *qup, @@ -585,8 +598,8 @@ static void qup_i2c_bam_cb(void *data) } static int qup_sg_set_buf(struct scatterlist *sg, void *buf, - struct qup_i2c_tag *tg, unsigned int buflen, - struct qup_i2c_dev *qup, int map, int dir) + unsigned int buflen, struct qup_i2c_dev *qup, + int dir) { int ret; @@ -595,9 +608,6 @@ static int qup_sg_set_buf(struct scatterlist *sg, void *buf, if (!ret) return -EINVAL; - if (!map) - sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start); - return 0; } @@ -649,37 +659,37 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, u8 *tags; while (idx < num) { - blocks = (msg->len + limit) / limit; - rem = msg->len % limit; tx_len = 0, len = 0, i = 0; qup->is_last = (idx == (num - 1)); qup_i2c_set_blk_data(qup, msg); + blocks = qup->blk.count; + rem = msg->len - (blocks - 1) * limit; + if (msg->flags & I2C_M_RD) { rx_nents += (blocks * 2) + 1; tx_nents += 1; while (qup->blk.pos < blocks) { - /* length set to '0' implies 256 bytes */ - tlen = (i == (blocks - 1)) ? rem : 0; + tlen = (i == (blocks - 1)) ? rem : limit; tags = &qup->start_tag.start[off + len]; len += qup_i2c_set_tags(tags, qup, msg, 1); + qup->blk.data_len -= tlen; /* scratch buf to read the start and len tags */ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], &qup->brx.tag.start[0], - &qup->brx.tag, - 2, qup, 0, 0); + 2, qup, DMA_FROM_DEVICE); if (ret) return ret; ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], &msg->buf[limit * i], - NULL, tlen, qup, - 1, DMA_FROM_DEVICE); + tlen, qup, + DMA_FROM_DEVICE); if (ret) return ret; @@ -688,7 +698,7 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, } ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], &qup->start_tag.start[off], - &qup->start_tag, len, qup, 0, 0); + len, qup, DMA_TO_DEVICE); if (ret) return ret; @@ -696,30 +706,28 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, /* scratch buf to read the BAM EOT and FLUSH tags */ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++], &qup->brx.tag.start[0], - &qup->brx.tag, 2, - qup, 0, 0); + 2, qup, DMA_FROM_DEVICE); if (ret) return ret; } else { tx_nents += (blocks * 2); while (qup->blk.pos < blocks) { - tlen = (i == (blocks - 1)) ? rem : 0; + tlen = (i == (blocks - 1)) ? 
rem : limit; tags = &qup->start_tag.start[off + tx_len]; len = qup_i2c_set_tags(tags, qup, msg, 1); + qup->blk.data_len -= tlen; ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], - tags, - &qup->start_tag, len, - qup, 0, 0); + tags, len, + qup, DMA_TO_DEVICE); if (ret) return ret; tx_len += len; ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], &msg->buf[limit * i], - NULL, tlen, qup, 1, - DMA_TO_DEVICE); + tlen, qup, DMA_TO_DEVICE); if (ret) return ret; i++; @@ -738,8 +746,7 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, QUP_BAM_FLUSH_STOP; ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++], &qup->btx.tag.start[0], - &qup->btx.tag, len, - qup, 0, 0); + len, qup, DMA_TO_DEVICE); if (ret) return ret; tx_nents += 1; @@ -801,39 +808,35 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, } if (ret || qup->bus_err || qup->qup_err) { - if (qup->bus_err & QUP_I2C_NACK_FLAG) { - msg--; - dev_err(qup->dev, "NACK from %x\n", msg->addr); - ret = -EIO; + if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { + dev_err(qup->dev, "change to run state timed out"); + goto desc_err; + } - if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { - dev_err(qup->dev, "change to run state timed out"); - return ret; - } + if (rx_nents) + writel(QUP_BAM_INPUT_EOT, + qup->base + QUP_OUT_FIFO_BASE); - if (rx_nents) - writel(QUP_BAM_INPUT_EOT, - qup->base + QUP_OUT_FIFO_BASE); + writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE); - writel(QUP_BAM_FLUSH_STOP, - qup->base + QUP_OUT_FIFO_BASE); + qup_i2c_flush(qup); - qup_i2c_flush(qup); + /* wait for remaining interrupts to occur */ + if (!wait_for_completion_timeout(&qup->xfer, HZ)) + dev_err(qup->dev, "flush timed out\n"); - /* wait for remaining interrupts to occur */ - if (!wait_for_completion_timeout(&qup->xfer, HZ)) - dev_err(qup->dev, "flush timed out\n"); + qup_i2c_rel_dma(qup); - qup_i2c_rel_dma(qup); - } + ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO; } +desc_err: dma_unmap_sg(qup->dev, qup->btx.sg, tx_nents, DMA_TO_DEVICE); if (rx_nents) dma_unmap_sg(qup->dev, qup->brx.sg, rx_nents, DMA_FROM_DEVICE); -desc_err: + return ret; } @@ -849,9 +852,6 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, if (ret) goto out; - qup->bus_err = 0; - qup->qup_err = 0; - writel(0, qup->base + QUP_MX_INPUT_CNT); writel(0, qup->base + QUP_MX_OUTPUT_CNT); @@ -889,12 +889,8 @@ static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup, ret = -ETIMEDOUT; } - if (qup->bus_err || qup->qup_err) { - if (qup->bus_err & QUP_I2C_NACK_FLAG) { - dev_err(qup->dev, "NACK from %x\n", msg->addr); - ret = -EIO; - } - } + if (qup->bus_err || qup->qup_err) + ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO; return ret; } @@ -1020,7 +1016,7 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg) { u32 addr, len, val; - addr = (msg->addr << 1) | 1; + addr = i2c_8bit_addr_from_msg(msg); /* 0 is used to specify a length 256 (QUP_READ_LIMIT) */ len = (msg->len == QUP_READ_LIMIT) ? 
0 : msg->len;
@@ -1186,6 +1182,9 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
 	if (ret < 0)
 		goto out;
 
+	qup->bus_err = 0;
+	qup->qup_err = 0;
+
 	writel(1, qup->base + QUP_SW_RESET);
 	ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
 	if (ret)
@@ -1235,6 +1234,9 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
 	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
 	int ret, len, idx = 0, use_dma = 0;
 
+	qup->bus_err = 0;
+	qup->qup_err = 0;
+
 	ret = pm_runtime_get_sync(qup->dev);
 	if (ret < 0)
 		goto out;
@@ -1268,6 +1270,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
 		}
 	}
 
+	idx = 0;
+
 	do {
 		if (msgs[idx].len == 0) {
 			ret = -EINVAL;
@@ -1407,27 +1411,21 @@ static int qup_i2c_probe(struct platform_device *pdev)
 		/* 2 tag bytes for each block + 5 for start, stop tags */
 		size = blocks * 2 + 5;
 
-		qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
-					     size, 4, 0);
-		qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
-						      &qup->start_tag.addr);
+		qup->start_tag.start = devm_kzalloc(&pdev->dev,
+						    size, GFP_KERNEL);
 		if (!qup->start_tag.start) {
 			ret = -ENOMEM;
 			goto fail_dma;
 		}
 
-		qup->brx.tag.start = dma_pool_alloc(qup->dpool,
-						    GFP_KERNEL,
-						    &qup->brx.tag.addr);
+		qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
 		if (!qup->brx.tag.start) {
 			ret = -ENOMEM;
 			goto fail_dma;
 		}
 
-		qup->btx.tag.start = dma_pool_alloc(qup->dpool,
-						    GFP_KERNEL,
-						    &qup->btx.tag.addr);
+		qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
 		if (!qup->btx.tag.start) {
 			ret = -ENOMEM;
 			goto fail_dma;
@@ -1566,13 +1564,6 @@ static int qup_i2c_remove(struct platform_device *pdev)
 	struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
 
 	if (qup->is_dma) {
-		dma_pool_free(qup->dpool, qup->start_tag.start,
-			      qup->start_tag.addr);
-		dma_pool_free(qup->dpool, qup->brx.tag.start,
-			      qup->brx.tag.addr);
-		dma_pool_free(qup->dpool, qup->btx.tag.start,
-			      qup->btx.tag.addr);
-		dma_pool_destroy(qup->dpool);
 		dma_release_channel(qup->btx.dma);
 		dma_release_channel(qup->brx.dma);
 	}
@@ -58,6 +58,12 @@ enum {
 #define REG_CON_LASTACK   BIT(5) /* 1: send NACK after last received byte */
 #define REG_CON_ACTACK    BIT(6) /* 1: stop if NACK is received */
 
+#define REG_CON_TUNING_MASK GENMASK(15, 8)
+
+#define REG_CON_SDA_CFG(cfg) ((cfg) << 8)
+#define REG_CON_STA_CFG(cfg) ((cfg) << 12)
+#define REG_CON_STO_CFG(cfg) ((cfg) << 14)
+
 /* REG_MRXADDR bits */
 #define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
 
@@ -75,6 +81,77 @@ enum {
 #define WAIT_TIMEOUT 1000 /* ms */
 #define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */
 
+/**
+ * struct i2c_spec_values:
+ * @min_hold_start_ns: min hold time (repeated) START condition
+ * @min_low_ns: min LOW period of the SCL clock
+ * @min_high_ns: min HIGH period of the SCL clock
+ * @min_setup_start_ns: min set-up time for a repeated START condition
+ * @max_data_hold_ns: max data hold time
+ * @min_data_setup_ns: min data set-up time
+ * @min_setup_stop_ns: min set-up time for STOP condition
+ * @min_hold_buffer_ns: min bus free time between a STOP and
+ *			START condition
+ */
+struct i2c_spec_values {
+	unsigned long min_hold_start_ns;
+	unsigned long min_low_ns;
+	unsigned long min_high_ns;
+	unsigned long min_setup_start_ns;
+	unsigned long max_data_hold_ns;
+	unsigned long min_data_setup_ns;
+	unsigned long min_setup_stop_ns;
+	unsigned long min_hold_buffer_ns;
+};
+
+static const struct i2c_spec_values standard_mode_spec = {
+	.min_hold_start_ns = 4000,
+	.min_low_ns = 4700,
+	.min_high_ns = 4000,
+	.min_setup_start_ns = 4700,
+	.max_data_hold_ns = 3450,
+	.min_data_setup_ns = 250,
+	.min_setup_stop_ns = 4000,
+	.min_hold_buffer_ns = 4700,
+};
+
+static const struct i2c_spec_values fast_mode_spec = {
+	.min_hold_start_ns = 600,
+	.min_low_ns = 1300,
+	.min_high_ns = 600,
+	.min_setup_start_ns = 600,
+	.max_data_hold_ns = 900,
+	.min_data_setup_ns = 100,
+	.min_setup_stop_ns = 600,
+	.min_hold_buffer_ns = 1300,
+};
+
+static const struct i2c_spec_values fast_mode_plus_spec = {
+	.min_hold_start_ns = 260,
+	.min_low_ns = 500,
+	.min_high_ns = 260,
+	.min_setup_start_ns = 260,
+	.max_data_hold_ns = 400,
+	.min_data_setup_ns = 50,
+	.min_setup_stop_ns = 260,
+	.min_hold_buffer_ns = 500,
+};
+
+/**
+ * struct rk3x_i2c_calced_timings:
+ * @div_low: Divider output for low
+ * @div_high: Divider output for high
+ * @tuning: Used to adjust setup/hold data time,
+ *	    setup/hold start time and setup stop time for
+ *	    v1's calc_timings; the tuning should all be 0
+ *	    for old hardware, i.e. anything using v0's calc_timings.
+ */
+struct rk3x_i2c_calced_timings {
+	unsigned long div_low;
+	unsigned long div_high;
+	unsigned int tuning;
+};
+
 enum rk3x_i2c_state {
 	STATE_IDLE,
 	STATE_START,
@@ -85,11 +162,35 @@ enum rk3x_i2c_state {
 
 /**
  * @grf_offset: offset inside the grf regmap for setting the i2c type
+ * @calc_timings: Callback used to calculate the i2c timing information
  */
 struct rk3x_i2c_soc_data {
 	int grf_offset;
+	int (*calc_timings)(unsigned long, struct i2c_timings *,
+			    struct rk3x_i2c_calced_timings *);
 };
 
+/**
+ * struct rk3x_i2c - private data of the controller
+ * @adap: corresponding I2C adapter
+ * @dev: device for this controller
+ * @soc_data: related soc data struct
+ * @regs: virtual memory area
+ * @clk: function clk for rk3399 or function & Bus clks for others
+ * @pclk: Bus clk for rk3399
+ * @clk_rate_nb: i2c clk rate change notify
+ * @t: I2C known timing information
+ * @lock: spinlock for the i2c bus
+ * @wait: the waitqueue to wait for i2c transfer
+ * @busy: the condition for the event to wait for
+ * @msg: current i2c message
+ * @addr: addr of i2c slave device
+ * @mode: mode of i2c transfer
+ * @is_last_msg: flag determines whether it is the last msg in this transfer
+ * @state: state of i2c transfer
+ * @processed: byte length which has been sent or received
+ * @error: error code for i2c transfer
+ */
 struct rk3x_i2c {
 	struct i2c_adapter adap;
 	struct device *dev;
@@ -98,6 +199,7 @@ struct rk3x_i2c {
 	/* Hardware resources */
 	void __iomem *regs;
 	struct clk *clk;
+	struct clk *pclk;
 	struct notifier_block clk_rate_nb;
 
 	/* Settings */
@@ -116,7 +218,7 @@ struct rk3x_i2c {
 
 	/* I2C state machine */
 	enum rk3x_i2c_state state;
-	unsigned int processed; /* sent/received bytes */
+	unsigned int processed;
 	int error;
 };
 
@@ -142,13 +244,12 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
  */
 static void rk3x_i2c_start(struct rk3x_i2c *i2c)
 {
-	u32 val;
+	u32 val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
 
-	rk3x_i2c_clean_ipd(i2c);
 	i2c_writel(i2c, REG_INT_START, REG_IEN);
 
 	/* enable adapter with correct mode, send START condition */
-	val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
+	val |= REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
 
 	/* if we want to react to NACK, set ACTACK bit */
 	if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
@@ -189,7 +290,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
 		 * get the intended effect by resetting its internal state
 		 * and issuing an ordinary START.
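+		 *
+		 * With the tuning fields now stored in REG_CON, this reset
+		 * must preserve the REG_CON_TUNING_MASK bits, hence the
+		 * masked read-back below instead of writing a plain 0.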
 		 */
-		i2c_writel(i2c, 0, REG_CON);
+		ctrl = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
+		i2c_writel(i2c, ctrl, REG_CON);
 
 		/* signal that we are finished with the current msg */
 		wake_up(&i2c->wait);
@@ -431,26 +533,37 @@ out:
 }
 
 /**
+ * Get timing values of I2C specification
+ *
+ * @speed: Desired SCL frequency
+ *
+ * Returns: Matched i2c spec values.
+ */
+static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
+{
+	if (speed <= 100000)
+		return &standard_mode_spec;
+	else if (speed <= 400000)
+		return &fast_mode_spec;
+	else
+		return &fast_mode_plus_spec;
+}
+
+/**
  * Calculate divider values for desired SCL frequency
  *
  * @clk_rate: I2C input clock rate
- * @t: Known I2C timing information.
- * @div_low: Divider output for low
- * @div_high: Divider output for high
+ * @t: Known I2C timing information
+ * @t_calc: Calculated rk3x private timings that would be written into regs
 *
 * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
 * a best-effort divider value is returned in divs. If the target rate is
 * too high, we silently use the highest possible rate.
 */
-static int rk3x_i2c_calc_divs(unsigned long clk_rate,
-			      struct i2c_timings *t,
-			      unsigned long *div_low,
-			      unsigned long *div_high)
+static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
+				    struct i2c_timings *t,
+				    struct rk3x_i2c_calced_timings *t_calc)
 {
-	unsigned long spec_min_low_ns, spec_min_high_ns;
-	unsigned long spec_setup_start, spec_max_data_hold_ns;
-	unsigned long data_hold_buffer_ns;
-
 	unsigned long min_low_ns, min_high_ns;
 	unsigned long max_low_ns, min_total_ns;
@@ -462,6 +575,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
 	unsigned long min_div_for_hold, min_total_div;
 	unsigned long extra_div, extra_low_div, ideal_low_div;
 
+	unsigned long data_hold_buffer_ns = 50;
+	const struct i2c_spec_values *spec;
 	int ret = 0;
 
 	/* Only support standard-mode and fast-mode */
@@ -484,22 +599,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
 	 * This is because the i2c host on Rockchip holds the data line
 	 * for half the low time.
 	 */
-	if (t->bus_freq_hz <= 100000) {
-		/* Standard-mode */
-		spec_min_low_ns = 4700;
-		spec_setup_start = 4700;
-		spec_min_high_ns = 4000;
-		spec_max_data_hold_ns = 3450;
-		data_hold_buffer_ns = 50;
-	} else {
-		/* Fast-mode */
-		spec_min_low_ns = 1300;
-		spec_setup_start = 600;
-		spec_min_high_ns = 600;
-		spec_max_data_hold_ns = 900;
-		data_hold_buffer_ns = 50;
-	}
-
-	min_high_ns = t->scl_rise_ns + spec_min_high_ns;
+	spec = rk3x_i2c_get_spec(t->bus_freq_hz);
+	min_high_ns = t->scl_rise_ns + spec->min_high_ns;
 
 	/*
 	 * Timings for repeated start:
@@ -509,14 +610,14 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
 	 * We need to account for those rules in picking our "high" time so
 	 * we meet tSU;STA and tHD;STA times.
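+	 *
+	 * As a worked example with illustrative numbers: in fast-mode with
+	 * scl_rise_ns = 300 and sda_fall_ns = 300, the base value is
+	 * 300 + 600 = 900 ns, the tSU;STA rule asks for
+	 * DIV_ROUND_UP((300 + 600) * 1000, 875) = 1029 ns and the tHD;STA
+	 * rule for DIV_ROUND_UP(300 + 600 + 300 + 600, 2) = 900 ns, so
+	 * min_high_ns ends up as 1029 ns.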
 	 */
-	min_high_ns = max(min_high_ns,
-			  DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start) * 1000, 875));
-	min_high_ns = max(min_high_ns,
-			  DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start +
-					t->sda_fall_ns + spec_min_high_ns), 2));
-
-	min_low_ns = t->scl_fall_ns + spec_min_low_ns;
-	max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
+	min_high_ns = max(min_high_ns, DIV_ROUND_UP(
+		(t->scl_rise_ns + spec->min_setup_start_ns) * 1000, 875));
+	min_high_ns = max(min_high_ns, DIV_ROUND_UP(
+		(t->scl_rise_ns + spec->min_setup_start_ns + t->sda_fall_ns +
+		spec->min_high_ns), 2));
+
+	min_low_ns = t->scl_fall_ns + spec->min_low_ns;
+	max_low_ns = spec->max_data_hold_ns * 2 - data_hold_buffer_ns;
 	min_total_ns = min_low_ns + min_high_ns;
 
 	/* Adjust to avoid overflow */
@@ -552,8 +653,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
 		 * Time needed to meet hold requirements is important.
 		 * Just use that.
 		 */
-		*div_low = min_low_div;
-		*div_high = min_high_div;
+		t_calc->div_low = min_low_div;
+		t_calc->div_high = min_high_div;
 	} else {
 		/*
 		 * We've got to distribute some time among the low and high
@@ -582,25 +683,186 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
 		/* Give low the "ideal" and give high whatever extra is left */
 		extra_low_div = ideal_low_div - min_low_div;
 
-		*div_low = ideal_low_div;
-		*div_high = min_high_div + (extra_div - extra_low_div);
+		t_calc->div_low = ideal_low_div;
+		t_calc->div_high = min_high_div + (extra_div - extra_low_div);
 	}
 
 	/*
 	 * Adjust to the fact that the hardware has an implicit "+1".
 	 * NOTE: Above calculations always produce div_low > 0 and div_high > 0.
 	 */
-	*div_low = *div_low - 1;
-	*div_high = *div_high - 1;
+	t_calc->div_low--;
+	t_calc->div_high--;
+
+	/* Maximum divider supported by hw is 0xffff */
+	if (t_calc->div_low > 0xffff) {
+		t_calc->div_low = 0xffff;
+		ret = -EINVAL;
+	}
+
+	if (t_calc->div_high > 0xffff) {
+		t_calc->div_high = 0xffff;
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * Calculate timing values for desired SCL frequency
+ *
+ * @clk_rate: I2C input clock rate
+ * @t: Known I2C timing information
+ * @t_calc: Calculated rk3x private timings that would be written into regs
+ *
+ * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * a best-effort divider value is returned in *t_calc. If the target rate is
+ * too high, we silently use the highest possible rate.
+ * The following formulas are v1's method to calculate timings.
+ *
+ * l = divl + 1;
+ * h = divh + 1;
+ * s = sda_update_config + 1;
+ * u = start_setup_config + 1;
+ * p = stop_setup_config + 1;
+ * T = Tclk_i2c;
+ *
+ * tHigh = 8 * h * T;
+ * tLow = 8 * l * T;
+ *
+ * tHD;sda = (l * s + 1) * T;
+ * tSU;sda = [(8 - s) * l + 1] * T;
+ * tI2C = 8 * (l + h) * T;
+ *
+ * tSU;sta = (8h * u + 1) * T;
+ * tHD;sta = [8h * (u + 1) - 1] * T;
+ * tSU;sto = (8h * p + 1) * T;
+ */
+static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
+				    struct i2c_timings *t,
+				    struct rk3x_i2c_calced_timings *t_calc)
+{
+	unsigned long min_low_ns, min_high_ns, min_total_ns;
+	unsigned long min_setup_start_ns, min_setup_data_ns;
+	unsigned long min_setup_stop_ns, max_hold_data_ns;
+
+	unsigned long clk_rate_khz, scl_rate_khz;
+
+	unsigned long min_low_div, min_high_div;
+
+	unsigned long min_div_for_hold, min_total_div;
+	unsigned long extra_div, extra_low_div;
+	unsigned long sda_update_cfg, stp_sta_cfg, stp_sto_cfg;
+
+	const struct i2c_spec_values *spec;
+	int ret = 0;
+
+	/* Support standard-mode, fast-mode and fast-mode plus */
+	if (WARN_ON(t->bus_freq_hz > 1000000))
+		t->bus_freq_hz = 1000000;
+
+	/* prevent scl_rate_khz from becoming 0 */
+	if (WARN_ON(t->bus_freq_hz < 1000))
+		t->bus_freq_hz = 1000;
+
+	/*
+	 * min_low_ns: The minimum number of ns we need to hold low to
+	 * meet I2C specification, should include fall time.
+	 * min_high_ns: The minimum number of ns we need to hold high to
+	 * meet I2C specification, should include rise time.
+	 */
+	spec = rk3x_i2c_get_spec(t->bus_freq_hz);

+	/* calculate min-divh and min-divl */
+	clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
+	scl_rate_khz = t->bus_freq_hz / 1000;
+	min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8);
+
+	min_high_ns = t->scl_rise_ns + spec->min_high_ns;
+	min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000);
+
+	min_low_ns = t->scl_fall_ns + spec->min_low_ns;
+	min_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000);
+
+	/*
+	 * Final divh and divl must be greater than 0, otherwise the
+	 * hardware would not output the i2c clk.
+	 */
+	min_high_div = (min_high_div < 1) ? 2 : min_high_div;
+	min_low_div = (min_low_div < 1) ? 2 : min_low_div;
+
+	/* These are the min dividers needed for min hold times. */
+	min_div_for_hold = (min_low_div + min_high_div);
+	min_total_ns = min_low_ns + min_high_ns;
+
+	/*
+	 * This is the maximum divider so we don't go over the maximum.
+	 * We don't round up here (we round down) since this is a maximum.
+	 */
+	if (min_div_for_hold >= min_total_div) {
+		/*
+		 * Time needed to meet hold requirements is important.
+		 * Just use that.
+		 */
+		t_calc->div_low = min_low_div;
+		t_calc->div_high = min_high_div;
+	} else {
+		/*
+		 * We've got to distribute some time among the low and high
+		 * so we don't run too fast.
+		 * We'll try to split things up by the scale of min_low_div and
+		 * min_high_div, biasing slightly towards having a higher div
+		 * for low (spend more time low).
+		 */
+		extra_div = min_total_div - min_div_for_hold;
+		extra_low_div = DIV_ROUND_UP(min_low_div * extra_div,
+					     min_div_for_hold);
+
+		t_calc->div_low = min_low_div + extra_low_div;
+		t_calc->div_high = min_high_div + (extra_div - extra_low_div);
+	}
+
+	/*
+	 * Calculate the sda data hold count by the rules above; data_upd_st = 3
+	 * is an appropriate value to reduce the calculated times.
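+	 * For instance, with s = sda_update_cfg = 3 and l = div_low, the loop
+	 * below compares tHD;sda = (3 * l + 1) * T against the spec maximum
+	 * and tSU;sda = (5 * l + 1) * T against the spec minimum, and
+	 * decrements s until both constraints hold.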
+ */ + for (sda_update_cfg = 3; sda_update_cfg > 0; sda_update_cfg--) { + max_hold_data_ns = DIV_ROUND_UP((sda_update_cfg + * (t_calc->div_low) + 1) + * 1000000, clk_rate_khz); + min_setup_data_ns = DIV_ROUND_UP(((8 - sda_update_cfg) + * (t_calc->div_low) + 1) + * 1000000, clk_rate_khz); + if ((max_hold_data_ns < spec->max_data_hold_ns) && + (min_setup_data_ns > spec->min_data_setup_ns)) + break; + } + + /* calculate setup start config */ + min_setup_start_ns = t->scl_rise_ns + spec->min_setup_start_ns; + stp_sta_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_start_ns + - 1000000, 8 * 1000000 * (t_calc->div_high)); + + /* calculate setup stop config */ + min_setup_stop_ns = t->scl_rise_ns + spec->min_setup_stop_ns; + stp_sto_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_stop_ns + - 1000000, 8 * 1000000 * (t_calc->div_high)); + + t_calc->tuning = REG_CON_SDA_CFG(--sda_update_cfg) | + REG_CON_STA_CFG(--stp_sta_cfg) | + REG_CON_STO_CFG(--stp_sto_cfg); + + t_calc->div_low--; + t_calc->div_high--; /* Maximum divider supported by hw is 0xffff */ - if (*div_low > 0xffff) { - *div_low = 0xffff; + if (t_calc->div_low > 0xffff) { + t_calc->div_low = 0xffff; ret = -EINVAL; } - if (*div_high > 0xffff) { - *div_high = 0xffff; + if (t_calc->div_high > 0xffff) { + t_calc->div_high = 0xffff; ret = -EINVAL; } @@ -610,19 +872,31 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate) { struct i2c_timings *t = &i2c->t; - unsigned long div_low, div_high; + struct rk3x_i2c_calced_timings calc; u64 t_low_ns, t_high_ns; + unsigned long flags; + u32 val; int ret; - ret = rk3x_i2c_calc_divs(clk_rate, t, &div_low, &div_high); + ret = i2c->soc_data->calc_timings(clk_rate, t, &calc); WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz); - clk_enable(i2c->clk); - i2c_writel(i2c, (div_high << 16) | (div_low & 0xffff), REG_CLKDIV); - clk_disable(i2c->clk); + clk_enable(i2c->pclk); + + spin_lock_irqsave(&i2c->lock, flags); + val = i2c_readl(i2c, REG_CON); + val &= ~REG_CON_TUNING_MASK; + val |= calc.tuning; + i2c_writel(i2c, val, REG_CON); + i2c_writel(i2c, (calc.div_high << 16) | (calc.div_low & 0xffff), + REG_CLKDIV); + spin_unlock_irqrestore(&i2c->lock, flags); + + clk_disable(i2c->pclk); - t_low_ns = div_u64(((u64)div_low + 1) * 8 * 1000000000, clk_rate); - t_high_ns = div_u64(((u64)div_high + 1) * 8 * 1000000000, clk_rate); + t_low_ns = div_u64(((u64)calc.div_low + 1) * 8 * 1000000000, clk_rate); + t_high_ns = div_u64(((u64)calc.div_high + 1) * 8 * 1000000000, + clk_rate); dev_dbg(i2c->dev, "CLK %lukhz, Req %uns, Act low %lluns high %lluns\n", clk_rate / 1000, @@ -652,12 +926,17 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long { struct clk_notifier_data *ndata = data; struct rk3x_i2c *i2c = container_of(nb, struct rk3x_i2c, clk_rate_nb); - unsigned long div_low, div_high; + struct rk3x_i2c_calced_timings calc; switch (event) { case PRE_RATE_CHANGE: - if (rk3x_i2c_calc_divs(ndata->new_rate, &i2c->t, - &div_low, &div_high) != 0) + /* + * Try the calculation (but don't store the result) ahead of + * time to see if we need to block the clock change. Timings + * shouldn't actually take effect until rk3x_i2c_adapt_div(). 
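+		 * In other words, the rate change is vetoed (NOTIFY_STOP
+		 * below) when no valid divider exists for the new rate; the
+		 * divider registers themselves are only rewritten later, from
+		 * rk3x_i2c_adapt_div().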
+ */ + if (i2c->soc_data->calc_timings(ndata->new_rate, &i2c->t, + &calc) != 0) return NOTIFY_STOP; /* scale up */ @@ -767,12 +1046,14 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, { struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data; unsigned long timeout, flags; + u32 val; int ret = 0; int i; spin_lock_irqsave(&i2c->lock, flags); clk_enable(i2c->clk); + clk_enable(i2c->pclk); i2c->is_last_msg = false; @@ -806,7 +1087,9 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, /* Force a STOP condition without interrupt */ i2c_writel(i2c, 0, REG_IEN); - i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON); + val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK; + val |= REG_CON_EN | REG_CON_STOP; + i2c_writel(i2c, val, REG_CON); i2c->state = STATE_IDLE; @@ -820,7 +1103,9 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, } } + clk_disable(i2c->pclk); clk_disable(i2c->clk); + spin_unlock_irqrestore(&i2c->lock, flags); return ret < 0 ? ret : num; @@ -836,17 +1121,52 @@ static const struct i2c_algorithm rk3x_i2c_algorithm = { .functionality = rk3x_i2c_func, }; -static struct rk3x_i2c_soc_data soc_data[3] = { - { .grf_offset = 0x154 }, /* rk3066 */ - { .grf_offset = 0x0a4 }, /* rk3188 */ - { .grf_offset = -1 }, /* no I2C switching needed */ +static const struct rk3x_i2c_soc_data rk3066_soc_data = { + .grf_offset = 0x154, + .calc_timings = rk3x_i2c_v0_calc_timings, +}; + +static const struct rk3x_i2c_soc_data rk3188_soc_data = { + .grf_offset = 0x0a4, + .calc_timings = rk3x_i2c_v0_calc_timings, +}; + +static const struct rk3x_i2c_soc_data rk3228_soc_data = { + .grf_offset = -1, + .calc_timings = rk3x_i2c_v0_calc_timings, +}; + +static const struct rk3x_i2c_soc_data rk3288_soc_data = { + .grf_offset = -1, + .calc_timings = rk3x_i2c_v0_calc_timings, +}; + +static const struct rk3x_i2c_soc_data rk3399_soc_data = { + .grf_offset = -1, + .calc_timings = rk3x_i2c_v1_calc_timings, }; static const struct of_device_id rk3x_i2c_match[] = { - { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, - { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, - { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] }, - { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, + { + .compatible = "rockchip,rk3066-i2c", + .data = (void *)&rk3066_soc_data + }, + { + .compatible = "rockchip,rk3188-i2c", + .data = (void *)&rk3188_soc_data + }, + { + .compatible = "rockchip,rk3228-i2c", + .data = (void *)&rk3228_soc_data + }, + { + .compatible = "rockchip,rk3288-i2c", + .data = (void *)&rk3288_soc_data + }, + { + .compatible = "rockchip,rk3399-i2c", + .data = (void *)&rk3399_soc_data + }, {}, }; MODULE_DEVICE_TABLE(of, rk3x_i2c_match); @@ -886,12 +1206,6 @@ static int rk3x_i2c_probe(struct platform_device *pdev) spin_lock_init(&i2c->lock); init_waitqueue_head(&i2c->wait); - i2c->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(i2c->clk)) { - dev_err(&pdev->dev, "cannot get clock\n"); - return PTR_ERR(i2c->clk); - } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); i2c->regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(i2c->regs)) @@ -945,17 +1259,44 @@ static int rk3x_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, i2c); + if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) { + /* Only one clock to use for bus clock and peripheral clock */ + i2c->clk = devm_clk_get(&pdev->dev, NULL); + i2c->pclk = i2c->clk; + } else { + i2c->clk = devm_clk_get(&pdev->dev, "i2c"); + i2c->pclk = devm_clk_get(&pdev->dev, 
"pclk"); + } + + if (IS_ERR(i2c->clk)) { + ret = PTR_ERR(i2c->clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Can't get bus clk: %d\n", ret); + return ret; + } + if (IS_ERR(i2c->pclk)) { + ret = PTR_ERR(i2c->pclk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Can't get periph clk: %d\n", ret); + return ret; + } + ret = clk_prepare(i2c->clk); if (ret < 0) { - dev_err(&pdev->dev, "Could not prepare clock\n"); + dev_err(&pdev->dev, "Can't prepare bus clk: %d\n", ret); return ret; } + ret = clk_prepare(i2c->pclk); + if (ret < 0) { + dev_err(&pdev->dev, "Can't prepare periph clock: %d\n", ret); + goto err_clk; + } i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb; ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb); if (ret != 0) { dev_err(&pdev->dev, "Unable to register clock notifier\n"); - goto err_clk; + goto err_pclk; } clk_rate = clk_get_rate(i2c->clk); @@ -973,6 +1314,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev) err_clk_notifier: clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb); +err_pclk: + clk_unprepare(i2c->pclk); err_clk: clk_unprepare(i2c->clk); return ret; @@ -985,6 +1328,7 @@ static int rk3x_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&i2c->adap); clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb); + clk_unprepare(i2c->pclk); clk_unprepare(i2c->clk); return 0; @@ -125,7 +125,7 @@ static struct i2c_algorithm osif_algorithm = { #define USB_OSIF_VENDOR_ID 0x1964 #define USB_OSIF_PRODUCT_ID 0x0001 -static struct usb_device_id osif_table[] = { +static const struct usb_device_id osif_table[] = { { USB_DEVICE(USB_OSIF_VENDOR_ID, USB_OSIF_PRODUCT_ID) }, { } }; @@ -912,7 +912,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) ret = tegra_i2c_init(i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize i2c controller"); - goto unprepare_div_clk; + goto disable_div_clk; } ret = devm_request_irq(&pdev->dev, i2c_dev->irq, @@ -70,28 +70,14 @@ static int i2c_versatile_probe(struct platform_device *dev) struct resource *r; int ret; + i2c = devm_kzalloc(&dev->dev, sizeof(struct i2c_versatile), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + r = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!r) { - ret = -EINVAL; - goto err_out; - } - - if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) { - ret = -EBUSY; - goto err_out; - } - - i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL); - if (!i2c) { - ret = -ENOMEM; - goto err_release; - } - - i2c->base = ioremap(r->start, resource_size(r)); - if (!i2c->base) { - ret = -ENOMEM; - goto err_free; - } + i2c->base = devm_ioremap_resource(&dev->dev, r); + if (IS_ERR(i2c->base)) + return PTR_ERR(i2c->base); writel(SCL | SDA, i2c->base + I2C_CONTROLS); @@ -105,18 +91,12 @@ static int i2c_versatile_probe(struct platform_device *dev) i2c->adap.nr = dev->id; ret = i2c_bit_add_numbered_bus(&i2c->adap); - if (ret >= 0) { - platform_set_drvdata(dev, i2c); - return 0; - } - - iounmap(i2c->base); - err_free: - kfree(i2c); - err_release: - release_mem_region(r->start, resource_size(r)); - err_out: - return ret; + if (ret < 0) + return ret; + + platform_set_drvdata(dev, i2c); + + return 0; } static int i2c_versatile_remove(struct platform_device *dev) @@ -6,6 +6,7 @@ * warranty of any kind, whether express or implied. 
*/ +#include <linux/acpi.h> #include <linux/completion.h> #include <linux/i2c.h> #include <linux/init.h> @@ -341,11 +342,10 @@ static struct i2c_algorithm xlp9xx_i2c_algo = { static int xlp9xx_i2c_get_frequency(struct platform_device *pdev, struct xlp9xx_i2c_dev *priv) { - struct device_node *np = pdev->dev.of_node; u32 freq; int err; - err = of_property_read_u32(np, "clock-frequency", &freq); + err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq); if (err) { freq = XLP9XX_I2C_DEFAULT_FREQ; dev_dbg(&pdev->dev, "using default frequency %u\n", freq); @@ -429,12 +429,21 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = { { /* sentinel */ }, }; +#ifdef CONFIG_ACPI +static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = { + {"BRCM9007", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids); +#endif + static struct platform_driver xlp9xx_i2c_driver = { .probe = xlp9xx_i2c_probe, .remove = xlp9xx_i2c_remove, .driver = { .name = "xlp9xx-i2c", .of_match_table = xlp9xx_i2c_of_match, + .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids), }, }; @@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num); * The board info passed can safely be __initdata, but be careful of embedded * pointers (for platform_data, functions, etc) since that won't be copied. */ -int __init -i2c_register_board_info(int busnum, - struct i2c_board_info const *info, unsigned len) +int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len) { int status; @@ -27,6 +27,8 @@ I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com> */ +#define pr_fmt(fmt) "i2c-core: " fmt + #include <dt-bindings/i2c/i2c.h> #include <asm/uaccess.h> #include <linux/acpi.h> @@ -107,12 +109,11 @@ struct acpi_i2c_lookup { acpi_handle device_handle; }; -static int acpi_i2c_find_address(struct acpi_resource *ares, void *data) +static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data) { struct acpi_i2c_lookup *lookup = data; struct i2c_board_info *info = lookup->info; struct acpi_resource_i2c_serialbus *sb; - acpi_handle adapter_handle; acpi_status status; if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) @@ -122,80 +123,102 @@ static int acpi_i2c_find_address(struct acpi_resource *ares, void *data) if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) return 1; - /* - * Extract the ResourceSource and make sure that the handle matches - * with the I2C adapter handle. 
- */ status = acpi_get_handle(lookup->device_handle, sb->resource_source.string_ptr, - &adapter_handle); - if (ACPI_SUCCESS(status) && adapter_handle == lookup->adapter_handle) { - info->addr = sb->slave_address; - if (sb->access_mode == ACPI_I2C_10BIT_MODE) - info->flags |= I2C_CLIENT_TEN; - } + &lookup->adapter_handle); + if (!ACPI_SUCCESS(status)) + return 1; + + info->addr = sb->slave_address; + if (sb->access_mode == ACPI_I2C_10BIT_MODE) + info->flags |= I2C_CLIENT_TEN; return 1; } -static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, - void *data, void **return_value) +static int acpi_i2c_get_info(struct acpi_device *adev, + struct i2c_board_info *info, + acpi_handle *adapter_handle) { - struct i2c_adapter *adapter = data; struct list_head resource_list; - struct acpi_i2c_lookup lookup; struct resource_entry *entry; - struct i2c_board_info info; - struct acpi_device *adev; + struct acpi_i2c_lookup lookup; int ret; - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - if (acpi_bus_get_status(adev) || !adev->status.present) - return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present || + acpi_device_enumerated(adev)) + return -EINVAL; - memset(&info, 0, sizeof(info)); - info.fwnode = acpi_fwnode_handle(adev); + memset(info, 0, sizeof(*info)); + info->fwnode = acpi_fwnode_handle(adev); memset(&lookup, 0, sizeof(lookup)); - lookup.adapter_handle = ACPI_HANDLE(&adapter->dev); - lookup.device_handle = handle; - lookup.info = &info; + lookup.device_handle = acpi_device_handle(adev); + lookup.info = info; - /* - * Look up for I2cSerialBus resource with ResourceSource that - * matches with this adapter. - */ + /* Look up for I2cSerialBus resource */ INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, - acpi_i2c_find_address, &lookup); + acpi_i2c_fill_info, &lookup); acpi_dev_free_resource_list(&resource_list); - if (ret < 0 || !info.addr) - return AE_OK; + if (ret < 0 || !info->addr) + return -EINVAL; + + *adapter_handle = lookup.adapter_handle; /* Then fill IRQ number if any */ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); if (ret < 0) - return AE_OK; + return -EINVAL; resource_list_for_each_entry(entry, &resource_list) { if (resource_type(entry->res) == IORESOURCE_IRQ) { - info.irq = entry->res->start; + info->irq = entry->res->start; break; } } acpi_dev_free_resource_list(&resource_list); + strlcpy(info->type, dev_name(&adev->dev), sizeof(info->type)); + + return 0; +} + +static void acpi_i2c_register_device(struct i2c_adapter *adapter, + struct acpi_device *adev, + struct i2c_board_info *info) +{ adev->power.flags.ignore_parent = true; - strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); - if (!i2c_new_device(adapter, &info)) { + acpi_device_set_enumerated(adev); + + if (!i2c_new_device(adapter, info)) { adev->power.flags.ignore_parent = false; dev_err(&adapter->dev, "failed to add I2C device %s from ACPI\n", dev_name(&adev->dev)); } +} + +static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct i2c_adapter *adapter = data; + struct acpi_device *adev; + acpi_handle adapter_handle; + struct i2c_board_info info; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + if (acpi_i2c_get_info(adev, &info, &adapter_handle)) + return AE_OK; + + if (adapter_handle != ACPI_HANDLE(&adapter->dev)) + return AE_OK; + + acpi_i2c_register_device(adapter, adev, &info); return AE_OK; } @@ -225,8 +248,80 @@ static void 
acpi_i2c_register_devices(struct i2c_adapter *adap) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); } +static int acpi_i2c_match_adapter(struct device *dev, void *data) +{ + struct i2c_adapter *adapter = i2c_verify_adapter(dev); + + if (!adapter) + return 0; + + return ACPI_HANDLE(dev) == (acpi_handle)data; +} + +static int acpi_i2c_match_device(struct device *dev, void *data) +{ + return ACPI_COMPANION(dev) == data; +} + +static struct i2c_adapter *acpi_i2c_find_adapter_by_handle(acpi_handle handle) +{ + struct device *dev; + + dev = bus_find_device(&i2c_bus_type, NULL, handle, + acpi_i2c_match_adapter); + return dev ? i2c_verify_adapter(dev) : NULL; +} + +static struct i2c_client *acpi_i2c_find_client_by_adev(struct acpi_device *adev) +{ + struct device *dev; + + dev = bus_find_device(&i2c_bus_type, NULL, adev, acpi_i2c_match_device); + return dev ? i2c_verify_client(dev) : NULL; +} + +static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value, + void *arg) +{ + struct acpi_device *adev = arg; + struct i2c_board_info info; + acpi_handle adapter_handle; + struct i2c_adapter *adapter; + struct i2c_client *client; + + switch (value) { + case ACPI_RECONFIG_DEVICE_ADD: + if (acpi_i2c_get_info(adev, &info, &adapter_handle)) + break; + + adapter = acpi_i2c_find_adapter_by_handle(adapter_handle); + if (!adapter) + break; + + acpi_i2c_register_device(adapter, adev, &info); + break; + case ACPI_RECONFIG_DEVICE_REMOVE: + if (!acpi_device_enumerated(adev)) + break; + + client = acpi_i2c_find_client_by_adev(adev); + if (!client) + break; + + i2c_unregister_device(client); + put_device(&client->dev); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block i2c_acpi_notifier = { + .notifier_call = acpi_i2c_notify, +}; #else /* CONFIG_ACPI */ static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } +extern struct notifier_block i2c_acpi_notifier; #endif /* CONFIG_ACPI */ #ifdef CONFIG_ACPI_I2C_OPREGION @@ -400,7 +495,8 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command, break; default: - pr_info("protocol(0x%02x) is not supported.\n", accessor_type); + dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", + accessor_type, client->addr); ret = AE_BAD_PARAMETER; goto err; } @@ -666,6 +762,47 @@ int i2c_recover_bus(struct i2c_adapter *adap) } EXPORT_SYMBOL_GPL(i2c_recover_bus); +static void i2c_init_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + char *err_str; + + if (!bri) + return; + + if (!bri->recover_bus) { + err_str = "no recover_bus() found"; + goto err; + } + + /* Generic GPIO recovery */ + if (bri->recover_bus == i2c_generic_gpio_recovery) { + if (!gpio_is_valid(bri->scl_gpio)) { + err_str = "invalid SCL gpio"; + goto err; + } + + if (gpio_is_valid(bri->sda_gpio)) + bri->get_sda = get_sda_gpio_value; + else + bri->get_sda = NULL; + + bri->get_scl = get_scl_gpio_value; + bri->set_scl = set_scl_gpio_value; + } else if (bri->recover_bus == i2c_generic_scl_recovery) { + /* Generic SCL recovery */ + if (!bri->set_scl || !bri->get_scl) { + err_str = "no {get|set}_scl() found"; + goto err; + } + } + + return; + err: + dev_err(&adap->dev, "Not using recovery: %s\n", err_str); + adap->bus_recovery_info = NULL; +} + static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); @@ -1089,6 +1226,8 @@ void i2c_unregister_device(struct i2c_client *client) { if (client->dev.of_node) of_node_clear_flag(client->dev.of_node, 
OF_POPULATED); + if (ACPI_COMPANION(&client->dev)) + acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); device_unregister(&client->dev); } EXPORT_SYMBOL_GPL(i2c_unregister_device); @@ -1145,6 +1284,47 @@ struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address) } EXPORT_SYMBOL_GPL(i2c_new_dummy); +/** + * i2c_new_secondary_device - Helper to get the instantiated secondary address + * and create the associated device + * @client: Handle to the primary client + * @name: Handle to specify which secondary address to get + * @default_addr: Used as a fallback if no secondary address was specified + * Context: can sleep + * + * I2C clients can be composed of multiple I2C slaves bound together in a single + * component. The I2C client driver then binds to the master I2C slave and needs + * to create I2C dummy clients to communicate with all the other slaves. + * + * This function creates and returns an I2C dummy client whose I2C address is + * retrieved from the platform firmware based on the given slave name. If no + * address is specified by the firmware default_addr is used. + * + * On DT-based platforms the address is retrieved from the "reg" property entry + * cell whose "reg-names" value matches the slave name. + * + * This returns the new i2c client, which should be saved for later use with + * i2c_unregister_device(); or NULL to indicate an error. + */ +struct i2c_client *i2c_new_secondary_device(struct i2c_client *client, + const char *name, + u16 default_addr) +{ + struct device_node *np = client->dev.of_node; + u32 addr = default_addr; + int i; + + if (np) { + i = of_property_match_string(np, "reg-names", name); + if (i >= 0) + of_property_read_u32_index(np, "reg", i, &addr); + } + + dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr); + return i2c_new_dummy(client->adapter, addr); +} +EXPORT_SYMBOL_GPL(i2c_new_secondary_device); + /* ------------------------------------------------------------------------- */ /* I2C bus adapters -- one roots each I2C or SMBUS segment */ @@ -1513,7 +1693,7 @@ static int __process_new_adapter(struct device_driver *d, void *data) static int i2c_register_adapter(struct i2c_adapter *adap) { - int res = 0; + int res = -EINVAL; /* Can't register until after driver model init */ if (WARN_ON(!is_registered)) { @@ -1522,15 +1702,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap) } /* Sanity checks */ - if (unlikely(adap->name[0] == '\0')) { - pr_err("i2c-core: Attempt to register an adapter with " - "no name!\n"); - return -EINVAL; - } - if (unlikely(!adap->algo)) { - pr_err("i2c-core: Attempt to register adapter '%s' with " - "no algo!\n", adap->name); - return -EINVAL; + if (WARN(!adap->name[0], "i2c adapter has no name")) + goto out_list; + + if (!adap->algo) { + pr_err("adapter '%s': no algo supplied!\n", adap->name); + goto out_list; } if (!adap->lock_bus) { @@ -1552,8 +1729,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap) adap->dev.bus = &i2c_bus_type; adap->dev.type = &i2c_adapter_type; res = device_register(&adap->dev); - if (res) + if (res) { + pr_err("adapter '%s': can't register device (%d)\n", adap->name, res); goto out_list; + } dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); @@ -1569,41 +1748,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) "Failed to create compatibility class link\n"); #endif - /* bus recovery specific initialization */ - if (adap->bus_recovery_info) { - struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; - - if 
(!bri->recover_bus) { - dev_err(&adap->dev, "No recover_bus() found, not using recovery\n"); - adap->bus_recovery_info = NULL; - goto exit_recovery; - } - - /* Generic GPIO recovery */ - if (bri->recover_bus == i2c_generic_gpio_recovery) { - if (!gpio_is_valid(bri->scl_gpio)) { - dev_err(&adap->dev, "Invalid SCL gpio, not using recovery\n"); - adap->bus_recovery_info = NULL; - goto exit_recovery; - } + i2c_init_recovery(adap); - if (gpio_is_valid(bri->sda_gpio)) - bri->get_sda = get_sda_gpio_value; - else - bri->get_sda = NULL; - - bri->get_scl = get_scl_gpio_value; - bri->set_scl = set_scl_gpio_value; - } else if (bri->recover_bus == i2c_generic_scl_recovery) { - /* Generic SCL recovery */ - if (!bri->set_scl || !bri->get_scl) { - dev_err(&adap->dev, "No {get|set}_scl() found, not using recovery\n"); - adap->bus_recovery_info = NULL; - } - } - } - -exit_recovery: /* create pre-declared device nodes */ of_i2c_register_devices(adap); acpi_i2c_register_devices(adap); @@ -1635,13 +1781,12 @@ out_list: */ static int __i2c_add_numbered_adapter(struct i2c_adapter *adap) { - int id; + int id; mutex_lock(&core_lock); - id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1, - GFP_KERNEL); + id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1, GFP_KERNEL); mutex_unlock(&core_lock); - if (id < 0) + if (WARN(id < 0, "couldn't get idr")) return id == -ENOSPC ? -EBUSY : id; return i2c_register_adapter(adap); @@ -1678,7 +1823,7 @@ int i2c_add_adapter(struct i2c_adapter *adapter) id = idr_alloc(&i2c_adapter_idr, adapter, __i2c_first_dynamic_bus_num, 0, GFP_KERNEL); mutex_unlock(&core_lock); - if (id < 0) + if (WARN(id < 0, "couldn't get idr")) return id; adapter->nr = id; @@ -1776,8 +1921,7 @@ void i2c_del_adapter(struct i2c_adapter *adap) found = idr_find(&i2c_adapter_idr, adap->nr); mutex_unlock(&core_lock); if (found != adap) { - pr_debug("i2c-core: attempting to delete unregistered " - "adapter [%s]\n", adap->name); + pr_debug("attempting to delete unregistered adapter [%s]\n", adap->name); return; } @@ -1937,7 +2081,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) if (res) return res; - pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); + pr_debug("driver [%s] registered\n", driver->driver.name); INIT_LIST_HEAD(&driver->clients); /* Walk the adapters that are already present */ @@ -1964,7 +2108,7 @@ void i2c_del_driver(struct i2c_driver *driver) i2c_for_each_dev(driver, __process_removed_driver); driver_unregister(&driver->driver); - pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name); + pr_debug("driver [%s] unregistered\n", driver->driver.name); } EXPORT_SYMBOL(i2c_del_driver); @@ -2055,8 +2199,8 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, put_device(&adap->dev); if (IS_ERR(client)) { - pr_err("%s: failed to create for '%s'\n", - __func__, rd->dn->full_name); + dev_err(&adap->dev, "failed to create client for '%s'\n", + rd->dn->full_name); return notifier_from_errno(PTR_ERR(client)); } break; @@ -2117,6 +2261,8 @@ static int __init i2c_init(void) if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier)); + if (IS_ENABLED(CONFIG_ACPI)) + WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier)); return 0; @@ -2132,6 +2278,8 @@ bus_err: static void __exit i2c_exit(void) { + if (IS_ENABLED(CONFIG_ACPI)) + WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier)); if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier)); i2c_del_driver(&dummy_driver); @@ -2673,7 +2821,7 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg) cpec = i2c_smbus_msg_pec(cpec, msg); if (rpec != cpec) { - pr_debug("i2c-core: Bad PEC 0x%02x vs. 0x%02x\n", + pr_debug("Bad PEC 0x%02x vs. 0x%02x\n", rpec, cpec); return -EBADMSG; } @@ -485,13 +485,8 @@ static int i2cdev_open(struct inode *inode, struct file *file) unsigned int minor = iminor(inode); struct i2c_client *client; struct i2c_adapter *adap; - struct i2c_dev *i2c_dev; - - i2c_dev = i2c_dev_get_by_minor(minor); - if (!i2c_dev) - return -ENODEV; - adap = i2c_get_adapter(i2c_dev->adap->nr); + adap = i2c_get_adapter(minor); if (!adap) return -ENODEV; @@ -33,7 +33,8 @@ struct i2c_smbus_alert { struct alert_data { unsigned short addr; - u8 flag:1; + enum i2c_alert_protocol type; + unsigned int data; }; /* If this is the alerting device, notify its driver */ @@ -56,7 +57,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) if (client->dev.driver) { driver = to_i2c_driver(client->dev.driver); if (driver->alert) - driver->alert(client, data->flag); + driver->alert(client, data->type, data->data); else dev_warn(&client->dev, "no driver alert()!\n"); } else @@ -96,8 +97,9 @@ static void smbus_alert(struct work_struct *work) if (status < 0) break; - data.flag = status & 1; + data.data = status & 1; data.addr = status >> 1; + data.type = I2C_PROTOCOL_SMBUS_ALERT; if (data.addr == prev_addr) { dev_warn(&ara->dev, "Duplicate SMBALERT# from dev " @@ -105,7 +107,7 @@ static void smbus_alert(struct work_struct *work) break; } dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n", - data.addr, data.flag); + data.addr, data.data); /* Notify driver for the device which issued the alert */ device_for_each_child(&ara->adapter->dev, &data, @@ -239,6 +241,108 @@ int i2c_handle_smbus_alert(struct i2c_client *ara) } EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert); +static void smbus_host_notify_work(struct work_struct *work) +{ + struct alert_data alert; + struct i2c_adapter *adapter; + unsigned long flags; + u16 payload; + u8 addr; + struct smbus_host_notify *data; + + data = container_of(work, struct smbus_host_notify, work); + + spin_lock_irqsave(&data->lock, flags); + payload = data->payload; + addr = data->addr; + adapter = data->adapter; + + /* clear the pending bit and release the spinlock */ + data->pending = false; + spin_unlock_irqrestore(&data->lock, flags); + + if (!adapter || !addr) + return; + + alert.type = I2C_PROTOCOL_SMBUS_HOST_NOTIFY; + alert.addr = addr; + alert.data = payload; + + device_for_each_child(&adapter->dev, &alert, smbus_do_alert); +} + +/** + * i2c_setup_smbus_host_notify - Allocate a new smbus_host_notify for the given + * I2C adapter. + * @adapter: the adapter we want to associate a Host Notify function + * + * Returns a struct smbus_host_notify pointer on success, and NULL on failure. + * The resulting smbus_host_notify must not be freed afterwards, it is a + * managed resource already. 
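+ *
+ * A hypothetical usage sketch, not part of this patch (the priv
+ * structure and error handling are illustrative only): a bus driver
+ * would call this once at probe time, e.g.
+ *
+ *	priv->host_notify = i2c_setup_smbus_host_notify(&priv->adapter);
+ *	if (!priv->host_notify)
+ *		return -ENOMEM;
+ *
+ * and later forward events from its interrupt handler via
+ * i2c_handle_smbus_host_notify(priv->host_notify, addr, payload).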
+ */ +struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap) +{ + struct smbus_host_notify *host_notify; + + host_notify = devm_kzalloc(&adap->dev, sizeof(struct smbus_host_notify), + GFP_KERNEL); + if (!host_notify) + return NULL; + + host_notify->adapter = adap; + + spin_lock_init(&host_notify->lock); + INIT_WORK(&host_notify->work, smbus_host_notify_work); + + return host_notify; +} +EXPORT_SYMBOL_GPL(i2c_setup_smbus_host_notify); + +/** + * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct + * I2C client. + * @host_notify: the struct host_notify attached to the relevant adapter + * @addr: the I2C address of the notifying device + * @data: the payload of the notification + * Context: can't sleep + * + * Helper function to be called from an I2C bus driver's interrupt + * handler. It will schedule the Host Notify work, in turn calling the + * corresponding I2C device driver's alert function. + * + * host_notify should be a valid pointer previously returned by + * i2c_setup_smbus_host_notify(). + */ +int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify, + unsigned short addr, unsigned int data) +{ + unsigned long flags; + struct i2c_adapter *adapter; + + if (!host_notify || !host_notify->adapter) + return -EINVAL; + + adapter = host_notify->adapter; + + spin_lock_irqsave(&host_notify->lock, flags); + + if (host_notify->pending) { + spin_unlock_irqrestore(&host_notify->lock, flags); + dev_warn(&adapter->dev, "Host Notify already scheduled.\n"); + return -EBUSY; + } + + host_notify->payload = data; + host_notify->addr = addr; + + /* Mark that there is a pending notification and release the lock */ + host_notify->pending = true; + spin_unlock_irqrestore(&host_notify->lock, flags); + + return schedule_work(&host_notify->work); +} +EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify); + module_i2c_driver(smbalert_driver); MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); @@ -145,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux, mux->data.idle_in_use = true; /* map address from "reg" if exists */ - if (of_address_to_resource(np, 0, &res)) { + if (of_address_to_resource(np, 0, &res) == 0) { mux->data.reg_size = resource_size(&res); mux->data.reg = devm_ioremap_resource(&pdev->dev, &res); if (IS_ERR(mux->data.reg)) @@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = { .remove = i2c_mux_reg_remove, .driver = { .name = "i2c-mux-reg", + .of_match_table = of_match_ptr(i2c_mux_reg_of_match), }, }; @@ -1770,7 +1770,6 @@ static int ide_cd_probe(ide_drive_t *drive) drive->driver_data = info; g->minors = 1; - g->driverfs_dev = &drive->gendev; g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; if (ide_cdrom_setup(drive)) { put_device(&info->dev); @@ -1780,7 +1779,7 @@ static int ide_cd_probe(ide_drive_t *drive) ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; - add_disk(g); + device_add_disk(&drive->gendev, g); return 0; out_free_disk: @@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, layer. the packet must be complete, as we do not touch it at all. 
*/ - if (cgc->data_direction == CGC_DATA_WRITE) - flags |= REQ_WRITE; - if (cgc->sense) memset(cgc->sense, 0, sizeof(struct request_sense)); @@ -186,7 +186,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); BUG_ON(rq->cmd_type != REQ_TYPE_FS); - ledtrig_ide_activity(); + ledtrig_disk_activity(); pr_debug("%s: %sing: block=%llu, sectors=%u\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", @@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) ide_drive_t *drive = q->queuedata; struct ide_cmd *cmd; - if (!(rq->cmd_flags & REQ_FLUSH)) + if (req_op(rq) != REQ_OP_FLUSH) return BLKPREP_OK; if (rq->special) { @@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive, memcpy(rq->cmd, pc->c, 12); pc->rq = rq; - if (rq->cmd_flags & REQ_WRITE) + if (cmd == WRITE) pc->flags |= PC_FLAG_WRITING; pc->flags |= PC_FLAG_DMA_OK; @@ -412,12 +412,11 @@ static int ide_gd_probe(ide_drive_t *drive) set_capacity(g, ide_gd_capacity(drive)); g->minors = IDE_DISK_MINORS; - g->driverfs_dev = &drive->gendev; g->flags |= GENHD_FL_EXT_DEVT; if (drive->dev_flags & IDE_DFLAG_REMOVABLE) g->flags = GENHD_FL_REMOVABLE; g->fops = &ide_gd_ops; - add_disk(g); + device_add_disk(&drive->gendev, g); return 0; out_free_disk: @@ -46,8 +46,6 @@ * to avoid complications with the lapic timer workaround. * Have not seen issues with suspend, but may need same workaround here. * - * There is currently no kernel-based automatic probing/loading mechanism - * if the driver is built as a module. */ /* un-comment DEBUG to enable pr_debug() statements */ @@ -60,8 +58,9 @@ #include <linux/sched.h> #include <linux/notifier.h> #include <linux/cpu.h> -#include <linux/module.h> +#include <linux/moduleparam.h> #include <asm/cpu_device_id.h> +#include <asm/intel-family.h> #include <asm/mwait.h> #include <asm/msr.h> @@ -827,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = { .enter = NULL } }; +static struct cpuidle_state dnv_cstates[] = { + { + .name = "C1-DNV", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .name = "C1E-DNV", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .name = "C6-DNV", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 50, + .target_residency = 500, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .enter = NULL } +}; + /** * intel_idle * @dev: cpuidle_device @@ -1016,45 +1044,50 @@ static const struct idle_cpu idle_cpu_bxt = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_dnv = { + .state_table = dnv_cstates, + .disable_promotion_to_c1e = true, +}; + #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } static const struct x86_cpu_id intel_idle_ids[] __initconst = { - ICPU(0x1a, idle_cpu_nehalem), - ICPU(0x1e, idle_cpu_nehalem), - ICPU(0x1f, idle_cpu_nehalem), - ICPU(0x25, idle_cpu_nehalem), - ICPU(0x2c, idle_cpu_nehalem), - ICPU(0x2e, idle_cpu_nehalem), - ICPU(0x1c, idle_cpu_atom), - ICPU(0x26, idle_cpu_lincroft), - ICPU(0x2f, idle_cpu_nehalem), - ICPU(0x2a, idle_cpu_snb), - ICPU(0x2d, idle_cpu_snb), - ICPU(0x36, idle_cpu_atom), - ICPU(0x37, idle_cpu_byt), - ICPU(0x4c, idle_cpu_cht), - ICPU(0x3a, 
idle_cpu_ivb), - ICPU(0x3e, idle_cpu_ivt), - ICPU(0x3c, idle_cpu_hsw), - ICPU(0x3f, idle_cpu_hsw), - ICPU(0x45, idle_cpu_hsw), - ICPU(0x46, idle_cpu_hsw), - ICPU(0x4d, idle_cpu_avn), - ICPU(0x3d, idle_cpu_bdw), - ICPU(0x47, idle_cpu_bdw), - ICPU(0x4f, idle_cpu_bdw), - ICPU(0x56, idle_cpu_bdw), - ICPU(0x4e, idle_cpu_skl), - ICPU(0x5e, idle_cpu_skl), - ICPU(0x8e, idle_cpu_skl), - ICPU(0x9e, idle_cpu_skl), - ICPU(0x55, idle_cpu_skx), - ICPU(0x57, idle_cpu_knl), - ICPU(0x5c, idle_cpu_bxt), + ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), + ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem), + ICPU(INTEL_FAM6_WESTMERE2, idle_cpu_nehalem), + ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem), + ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem), + ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem), + ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom), + ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft), + ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem), + ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb), + ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb), + ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom), + ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt), + ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht), + ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb), + ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt), + ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw), + ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn), + ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw), + ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl), + ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl), + ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl), + ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl), + ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx), + ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), + ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv), {} }; -MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); /* * intel_idle_probe() @@ -1154,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) { unsigned long long ns; - ns = irtl_ns_units[(irtl >> 10) & 0x3]; + if (!irtl) + return 0; + + ns = irtl_ns_units[(irtl >> 10) & 0x7]; return div64_u64((irtl & 0x3FF) * ns, 1000); } @@ -1167,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl) static void bxt_idle_state_table_update(void) { unsigned long long msr; + unsigned int usec; rdmsrl(MSR_PKGC6_IRTL, msr); - if (msr) { - unsigned int usec = irtl_2_usec(msr); - + usec = irtl_2_usec(msr); + if (usec) { bxt_cstates[2].exit_latency = usec; bxt_cstates[2].target_residency = usec; } rdmsrl(MSR_PKGC7_IRTL, msr); - if (msr) { - unsigned int usec = irtl_2_usec(msr); - + usec = irtl_2_usec(msr); + if (usec) { bxt_cstates[3].exit_latency = usec; bxt_cstates[3].target_residency = usec; } rdmsrl(MSR_PKGC8_IRTL, msr); - if (msr) { - unsigned int usec = irtl_2_usec(msr); - + usec = irtl_2_usec(msr); + if (usec) { bxt_cstates[4].exit_latency = usec; bxt_cstates[4].target_residency = usec; } rdmsrl(MSR_PKGC9_IRTL, msr); - if (msr) { - unsigned int usec = irtl_2_usec(msr); - + usec = irtl_2_usec(msr); + if (usec) { bxt_cstates[5].exit_latency = usec; bxt_cstates[5].target_residency = usec; } rdmsrl(MSR_PKGC10_IRTL, msr); - if (msr) { - unsigned int usec = irtl_2_usec(msr); - + usec = irtl_2_usec(msr); + if (usec) { 
bxt_cstates[6].exit_latency = usec; bxt_cstates[6].target_residency = usec; } @@ -1261,13 +1293,13 @@ static void intel_idle_state_table_update(void) { switch (boot_cpu_data.x86_model) { - case 0x3e: /* IVT */ + case INTEL_FAM6_IVYBRIDGE_X: ivt_idle_state_table_update(); break; - case 0x5c: /* BXT */ + case INTEL_FAM6_ATOM_GOLDMONT: bxt_idle_state_table_update(); break; - case 0x5e: /* SKL-H */ + case INTEL_FAM6_SKYLAKE_DESKTOP: sklh_idle_state_table_update(); break; } @@ -1415,34 +1447,12 @@ static int __init intel_idle_init(void) return 0; } +device_initcall(intel_idle_init); -static void __exit intel_idle_exit(void) -{ - struct cpuidle_device *dev; - int i; - - cpu_notifier_register_begin(); - - if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) - on_each_cpu(__setup_broadcast_timer, (void *)false, 1); - __unregister_cpu_notifier(&cpu_hotplug_notifier); - - for_each_possible_cpu(i) { - dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); - cpuidle_unregister_device(dev); - } - - cpu_notifier_register_done(); - - cpuidle_unregister_driver(&intel_idle_driver); - free_percpu(intel_idle_cpuidle_devices); -} - -module_init(intel_idle_init); -module_exit(intel_idle_exit); - +/* + * We are not really modular, but we used to support that. Meaning we also + * support "intel_idle.max_cstate=..." at boot and also a read-only export of + * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param + * is the easiest way (currently) to continue doing that. + */ module_param(max_cstate, int, 0444); - -MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); -MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); -MODULE_LICENSE("GPL"); @@ -46,6 +46,14 @@ config IIO_CONSUMERS_PER_TRIGGER This value controls the maximum number of consumers that a given trigger may handle. Default is 2. +config IIO_SW_DEVICE + tristate "Enable software IIO device support" + select IIO_CONFIGFS + help + Provides IIO core support for software devices. A software + device can be created via configfs or directly by a driver + using the API provided. + config IIO_SW_TRIGGER tristate "Enable software triggers support" select IIO_CONFIGFS @@ -8,6 +8,7 @@ industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o obj-$(CONFIG_IIO_CONFIGFS) += industrialio-configfs.o +obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o @@ -17,6 +17,16 @@ config BMA180 To compile this driver as a module, choose M here: the module will be called bma180. +config BMA220 + tristate "Bosch BMA220 3-Axis Accelerometer Driver" + depends on SPI + help + Say yes here to add support for the Bosch BMA220 triaxial + acceleration sensor. + + To compile this driver as a module, choose M here: the + module will be called bma220_spi. + config BMC150_ACCEL tristate "Bosch BMC150 Accelerometer Driver" select IIO_BUFFER @@ -136,13 +146,23 @@ config MMA7455_SPI To compile this driver as a module, choose M here: the module will be called mma7455_spi. +config MMA7660 + tristate "Freescale MMA7660FC 3-Axis Accelerometer Driver" + depends on I2C + help + Say yes here to get support for the Freescale MMA7660FC 3-Axis + accelerometer. + + Choosing M will build the driver as a module. If so, the module + will be called mma7660. 
+
 config MMA8452
-	tristate "Freescale MMA8452Q and similar Accelerometers Driver"
+	tristate "Freescale / NXP MMA8452Q and similar Accelerometers Driver"
 	depends on I2C
 	select IIO_BUFFER
 	select IIO_TRIGGERED_BUFFER
 	help
-	  Say yes here to build support for the following Freescale 3-axis
+	  Say yes here to build support for the following Freescale / NXP 3-axis
 	  accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC,
 	  MMA8653FC, FXLS8471Q.
@@ -4,6 +4,7 @@
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_BMA180) += bma180.o
+obj-$(CONFIG_BMA220) += bma220_spi.o
 obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
 obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
 obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
@@ -15,6 +16,8 @@
 obj-$(CONFIG_MMA7455) += mma7455_core.o
 obj-$(CONFIG_MMA7455_I2C) += mma7455_i2c.o
 obj-$(CONFIG_MMA7455_SPI) += mma7455_spi.o
+obj-$(CONFIG_MMA7660) += mma7660.o
+
 obj-$(CONFIG_MMA8452) += mma8452.o
 obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
@@ -654,7 +654,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
 	struct iio_poll_func *pf = p;
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct bma180_data *data = iio_priv(indio_dev);
-	int64_t time_ns = iio_get_time_ns();
+	s64 time_ns = iio_get_time_ns(indio_dev);
 	int bit, ret, i = 0;
 
 	mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
new file mode 100644
index 000000000000..1098d10df8e8
--- /dev/null
+++ b/drivers/iio/accel/bma220_spi.c
@@ -0,0 +1,338 @@
+/**
+ * BMA220 Digital triaxial acceleration sensor driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
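+ *
+ * Hypothetical userspace sketch of the resulting IIO interface (the
+ * iio:device0 path is an assumption, not part of this patch):
+ *
+ *	cat /sys/bus/iio/devices/iio:device0/in_accel_x_raw
+ *	cat /sys/bus/iio/devices/iio:device0/in_accel_scale_available
+ *	echo 0.623 > /sys/bus/iio/devices/iio:device0/in_accel_scale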
+ */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/iio/buffer.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/spi/spi.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> + +#define BMA220_REG_ID 0x00 +#define BMA220_REG_ACCEL_X 0x02 +#define BMA220_REG_ACCEL_Y 0x03 +#define BMA220_REG_ACCEL_Z 0x04 +#define BMA220_REG_RANGE 0x11 +#define BMA220_REG_SUSPEND 0x18 + +#define BMA220_CHIP_ID 0xDD +#define BMA220_READ_MASK 0x80 +#define BMA220_RANGE_MASK 0x03 +#define BMA220_DATA_SHIFT 2 +#define BMA220_SUSPEND_SLEEP 0xFF +#define BMA220_SUSPEND_WAKE 0x00 + +#define BMA220_DEVICE_NAME "bma220" +#define BMA220_SCALE_AVAILABLE "0.623 1.248 2.491 4.983" + +#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \ + .type = IIO_ACCEL, \ + .address = reg, \ + .modified = 1, \ + .channel2 = IIO_MOD_##axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .scan_index = index, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 6, \ + .storagebits = 8, \ + .shift = BMA220_DATA_SHIFT, \ + .endianness = IIO_CPU, \ + }, \ +} + +enum bma220_axis { + AXIS_X, + AXIS_Y, + AXIS_Z, +}; + +static IIO_CONST_ATTR(in_accel_scale_available, BMA220_SCALE_AVAILABLE); + +static struct attribute *bma220_attributes[] = { + &iio_const_attr_in_accel_scale_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group bma220_attribute_group = { + .attrs = bma220_attributes, +}; + +static const int bma220_scale_table[][4] = { + {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000} +}; + +struct bma220_data { + struct spi_device *spi_device; + struct mutex lock; + s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */ + u8 tx_buf[2] ____cacheline_aligned; +}; + +static const struct iio_chan_spec bma220_channels[] = { + BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X), + BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y), + BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z), + IIO_CHAN_SOFT_TIMESTAMP(3), +}; + +static inline int bma220_read_reg(struct spi_device *spi, u8 reg) +{ + return spi_w8r8(spi, reg | BMA220_READ_MASK); +} + +static const unsigned long bma220_accel_scan_masks[] = { + BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z), + 0 +}; + +static irqreturn_t bma220_trigger_handler(int irq, void *p) +{ + int ret; + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bma220_data *data = iio_priv(indio_dev); + struct spi_device *spi = data->spi_device; + + mutex_lock(&data->lock); + data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK; + ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer, + ARRAY_SIZE(bma220_channels) - 1); + if (ret < 0) + goto err; + + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, + pf->timestamp); +err: + mutex_unlock(&data->lock); + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int bma220_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + int ret; + u8 range_idx; + struct bma220_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = bma220_read_reg(data->spi_device, chan->address); + if (ret < 0) + return -EINVAL; + *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE); + if (ret < 0) + return ret; + range_idx = ret & BMA220_RANGE_MASK; + *val 
= bma220_scale_table[range_idx][0]; + *val2 = bma220_scale_table[range_idx][1]; + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +} + +static int bma220_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + int i; + int ret; + int index = -1; + struct bma220_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + for (i = 0; i < ARRAY_SIZE(bma220_scale_table); i++) + if (val == bma220_scale_table[i][0] && + val2 == bma220_scale_table[i][1]) { + index = i; + break; + } + if (index < 0) + return -EINVAL; + + mutex_lock(&data->lock); + data->tx_buf[0] = BMA220_REG_RANGE; + data->tx_buf[1] = index; + ret = spi_write(data->spi_device, data->tx_buf, + sizeof(data->tx_buf)); + if (ret < 0) + dev_err(&data->spi_device->dev, + "failed to set measurement range\n"); + mutex_unlock(&data->lock); + + return 0; + } + + return -EINVAL; +} + +static const struct iio_info bma220_info = { + .driver_module = THIS_MODULE, + .read_raw = bma220_read_raw, + .write_raw = bma220_write_raw, + .attrs = &bma220_attribute_group, +}; + +static int bma220_init(struct spi_device *spi) +{ + int ret; + + ret = bma220_read_reg(spi, BMA220_REG_ID); + if (ret != BMA220_CHIP_ID) + return -ENODEV; + + /* Make sure the chip is powered on */ + ret = bma220_read_reg(spi, BMA220_REG_SUSPEND); + if (ret < 0) + return ret; + else if (ret == BMA220_SUSPEND_WAKE) + return bma220_read_reg(spi, BMA220_REG_SUSPEND); + + return 0; +} + +static int bma220_deinit(struct spi_device *spi) +{ + int ret; + + /* Make sure the chip is powered off */ + ret = bma220_read_reg(spi, BMA220_REG_SUSPEND); + if (ret < 0) + return ret; + else if (ret == BMA220_SUSPEND_SLEEP) + return bma220_read_reg(spi, BMA220_REG_SUSPEND); + + return 0; +} + +static int bma220_probe(struct spi_device *spi) +{ + int ret; + struct iio_dev *indio_dev; + struct bma220_data *data; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data)); + if (!indio_dev) { + dev_err(&spi->dev, "iio allocation failed!\n"); + return -ENOMEM; + } + + data = iio_priv(indio_dev); + data->spi_device = spi; + spi_set_drvdata(spi, indio_dev); + mutex_init(&data->lock); + + indio_dev->dev.parent = &spi->dev; + indio_dev->info = &bma220_info; + indio_dev->name = BMA220_DEVICE_NAME; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = bma220_channels; + indio_dev->num_channels = ARRAY_SIZE(bma220_channels); + indio_dev->available_scan_masks = bma220_accel_scan_masks; + + ret = bma220_init(data->spi_device); + if (ret < 0) + return ret; + + ret = iio_triggered_buffer_setup(indio_dev, NULL, + bma220_trigger_handler, NULL); + if (ret < 0) { + dev_err(&spi->dev, "iio triggered buffer setup failed\n"); + goto err_suspend; + } + + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&spi->dev, "iio_device_register failed\n"); + iio_triggered_buffer_cleanup(indio_dev); + goto err_suspend; + } + + return 0; + +err_suspend: + return bma220_deinit(spi); +} + +static int bma220_remove(struct spi_device *spi) +{ + struct iio_dev *indio_dev = spi_get_drvdata(spi); + + iio_device_unregister(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); + + return bma220_deinit(spi); +} + +#ifdef CONFIG_PM_SLEEP +static int bma220_suspend(struct device *dev) +{ + struct bma220_data *data = + iio_priv(spi_get_drvdata(to_spi_device(dev))); + + /* The chip can be suspended/woken up by a simple register read. 
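+	 * Per bma220_init()/bma220_deinit() above, the read toggles the
+	 * power state and returns the previous one, so a single read
+	 * suffices in both the suspend and resume callbacks.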
*/ + return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND); +} + +static int bma220_resume(struct device *dev) +{ + struct bma220_data *data = + iio_priv(spi_get_drvdata(to_spi_device(dev))); + + return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND); +} + +static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume); + +#define BMA220_PM_OPS (&bma220_pm_ops) +#else +#define BMA220_PM_OPS NULL +#endif + +static const struct spi_device_id bma220_spi_id[] = { + {"bma220", 0}, + {} +}; + +static const struct acpi_device_id bma220_acpi_id[] = { + {"BMA0220", 0}, + {} +}; + +MODULE_DEVICE_TABLE(spi, bma220_spi_id); + +static struct spi_driver bma220_driver = { + .driver = { + .name = "bma220_spi", + .pm = BMA220_PM_OPS, + .acpi_match_table = ACPI_PTR(bma220_acpi_id), + }, + .probe = bma220_probe, + .remove = bma220_remove, + .id_table = bma220_spi_id, +}; + +module_spi_driver(bma220_driver); + +MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>"); +MODULE_DESCRIPTION("BMA220 acceleration sensor driver"); +MODULE_LICENSE("GPL v2"); @@ -901,7 +901,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, */ if (!irq) { data->old_timestamp = data->timestamp; - data->timestamp = iio_get_time_ns(); + data->timestamp = iio_get_time_ns(indio_dev); } /* @@ -1303,7 +1303,7 @@ static irqreturn_t bmc150_accel_irq_handler(int irq, void *private) int i; data->old_timestamp = data->timestamp; - data->timestamp = iio_get_time_ns(); + data->timestamp = iio_get_time_ns(indio_dev); for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) { if (data->triggers[i].enabled) { @@ -1129,7 +1129,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private) struct iio_dev *indio_dev = private; struct kxcjk1013_data *data = iio_priv(indio_dev); - data->timestamp = iio_get_time_ns(); + data->timestamp = iio_get_time_ns(indio_dev); if (data->dready_trigger_on) iio_trigger_poll(data->dready_trig); @@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro) mutex_lock(&st->buf_lock); ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; @@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_SCALE: ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); - if (ret) + if (ret < 0) goto error_ret; *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; ret = IIO_VAL_INT_PLUS_MICRO; @@ -97,7 +97,8 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c new file mode 100644 index 000000000000..0acdee516973 --- /dev/null +++ b/ drivers/iio/accel/mma7660.c@@ -0,0 +1,277 @@ +/** + * Freescale MMA7660FC 3-Axis Accelerometer + * + * Copyright (c) 2016, Intel Corporation. + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. + * + * IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c. 
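+ *
+ * Hypothetical userspace sketch (iio:device0 is an assumed
+ * enumeration, not part of this patch):
+ *
+ *	cat /sys/bus/iio/devices/iio:device0/in_accel_x_raw
+ *	cat /sys/bus/iio/devices/iio:device0/in_accel_scale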
+ */ + +#include <linux/acpi.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> + +#define MMA7660_DRIVER_NAME "mma7660" + +#define MMA7660_REG_XOUT 0x00 +#define MMA7660_REG_YOUT 0x01 +#define MMA7660_REG_ZOUT 0x02 +#define MMA7660_REG_OUT_BIT_ALERT BIT(6) + +#define MMA7660_REG_MODE 0x07 +#define MMA7660_REG_MODE_BIT_MODE BIT(0) +#define MMA7660_REG_MODE_BIT_TON BIT(2) + +#define MMA7660_I2C_READ_RETRIES 5 + +/* + * The accelerometer has one measurement range: + * + * -1.5g - +1.5g (6-bit, signed) + * + * scale = (1.5 + 1.5) * 9.81 / (2^6 - 1) = 0.467142857 + */ + +#define MMA7660_SCALE_AVAIL "0.467142857" + +const int mma7660_nscale = 467142857; + +#define MMA7660_CHANNEL(reg, axis) { \ + .type = IIO_ACCEL, \ + .address = reg, \ + .modified = 1, \ + .channel2 = IIO_MOD_##axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ +} + +static const struct iio_chan_spec mma7660_channels[] = { + MMA7660_CHANNEL(MMA7660_REG_XOUT, X), + MMA7660_CHANNEL(MMA7660_REG_YOUT, Y), + MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z), +}; + +enum mma7660_mode { + MMA7660_MODE_STANDBY, + MMA7660_MODE_ACTIVE +}; + +struct mma7660_data { + struct i2c_client *client; + struct mutex lock; + enum mma7660_mode mode; +}; + +static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL); + +static struct attribute *mma7660_attributes[] = { + &iio_const_attr_in_accel_scale_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group mma7660_attribute_group = { + .attrs = mma7660_attributes +}; + +static int mma7660_set_mode(struct mma7660_data *data, + enum mma7660_mode mode) +{ + int ret; + struct i2c_client *client = data->client; + + if (mode == data->mode) + return 0; + + ret = i2c_smbus_read_byte_data(client, MMA7660_REG_MODE); + if (ret < 0) { + dev_err(&client->dev, "failed to read sensor mode\n"); + return ret; + } + + if (mode == MMA7660_MODE_ACTIVE) { + ret &= ~MMA7660_REG_MODE_BIT_TON; + ret |= MMA7660_REG_MODE_BIT_MODE; + } else { + ret &= ~MMA7660_REG_MODE_BIT_TON; + ret &= ~MMA7660_REG_MODE_BIT_MODE; + } + + ret = i2c_smbus_write_byte_data(client, MMA7660_REG_MODE, ret); + if (ret < 0) { + dev_err(&client->dev, "failed to change sensor mode\n"); + return ret; + } + + data->mode = mode; + + return ret; +} + +static int mma7660_read_accel(struct mma7660_data *data, u8 address) +{ + int ret, retries = MMA7660_I2C_READ_RETRIES; + struct i2c_client *client = data->client; + + /* + * Read data. If the Alert bit is set, the register was read at + * the same time as the device was attempting to update the content. + * The solution is to read the register again. Do this only + * MMA7660_I2C_READ_RETRIES times to avoid spending too much time + * in the kernel. 
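+	 * Worst case this issues MMA7660_I2C_READ_RETRIES + 1 transfers
+	 * before giving up and returning -ETIMEDOUT.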
+ */ + do { + ret = i2c_smbus_read_byte_data(client, address); + if (ret < 0) { + dev_err(&client->dev, "register read failed\n"); + return ret; + } + } while (retries-- > 0 && ret & MMA7660_REG_OUT_BIT_ALERT); + + if (ret & MMA7660_REG_OUT_BIT_ALERT) { + dev_err(&client->dev, "all register read retries failed\n"); + return -ETIMEDOUT; + } + + return ret; +} + +static int mma7660_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct mma7660_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&data->lock); + ret = mma7660_read_accel(data, chan->address); + mutex_unlock(&data->lock); + if (ret < 0) + return ret; + *val = sign_extend32(ret, 5); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = 0; + *val2 = mma7660_nscale; + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } + + return -EINVAL; +} + +static const struct iio_info mma7660_info = { + .driver_module = THIS_MODULE, + .read_raw = mma7660_read_raw, + .attrs = &mma7660_attribute_group, +}; + +static int mma7660_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct iio_dev *indio_dev; + struct mma7660_data *data; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) { + dev_err(&client->dev, "iio allocation failed!\n"); + return -ENOMEM; + } + + data = iio_priv(indio_dev); + data->client = client; + i2c_set_clientdata(client, indio_dev); + mutex_init(&data->lock); + data->mode = MMA7660_MODE_STANDBY; + + indio_dev->dev.parent = &client->dev; + indio_dev->info = &mma7660_info; + indio_dev->name = MMA7660_DRIVER_NAME; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = mma7660_channels; + indio_dev->num_channels = ARRAY_SIZE(mma7660_channels); + + ret = mma7660_set_mode(data, MMA7660_MODE_ACTIVE); + if (ret < 0) + return ret; + + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "device_register failed\n"); + mma7660_set_mode(data, MMA7660_MODE_STANDBY); + } + + return ret; +} + +static int mma7660_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + + iio_device_unregister(indio_dev); + + return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY); +} + +#ifdef CONFIG_PM_SLEEP +static int mma7660_suspend(struct device *dev) +{ + struct mma7660_data *data; + + data = iio_priv(i2c_get_clientdata(to_i2c_client(dev))); + + return mma7660_set_mode(data, MMA7660_MODE_STANDBY); +} + +static int mma7660_resume(struct device *dev) +{ + struct mma7660_data *data; + + data = iio_priv(i2c_get_clientdata(to_i2c_client(dev))); + + return mma7660_set_mode(data, MMA7660_MODE_ACTIVE); +} + +static SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend, mma7660_resume); + +#define MMA7660_PM_OPS (&mma7660_pm_ops) +#else +#define MMA7660_PM_OPS NULL +#endif + +static const struct i2c_device_id mma7660_i2c_id[] = { + {"mma7660", 0}, + {} +}; + +static const struct acpi_device_id mma7660_acpi_id[] = { + {"MMA7660", 0}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id); + +static struct i2c_driver mma7660_driver = { + .driver = { + .name = "mma7660", + .pm = MMA7660_PM_OPS, + .acpi_match_table = ACPI_PTR(mma7660_acpi_id), + }, + .probe = mma7660_probe, + .remove = mma7660_remove, + .id_table = mma7660_i2c_id, +}; + +module_i2c_driver(mma7660_driver); + +MODULE_AUTHOR("Constantin Musca <constantin.musca@intel.com>"); +MODULE_DESCRIPTION("Freescale MMA7660FC 3-Axis 
Accelerometer driver"); +MODULE_LICENSE("GPL v2"); @@ -1,22 +1,22 @@ /* - * mma8452.c - Support for following Freescale 3-axis accelerometers: + * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers: * - * MMA8451Q (14 bit) - * MMA8452Q (12 bit) - * MMA8453Q (10 bit) - * MMA8652FC (12 bit) - * MMA8653FC (10 bit) - * FXLS8471Q (14 bit) + * device name digital output 7-bit I2C slave address (pin selectable) + * --------------------------------------------------------------------- + * MMA8451Q 14 bit 0x1c / 0x1d + * MMA8452Q 12 bit 0x1c / 0x1d + * MMA8453Q 10 bit 0x1c / 0x1d + * MMA8652FC 12 bit 0x1d + * MMA8653FC 10 bit 0x1d + * FXLS8471Q 14 bit 0x1e / 0x1d / 0x1c / 0x1f * - * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com> + * Copyright 2015 Martin Kepplinger <martink@posteo.de> * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net> * * This file is subject to the terms and conditions of version 2 of * the GNU General Public License. See the file COPYING in the main * directory of this archive for more details. * - * 7-bit I2C slave address 0x1c/0x1d (pin selectable) - * * TODO: orientation events */ @@ -76,6 +76,8 @@ #define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */ #define MMA8452_CTRL_REG2 0x2b #define MMA8452_CTRL_REG2_RST BIT(6) +#define MMA8452_CTRL_REG2_MODS_SHIFT 3 +#define MMA8452_CTRL_REG2_MODS_MASK 0x1b #define MMA8452_CTRL_REG4 0x2d #define MMA8452_CTRL_REG5 0x2e #define MMA8452_OFF_X 0x2f @@ -106,7 +108,7 @@ struct mma8452_data { }; /** - * struct mma_chip_info - chip specific data for Freescale's accelerometers + * struct mma_chip_info - chip specific data * @chip_id: WHO_AM_I register's value * @channels: struct iio_chan_spec matching the device's * capabilities @@ -257,20 +259,17 @@ static const int mma8452_samp_freq[8][2] = { {6, 250000}, {1, 560000} }; -/* Datasheet table 35 (step time vs sample frequency) */ -static const int mma8452_transient_time_step_us[8] = { - 1250, - 2500, - 5000, - 10000, - 20000, - 20000, - 20000, - 20000 +/* Datasheet table: step time "Relationship with the ODR" (sample frequency) */ +static const int mma8452_transient_time_step_us[4][8] = { + { 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 }, /* normal */ + { 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 }, /* l p l n */ + { 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 }, /* high res*/ + { 1250, 2500, 5000, 10000, 20000, 80000, 160000, 160000 } /* l p */ }; -/* Datasheet table 18 (normal mode) */ -static const int mma8452_hp_filter_cutoff[8][4][2] = { +/* Datasheet table "High-Pass Filter Cutoff Options" */ +static const int mma8452_hp_filter_cutoff[4][8][4][2] = { + { /* normal */ { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 800 Hz sample */ { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 400 Hz sample */ { {8, 0}, {4, 0}, {2, 0}, {1, 0} }, /* 200 Hz sample */ @@ -279,8 +278,61 @@ static const int mma8452_hp_filter_cutoff[8][4][2] = { { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 12.5 Hz sample */ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 6.25 Hz sample */ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} } /* 1.56 Hz sample */ + }, + { /* low noise low power */ + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {8, 0}, {4, 0}, {2, 0}, {1, 0} }, + { {4, 0}, {2, 0}, {1, 0}, {0, 500000} }, + { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, + { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} }, + { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} }, + { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} 
} + }, + { /* high resolution */ + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {16, 0}, {8, 0}, {4, 0}, {2, 0} } + }, + { /* low power */ + { {16, 0}, {8, 0}, {4, 0}, {2, 0} }, + { {8, 0}, {4, 0}, {2, 0}, {1, 0} }, + { {4, 0}, {2, 0}, {1, 0}, {0, 500000} }, + { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, + { {1, 0}, {0, 500000}, {0, 250000}, {0, 125000} }, + { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} }, + { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} }, + { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} } + } }; +/* Datasheet table "MODS Oversampling modes averaging values at each ODR" */ +static const u16 mma8452_os_ratio[4][8] = { + /* 800 Hz, 400 Hz, ... , 1.56 Hz */ + { 2, 4, 4, 4, 4, 16, 32, 128 }, /* normal */ + { 2, 4, 4, 4, 4, 4, 8, 32 }, /* low power low noise */ + { 2, 4, 8, 16, 32, 128, 256, 1024 }, /* high resolution */ + { 2, 2, 2, 2, 2, 2, 4, 16 } /* low power */ +}; + +static int mma8452_get_power_mode(struct mma8452_data *data) +{ + int reg; + + reg = i2c_smbus_read_byte_data(data->client, + MMA8452_CTRL_REG2); + if (reg < 0) + return reg; + + return ((reg & MMA8452_CTRL_REG2_MODS_MASK) >> + MMA8452_CTRL_REG2_MODS_SHIFT); +} + static ssize_t mma8452_show_samp_freq_avail(struct device *dev, struct device_attribute *attr, char *buf) @@ -306,10 +358,39 @@ static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev, { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct mma8452_data *data = iio_priv(indio_dev); + int i, j; + + i = mma8452_get_odr_index(data); + j = mma8452_get_power_mode(data); + if (j < 0) + return j; + + return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[j][i], + ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0])); +} + +static ssize_t mma8452_show_os_ratio_avail(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct mma8452_data *data = iio_priv(indio_dev); int i = mma8452_get_odr_index(data); + int j; + u16 val = 0; + size_t len = 0; - return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[i], - ARRAY_SIZE(mma8452_hp_filter_cutoff[0])); + for (j = 0; j < ARRAY_SIZE(mma8452_os_ratio); j++) { + if (val == mma8452_os_ratio[j][i]) + continue; + + val = mma8452_os_ratio[j][i]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", val); + } + buf[len - 1] = '\n'; + + return len; } static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail); @@ -317,6 +398,8 @@ static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO, mma8452_show_scale_avail, NULL, 0); static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available, S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0); +static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO, + mma8452_show_os_ratio_avail, NULL, 0); static int mma8452_get_samp_freq_index(struct mma8452_data *data, int val, int val2) @@ -335,24 +418,33 @@ static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2) static int mma8452_get_hp_filter_index(struct mma8452_data *data, int val, int val2) { - int i = mma8452_get_odr_index(data); + int i, j; + + i = mma8452_get_odr_index(data); + j = mma8452_get_power_mode(data); + if (j < 0) + return j; - return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i], - ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), 
val, val2); + return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[j][i], + ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]), val, val2); } static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz) { - int i, ret; + int j, i, ret; ret = i2c_smbus_read_byte_data(data->client, MMA8452_HP_FILTER_CUTOFF); if (ret < 0) return ret; i = mma8452_get_odr_index(data); + j = mma8452_get_power_mode(data); + if (j < 0) + return j; + ret &= MMA8452_HP_FILTER_CUTOFF_SEL_MASK; - *hz = mma8452_hp_filter_cutoff[i][ret][0]; - *uHz = mma8452_hp_filter_cutoff[i][ret][1]; + *hz = mma8452_hp_filter_cutoff[j][i][ret][0]; + *uHz = mma8452_hp_filter_cutoff[j][i][ret][1]; return 0; } @@ -414,6 +506,15 @@ static int mma8452_read_raw(struct iio_dev *indio_dev, } return IIO_VAL_INT_PLUS_MICRO; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + ret = mma8452_get_power_mode(data); + if (ret < 0) + return ret; + + i = mma8452_get_odr_index(data); + + *val = mma8452_os_ratio[ret][i]; + return IIO_VAL_INT; } return -EINVAL; @@ -480,6 +581,21 @@ fail: return ret; } +static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode) +{ + int reg; + + reg = i2c_smbus_read_byte_data(data->client, + MMA8452_CTRL_REG2); + if (reg < 0) + return reg; + + reg &= ~MMA8452_CTRL_REG2_MODS_MASK; + reg |= mode << MMA8452_CTRL_REG2_MODS_SHIFT; + + return mma8452_change_config(data, MMA8452_CTRL_REG2, reg); +} + /* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */ static int mma8452_freefall_mode_enabled(struct mma8452_data *data) { @@ -518,11 +634,7 @@ static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state) val |= MMA8452_FF_MT_CFG_OAE; } - val = mma8452_change_config(data, chip->ev_cfg, val); - if (val) - return val; - - return 0; + return mma8452_change_config(data, chip->ev_cfg, val); } static int mma8452_set_hp_filter_frequency(struct mma8452_data *data, @@ -597,6 +709,14 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, return mma8452_change_config(data, MMA8452_DATA_CFG, data->data_cfg); + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + ret = mma8452_get_odr_index(data); + + for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { + if (mma8452_os_ratio[i][ret] == val) + return mma8452_set_power_mode(data, i); + } + default: return -EINVAL; } @@ -610,7 +730,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev, int *val, int *val2) { struct mma8452_data *data = iio_priv(indio_dev); - int ret, us; + int ret, us, power_mode; switch (info) { case IIO_EV_INFO_VALUE: @@ -629,7 +749,11 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev, if (ret < 0) return ret; - us = ret * mma8452_transient_time_step_us[ + power_mode = mma8452_get_power_mode(data); + if (power_mode < 0) + return power_mode; + + us = ret * mma8452_transient_time_step_us[power_mode][ mma8452_get_odr_index(data)]; *val = us / USEC_PER_SEC; *val2 = us % USEC_PER_SEC; @@ -677,8 +801,12 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev, val); case IIO_EV_INFO_PERIOD: + ret = mma8452_get_power_mode(data); + if (ret < 0) + return ret; + steps = (val * USEC_PER_SEC + val2) / - mma8452_transient_time_step_us[ + mma8452_transient_time_step_us[ret][ mma8452_get_odr_index(data)]; if (steps < 0 || steps > 0xff) @@ -785,7 +913,7 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev, static void mma8452_transient_interrupt(struct iio_dev *indio_dev) { struct mma8452_data *data = iio_priv(indio_dev); - s64 ts = iio_get_time_ns(); + s64 ts = iio_get_time_ns(indio_dev); int src; src = 
i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src); @@ -865,7 +993,7 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -978,7 +1106,8 @@ static struct attribute_group mma8452_event_attribute_group = { BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ BIT(IIO_CHAN_INFO_SCALE) | \ - BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \ + BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ .scan_index = idx, \ .scan_type = { \ .sign = 's', \ @@ -998,7 +1127,8 @@ static struct attribute_group mma8452_event_attribute_group = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ - BIT(IIO_CHAN_INFO_SCALE), \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ .scan_index = idx, \ .scan_type = { \ .sign = 's', \ @@ -1171,6 +1301,7 @@ static struct attribute *mma8452_attributes[] = { &iio_dev_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_in_accel_scale_available.dev_attr.attr, &iio_dev_attr_in_accel_filter_high_pass_3db_frequency_available.dev_attr.attr, + &iio_dev_attr_in_accel_oversampling_ratio_available.dev_attr.attr, NULL }; @@ -1444,8 +1575,8 @@ static int mma8452_probe(struct i2c_client *client, goto buffer_cleanup; ret = mma8452_set_freefall_mode(data, false); - if (ret) - return ret; + if (ret < 0) + goto buffer_cleanup; return 0; @@ -1558,5 +1689,5 @@ static struct i2c_driver mma8452_driver = { module_i2c_driver(mma8452_driver); MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); -MODULE_DESCRIPTION("Freescale MMA8452 accelerometer driver"); +MODULE_DESCRIPTION("Freescale / NXP MMA8452 accelerometer driver"); MODULE_LICENSE("GPL"); @@ -391,7 +391,7 @@ static irqreturn_t mma9551_event_handler(int irq, void *private) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_INCLI, 0, (mma_axis + 1), IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); out: mutex_unlock(&data->mutex); @@ -1001,7 +1001,7 @@ static irqreturn_t mma9553_irq_handler(int irq, void *private) struct iio_dev *indio_dev = private; struct mma9553_data *data = iio_priv(indio_dev); - data->timestamp = iio_get_time_ns(); + data->timestamp = iio_get_time_ns(indio_dev); /* * Since we only configure the interrupt pin when an * event is enabled, we are sure we have at least @@ -29,6 +29,7 @@ #define LSM330_ACCEL_DEV_NAME "lsm330_accel" #define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel" #define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel" +#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq" /** * struct st_sensors_platform_data - default accel platform data @@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = { int st_accel_allocate_ring(struct iio_dev *indio_dev) { - return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + return iio_triggered_buffer_setup(indio_dev, NULL, &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); } @@ -215,6 +215,22 @@ #define ST_ACCEL_6_IHL_IRQ_MASK 0x80 #define ST_ACCEL_6_MULTIREAD_BIT true +/* CUSTOM VALUES FOR SENSOR 7 */ +#define ST_ACCEL_7_ODR_ADDR 0x20 +#define ST_ACCEL_7_ODR_MASK 0x30 +#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00 +#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01 +#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02 
+#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03 +#define ST_ACCEL_7_PW_ADDR 0x20 +#define ST_ACCEL_7_PW_MASK 0xc0 +#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488) +#define ST_ACCEL_7_BDU_ADDR 0x21 +#define ST_ACCEL_7_BDU_MASK 0x40 +#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21 +#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04 +#define ST_ACCEL_7_MULTIREAD_BIT false + static const struct iio_chan_spec st_accel_8bit_channels[] = { ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), @@ -662,6 +678,54 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT, .bootime = 2, }, + { + /* No WAI register present */ + .sensors_supported = { + [0] = LIS3L02DQ_ACCEL_DEV_NAME, + }, + .ch = (struct iio_chan_spec *)st_accel_12bit_channels, + .odr = { + .addr = ST_ACCEL_7_ODR_ADDR, + .mask = ST_ACCEL_7_ODR_MASK, + .odr_avl = { + { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, }, + { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, }, + { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, }, + { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, }, + }, + }, + .pw = { + .addr = ST_ACCEL_7_PW_ADDR, + .mask = ST_ACCEL_7_PW_MASK, + .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE, + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, + }, + .enable_axis = { + .addr = ST_SENSORS_DEFAULT_AXIS_ADDR, + .mask = ST_SENSORS_DEFAULT_AXIS_MASK, + }, + .fs = { + .fs_avl = { + [0] = { + .num = ST_ACCEL_FS_AVL_2G, + .gain = ST_ACCEL_7_FS_AVL_2_GAIN, + }, + }, + }, + /* + * The part has a BDU bit but if set the data is never + * updated so don't set it. + */ + .bdu = { + }, + .drdy_irq = { + .addr = ST_ACCEL_7_DRDY_IRQ_ADDR, + .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK, + .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + }, + .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT, + .bootime = 2, + }, }; static int st_accel_read_raw(struct iio_dev *indio_dev, @@ -741,6 +805,7 @@ static const struct iio_info accel_info = { static const struct iio_trigger_ops st_accel_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, + .validate_device = st_sensors_validate_device, }; #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) #else @@ -757,13 +822,15 @@ int st_accel_common_probe(struct iio_dev *indio_dev) indio_dev->info = &accel_info; mutex_init(&adata->tb.buf_lock); - st_sensors_power_enable(indio_dev); + err = st_sensors_power_enable(indio_dev); + if (err) + return err; err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_accel_sensors_settings), st_accel_sensors_settings); if (err < 0) - return err; + goto st_accel_power_off; adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS; adata->multiread_bit = adata->sensor_settings->multi_read_bit; @@ -780,11 +847,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev) err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); if (err < 0) - return err; + goto st_accel_power_off; err = st_accel_allocate_ring(indio_dev); if (err < 0) - return err; + goto st_accel_power_off; if (irq > 0) { err = st_sensors_allocate_trigger(indio_dev, @@ -807,6 +874,8 @@ st_accel_device_register_error: st_sensors_deallocate_trigger(indio_dev); st_accel_probe_trigger_error: st_accel_deallocate_ring(indio_dev); +st_accel_power_off: + st_sensors_power_disable(indio_dev); return err; } @@ -80,6 +80,10 @@ static const struct of_device_id st_accel_of_match[] = { .compatible = "st,h3lis331dl-accel", .data = H3LIS331DL_DRIVER_NAME, }, + { + .compatible = "st,lis3l02dq", + .data = LIS3L02DQ_ACCEL_DEV_NAME, + }, {}, }; 
MODULE_DEVICE_TABLE(of, st_accel_of_match); @@ -130,6 +134,7 @@ static const struct i2c_device_id st_accel_id_table[] = { { LSM330_ACCEL_DEV_NAME }, { LSM303AGR_ACCEL_DEV_NAME }, { LIS2DH12_ACCEL_DEV_NAME }, + { LIS3L02DQ_ACCEL_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(i2c, st_accel_id_table); @@ -59,6 +59,7 @@ static const struct spi_device_id st_accel_id_table[] = { { LSM330_ACCEL_DEV_NAME }, { LSM303AGR_ACCEL_DEV_NAME }, { LIS2DH12_ACCEL_DEV_NAME }, + { LIS3L02DQ_ACCEL_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(spi, st_accel_id_table); @@ -153,6 +153,18 @@ config AXP288_ADC To compile this driver as a module, choose M here: the module will be called axp288_adc. +config BCM_IPROC_ADC + tristate "Broadcom IPROC ADC driver" + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on MFD_SYSCON + default ARCH_BCM_CYGNUS + help + Say Y here if you want to add support for the Broadcom static + ADC driver. + + Broadcom iProc ADC driver. Broadcom iProc ADC controller has 8 + channels. The driver allows the user to read voltage values. + config BERLIN2_ADC tristate "Marvell Berlin2 ADC driver" depends on ARCH_BERLIN @@ -16,6 +16,7 @@ obj-$(CONFIG_AD799X) += ad799x.o obj-$(CONFIG_AT91_ADC) += at91_adc.o obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o obj-$(CONFIG_AXP288_ADC) += axp288_adc.o +obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o @@ -154,12 +154,11 @@ static int ad7266_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; - - ret = ad7266_read_single(st, val, chan->address); + ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; + ret = ad7266_read_single(st, val, chan->address); + iio_device_release_direct_mode(indio_dev); *val = (*val >> 2) & 0xfff; if (chan->scan_type.sign == 's') @@ -396,8 +395,8 @@ static int ad7266_probe(struct spi_device *spi) st = iio_priv(indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vref"); - if (!IS_ERR_OR_NULL(st->reg)) { + st->reg = devm_regulator_get_optional(&spi->dev, "vref"); + if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) return ret; @@ -408,6 +407,9 @@ static int ad7266_probe(struct spi_device *spi) st->vref_mv = ret / 1000; } else { + /* Any other error indicates that the regulator does exist */ + if (PTR_ERR(st->reg) != -ENODEV) + return PTR_ERR(st->reg); /* Use internal reference */ st->vref_mv = 2500; } @@ -438,6 +440,7 @@ static int ad7266_probe(struct spi_device *spi) st->spi = spi; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &ad7266_info; @@ -115,7 +115,7 @@ static irqreturn_t ad7291_event_handler(int irq, void *private) u16 t_status, v_status; u16 command; int i; - s64 timestamp = iio_get_time_ns(); + s64 timestamp = iio_get_time_ns(indio_dev); if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status)) return IRQ_HANDLED; @@ -505,6 +505,7 @@ static int ad7291_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ad7291_channels); indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->info = &ad7291_info; indio_dev->modes = INDIO_DIRECT_MODE; @@ -163,7 +163,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf, - iio_get_time_ns()); + 
iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -315,6 +315,7 @@ static int ad7298_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = ad7298_channels; indio_dev->num_channels = ARRAY_SIZE(ad7298_channels); @@ -70,7 +70,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, st->data, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -106,12 +106,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else - ret = ad7476_scan_direct(st); - mutex_unlock(&indio_dev->mlock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + ret = ad7476_scan_direct(st); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; @@ -228,6 +227,7 @@ static int ad7476_probe(struct spi_device *spi) /* Establish that the iio_dev is a child of the spi device */ indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channel; @@ -272,30 +272,22 @@ static ssize_t ad7791_write_frequency(struct device *dev, struct ad7791_state *st = iio_priv(indio_dev); int i, ret; - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) { - mutex_unlock(&indio_dev->mlock); - return -EBUSY; - } - mutex_unlock(&indio_dev->mlock); - - ret = -EINVAL; - - for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) { - if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) { - - mutex_lock(&indio_dev->mlock); - st->filter &= ~AD7791_FILTER_RATE_MASK; - st->filter |= i; - ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, - sizeof(st->filter), st->filter); - mutex_unlock(&indio_dev->mlock); - ret = 0; + for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) + if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) break; - } - } + if (i == ARRAY_SIZE(ad7791_sample_freq_avail)) + return -EINVAL; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + st->filter &= ~AD7791_FILTER_RATE_MASK; + st->filter |= i; + ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter), + st->filter); + iio_device_release_direct_mode(indio_dev); - return ret ? 
ret : len; + return len; } static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, @@ -383,6 +375,7 @@ static int ad7791_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->info->channels; @@ -369,13 +369,6 @@ static ssize_t ad7793_write_frequency(struct device *dev, long lval; int i, ret; - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) { - mutex_unlock(&indio_dev->mlock); - return -EBUSY; - } - mutex_unlock(&indio_dev->mlock); - ret = kstrtol(buf, 10, &lval); if (ret) return ret; @@ -383,20 +376,21 @@ static ssize_t ad7793_write_frequency(struct device *dev, if (lval == 0) return -EINVAL; - ret = -EINVAL; - for (i = 0; i < 16; i++) - if (lval == st->chip_info->sample_freq_avail[i]) { - mutex_lock(&indio_dev->mlock); - st->mode &= ~AD7793_MODE_RATE(-1); - st->mode |= AD7793_MODE_RATE(i); - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, - sizeof(st->mode), st->mode); - mutex_unlock(&indio_dev->mlock); - ret = 0; - } + if (lval == st->chip_info->sample_freq_avail[i]) + break; + if (i == 16) + return -EINVAL; - return ret ? ret : len; + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + st->mode &= ~AD7793_MODE_RATE(-1); + st->mode |= AD7793_MODE_RATE(i); + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); + iio_device_release_direct_mode(indio_dev); + + return len; } static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, @@ -790,6 +784,7 @@ static int ad7793_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channels; @@ -122,7 +122,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, st->data, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -156,12 +156,11 @@ static int ad7887_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else - ret = ad7887_scan_direct(st, chan->address); - mutex_unlock(&indio_dev->mlock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + ret = ad7887_scan_direct(st, chan->address); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; @@ -265,6 +264,7 @@ static int ad7887_probe(struct spi_device *spi) /* Establish that the iio_dev is a child of the spi device */ indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->info = &ad7887_info; indio_dev->modes = INDIO_DIRECT_MODE; @@ -181,7 +181,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -233,12 +233,11 @@ static int ad7923_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else - ret = ad7923_scan_direct(st, chan->address); - mutex_unlock(&indio_dev->mlock); + ret = iio_device_claim_direct_mode(indio_dev); + if 
(ret) + return ret; + ret = ad7923_scan_direct(st, chan->address); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; @@ -289,6 +288,7 @@ static int ad7923_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = info->channels; indio_dev->num_channels = info->num_channels; @@ -212,7 +212,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p) goto out; iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); @@ -282,12 +282,11 @@ static int ad799x_read_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_RAW: - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else - ret = ad799x_scan_direct(st, chan->scan_index); - mutex_unlock(&indio_dev->mlock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + ret = ad799x_scan_direct(st, chan->scan_index); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; @@ -395,11 +394,9 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev, struct ad799x_state *st = iio_priv(indio_dev); int ret; - mutex_lock(&indio_dev->mlock); - if (iio_buffer_enabled(indio_dev)) { - ret = -EBUSY; - goto done; - } + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; if (state) st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT; @@ -412,10 +409,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev, st->config &= ~AD7998_ALERT_EN; ret = ad799x_write_config(st, st->config); - -done: - mutex_unlock(&indio_dev->mlock); - + iio_device_release_direct_mode(indio_dev); return ret; } @@ -508,7 +502,7 @@ static irqreturn_t ad799x_event_handler(int irq, void *private) (i >> 1), IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } done: @@ -812,6 +806,7 @@ static int ad799x_probe(struct i2c_client *client, st->client = client; indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = id->name; indio_dev->info = st->chip_config->info; diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c new file mode 100644 index 000000000000..21d38c8af21e --- /dev/null +++ b/drivers/iio/adc/bcm_iproc_adc.c @@ -0,0 +1,644 @@ +/* + * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> + +#include <linux/iio/iio.h> + +/* The registers below are common to the IPROC ADC and touchscreen IP */ +#define IPROC_REGCTL1 0x00 +#define IPROC_REGCTL2 0x04 +#define IPROC_INTERRUPT_THRES 0x08 +#define IPROC_INTERRUPT_MASK 0x0c +#define IPROC_INTERRUPT_STATUS 0x10 +#define IPROC_ANALOG_CONTROL 0x1c +#define IPROC_CONTROLLER_STATUS 0x14 +#define IPROC_AUX_DATA 0x20 +#define IPROC_SOFT_BYPASS_CONTROL 0x38 +#define IPROC_SOFT_BYPASS_DATA 0x3C + +/* IPROC ADC Channel register offsets */ +#define IPROC_ADC_CHANNEL_REGCTL1 0x800 +#define IPROC_ADC_CHANNEL_REGCTL2 0x804 +#define IPROC_ADC_CHANNEL_STATUS 0x808 +#define IPROC_ADC_CHANNEL_INTERRUPT_STATUS 0x80c +#define IPROC_ADC_CHANNEL_INTERRUPT_MASK 0x810 +#define IPROC_ADC_CHANNEL_DATA 0x814 +#define IPROC_ADC_CHANNEL_OFFSET 0x20 + +/* Bit definitions for IPROC_REGCTL2 */ +#define IPROC_ADC_AUXIN_SCAN_ENA BIT(0) +#define IPROC_ADC_PWR_LDO BIT(5) +#define IPROC_ADC_PWR_ADC BIT(4) +#define IPROC_ADC_PWR_BG BIT(3) +#define IPROC_ADC_CONTROLLER_EN BIT(17) + +/* Bit definitions for IPROC_INTERRUPT_MASK and IPROC_INTERRUPT_STATUS */ +#define IPROC_ADC_AUXDATA_RDY_INTR BIT(3) +#define IPROC_ADC_INTR 9 +#define IPROC_ADC_INTR_MASK (0xFF << IPROC_ADC_INTR) + +/* Bit definitions for IPROC_ANALOG_CONTROL */ +#define IPROC_ADC_CHANNEL_SEL 11 +#define IPROC_ADC_CHANNEL_SEL_MASK (0x7 << IPROC_ADC_CHANNEL_SEL) + +/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL1 */ +#define IPROC_ADC_CHANNEL_ROUNDS 0x2 +#define IPROC_ADC_CHANNEL_ROUNDS_MASK (0x3F << IPROC_ADC_CHANNEL_ROUNDS) +#define IPROC_ADC_CHANNEL_MODE 0x1 +#define IPROC_ADC_CHANNEL_MODE_MASK (0x1 << IPROC_ADC_CHANNEL_MODE) +#define IPROC_ADC_CHANNEL_MODE_TDM 0x1 +#define IPROC_ADC_CHANNEL_MODE_SNAPSHOT 0x0 +#define IPROC_ADC_CHANNEL_ENABLE 0x0 +#define IPROC_ADC_CHANNEL_ENABLE_MASK 0x1 + +/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL2 */ +#define IPROC_ADC_CHANNEL_WATERMARK 0x0 +#define IPROC_ADC_CHANNEL_WATERMARK_MASK \ + (0x3F << IPROC_ADC_CHANNEL_WATERMARK) + +#define IPROC_ADC_WATER_MARK_LEVEL 0x1 + +/* Bit definitions for IPROC_ADC_CHANNEL_STATUS */ +#define IPROC_ADC_CHANNEL_DATA_LOST 0x0 +#define IPROC_ADC_CHANNEL_DATA_LOST_MASK \ + (0x0 << IPROC_ADC_CHANNEL_DATA_LOST) +#define IPROC_ADC_CHANNEL_VALID_ENTERIES 0x1 +#define IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK \ + (0xFF << IPROC_ADC_CHANNEL_VALID_ENTERIES) +#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES 0x9 +#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES_MASK \ + (0xFF << IPROC_ADC_CHANNEL_TOTAL_ENTERIES) + +/* Bit definitions for IPROC_ADC_CHANNEL_INTERRUPT_MASK */ +#define IPROC_ADC_CHANNEL_WTRMRK_INTR 0x0 +#define IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK \ + (0x1 << IPROC_ADC_CHANNEL_WTRMRK_INTR) +#define IPROC_ADC_CHANNEL_FULL_INTR 0x1 +#define IPROC_ADC_CHANNEL_FULL_INTR_MASK \ + (0x1 << IPROC_ADC_CHANNEL_FULL_INTR) +#define IPROC_ADC_CHANNEL_EMPTY_INTR 0x2 +#define IPROC_ADC_CHANNEL_EMPTY_INTR_MASK \ + (0x1 << IPROC_ADC_CHANNEL_EMPTY_INTR) + +#define IPROC_ADC_WATER_MARK_INTR_ENABLE 0x1 + +/* Number of times to retry a set of the interrupt mask reg */ +#define IPROC_ADC_INTMASK_RETRY_ATTEMPTS 10 + +#define IPROC_ADC_READ_TIMEOUT (HZ*2) + +#define iproc_adc_dbg_reg(dev, priv, reg) \ +do { \ + u32 val; \ + regmap_read(priv->regmap, reg, &val); \ + dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \ +} while (0) 
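+/*
+ * Why the do { ... } while (0) wrapper on iproc_adc_dbg_reg(): it makes
+ * the multi-statement macro expand to a single C statement, so a minimal
+ * usage sketch such as the following (dump_regs is a hypothetical flag,
+ * not a driver field) stays well-formed even without braces:
+ *
+ *	if (dump_regs)
+ *		iproc_adc_dbg_reg(dev, priv, IPROC_REGCTL1);
+ *	else
+ *		dev_dbg(dev, "register dump skipped\n");
+ *
+ * Without the wrapper, the expansion's trailing statements would fall
+ * outside the if and the else would no longer pair with it.
+ */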
+ +struct iproc_adc_priv { + struct regmap *regmap; + struct clk *adc_clk; + struct mutex mutex; + int irqno; + int chan_val; + int chan_id; + struct completion completion; +}; + +static void iproc_adc_reg_dump(struct iio_dev *indio_dev) +{ + struct device *dev = &indio_dev->dev; + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + + iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL1); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL2); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_THRES); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_MASK); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_STATUS); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_CONTROLLER_STATUS); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_ANALOG_CONTROL); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_AUX_DATA); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_CONTROL); + iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); +} + +static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) +{ + u32 channel_intr_status; + u32 intr_status; + u32 intr_mask; + struct iio_dev *indio_dev = data; + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + + /* + * This interrupt is shared with the touchscreen driver. + * Make sure this interrupt is intended for us. + * Handle only ADC channel specific interrupts. + */ + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &intr_mask); + intr_status = intr_status & intr_mask; + channel_intr_status = (intr_status & IPROC_ADC_INTR_MASK) >> + IPROC_ADC_INTR; + if (channel_intr_status) + return IRQ_WAKE_THREAD; + + return IRQ_NONE; +} + +static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) +{ + irqreturn_t retval = IRQ_NONE; + struct iproc_adc_priv *adc_priv; + struct iio_dev *indio_dev = data; + unsigned int valid_entries; + u32 intr_status; + u32 intr_channels; + u32 channel_status; + u32 ch_intr_status; + + adc_priv = iio_priv(indio_dev); + + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); + dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", + intr_status); + + intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; + if (intr_channels) { + regmap_read(adc_priv->regmap, + IPROC_ADC_CHANNEL_INTERRUPT_STATUS + + IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id, + &ch_intr_status); + + if (ch_intr_status & IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK) { + regmap_read(adc_priv->regmap, + IPROC_ADC_CHANNEL_STATUS + + IPROC_ADC_CHANNEL_OFFSET * + adc_priv->chan_id, + &channel_status); + + valid_entries = ((channel_status & + IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK) >> + IPROC_ADC_CHANNEL_VALID_ENTERIES); + if (valid_entries >= 1) { + regmap_read(adc_priv->regmap, + IPROC_ADC_CHANNEL_DATA + + IPROC_ADC_CHANNEL_OFFSET * + adc_priv->chan_id, + &adc_priv->chan_val); + complete(&adc_priv->completion); + } else { + dev_err(&indio_dev->dev, + "No data rcvd on channel %d\n", + adc_priv->chan_id); + } + regmap_write(adc_priv->regmap, + IPROC_ADC_CHANNEL_INTERRUPT_MASK + + IPROC_ADC_CHANNEL_OFFSET * + adc_priv->chan_id, + (ch_intr_status & + ~(IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK))); + } + regmap_write(adc_priv->regmap, + IPROC_ADC_CHANNEL_INTERRUPT_STATUS + + IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id, + ch_intr_status); + regmap_write(adc_priv->regmap, IPROC_INTERRUPT_STATUS, + intr_channels); + retval = IRQ_HANDLED; + } + + return retval; +} + +static int iproc_adc_do_read(struct iio_dev *indio_dev, + int channel, + u16 *p_adc_data) +{ + int read_len = 0; 
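+ /*
+ * Summary of the flow below: latch the requested channel in adc_priv,
+ * arm the completion, configure the channel for a single snapshot
+ * round with a watermark of one entry, unmask its water-mark
+ * interrupt, and then sleep on the completion until the threaded
+ * handler above stores the sample in adc_priv->chan_val.
+ */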
+ u32 val; + u32 mask; + u32 val_check; + int failed_cnt = 0; + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + + mutex_lock(&adc_priv->mutex); + + /* + * After a read is complete the ADC interrupts will be disabled so + * we can assume this section of code is safe from interrupts. + */ + adc_priv->chan_val = -1; + adc_priv->chan_id = channel; + + reinit_completion(&adc_priv->completion); + /* Clear any pending interrupt */ + regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS, + IPROC_ADC_INTR_MASK | IPROC_ADC_AUXDATA_RDY_INTR, + ((0x0 << channel) << IPROC_ADC_INTR) | + IPROC_ADC_AUXDATA_RDY_INTR); + + /* Configure channel for snapshot mode and enable */ + val = (BIT(IPROC_ADC_CHANNEL_ROUNDS) | + (IPROC_ADC_CHANNEL_MODE_SNAPSHOT << IPROC_ADC_CHANNEL_MODE) | + (0x1 << IPROC_ADC_CHANNEL_ENABLE)); + + mask = IPROC_ADC_CHANNEL_ROUNDS_MASK | IPROC_ADC_CHANNEL_MODE_MASK | + IPROC_ADC_CHANNEL_ENABLE_MASK; + regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL1 + + IPROC_ADC_CHANNEL_OFFSET * channel), + mask, val); + + /* Set the Watermark for a channel */ + regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL2 + + IPROC_ADC_CHANNEL_OFFSET * channel), + IPROC_ADC_CHANNEL_WATERMARK_MASK, + 0x1); + + /* Enable water mark interrupt */ + regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_INTERRUPT_MASK + + IPROC_ADC_CHANNEL_OFFSET * + channel), + IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK, + IPROC_ADC_WATER_MARK_INTR_ENABLE); + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val); + + /* Enable ADC interrupt for a channel */ + val |= (BIT(channel) << IPROC_ADC_INTR); + regmap_write(adc_priv->regmap, IPROC_INTERRUPT_MASK, val); + + /* + * There seems to be a very rare issue where writing to this register + * does not take effect. To work around the issue we will try multiple + * writes. In total we will spend about 10*10 = 100 us attempting this. + * Testing has shown that this may loop a few times, but we have never + * hit the full count. + */ + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check); + while (val_check != val) { + failed_cnt++; + + if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS) + break; + + udelay(10); + regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK, + IPROC_ADC_INTR_MASK, + ((0x1 << channel) << + IPROC_ADC_INTR)); + + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check); + } + + if (failed_cnt) { + dev_dbg(&indio_dev->dev, + "IntMask failed (%d times)", failed_cnt); + if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS) { + dev_err(&indio_dev->dev, + "IntMask set failed. Read will likely fail."); + read_len = -EIO; + goto adc_err; + } + } + regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check); + + if (wait_for_completion_timeout(&adc_priv->completion, + IPROC_ADC_READ_TIMEOUT) > 0) { + + /* Only the lower 16 bits are relevant */ + *p_adc_data = adc_priv->chan_val & 0xFFFF; + read_len = sizeof(*p_adc_data); + + } else { + /* + * We never got the interrupt, something went wrong. + * Perhaps the interrupt may still be coming, we do not want + * that now. Let's disable the ADC interrupt, and clear the + * status to put it back into normal state. 
+ */ + read_len = -ETIMEDOUT; + goto adc_err; + } + mutex_unlock(&adc_priv->mutex); + + return read_len; + +adc_err: + regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK, + IPROC_ADC_INTR_MASK, + ((0x0 << channel) << IPROC_ADC_INTR)); + + regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS, + IPROC_ADC_INTR_MASK, + ((0x0 << channel) << IPROC_ADC_INTR)); + + dev_err(&indio_dev->dev, "Timed out waiting for ADC data!\n"); + iproc_adc_reg_dump(indio_dev); + mutex_unlock(&adc_priv->mutex); + + return read_len; +} + +static int iproc_adc_enable(struct iio_dev *indio_dev) +{ + u32 val; + u32 channel_id; + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + int ret; + + /* Set i_amux = 3b'000, select channel 0 */ + ret = regmap_update_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL, + IPROC_ADC_CHANNEL_SEL_MASK, 0); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write IPROC_ANALOG_CONTROL %d\n", ret); + return ret; + } + adc_priv->chan_val = -1; + + /* + * PWR up LDO, ADC, and Band Gap (0 to enable) + * Also enable ADC controller (set high) + */ + ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to read IPROC_REGCTL2 %d\n", ret); + return ret; + } + + val &= ~(IPROC_ADC_PWR_LDO | IPROC_ADC_PWR_ADC | IPROC_ADC_PWR_BG); + + ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write IPROC_REGCTL2 %d\n", ret); + return ret; + } + + ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to read IPROC_REGCTL2 %d\n", ret); + return ret; + } + + val |= IPROC_ADC_CONTROLLER_EN; + ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write IPROC_REGCTL2 %d\n", ret); + return ret; + } + + for (channel_id = 0; channel_id < indio_dev->num_channels; + channel_id++) { + ret = regmap_write(adc_priv->regmap, + IPROC_ADC_CHANNEL_INTERRUPT_MASK + + IPROC_ADC_CHANNEL_OFFSET * channel_id, 0); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write ADC_CHANNEL_INTERRUPT_MASK %d\n", + ret); + return ret; + } + + ret = regmap_write(adc_priv->regmap, + IPROC_ADC_CHANNEL_INTERRUPT_STATUS + + IPROC_ADC_CHANNEL_OFFSET * channel_id, 0); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write ADC_CHANNEL_INTERRUPT_STATUS %d\n", + ret); + return ret; + } + } + + return 0; +} + +static void iproc_adc_disable(struct iio_dev *indio_dev) +{ + u32 val; + int ret; + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + + ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to read IPROC_REGCTL2 %d\n", ret); + return; + } + + val &= ~IPROC_ADC_CONTROLLER_EN; + ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val); + if (ret) { + dev_err(&indio_dev->dev, + "failed to write IPROC_REGCTL2 %d\n", ret); + return; + } +} + +static int iproc_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask) +{ + u16 adc_data; + int err; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + err = iproc_adc_do_read(indio_dev, chan->channel, &adc_data); + if (err < 0) + return err; + *val = adc_data; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + *val = 1800; + *val2 = 10; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static const struct iio_info iproc_adc_iio_info = { + .read_raw = 
&iproc_adc_read_raw, + .driver_module = THIS_MODULE, +}; + +#define IPROC_ADC_CHANNEL(_index, _id) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = _index, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .datasheet_name = _id, \ +} + +static const struct iio_chan_spec iproc_adc_iio_channels[] = { + IPROC_ADC_CHANNEL(0, "adc0"), + IPROC_ADC_CHANNEL(1, "adc1"), + IPROC_ADC_CHANNEL(2, "adc2"), + IPROC_ADC_CHANNEL(3, "adc3"), + IPROC_ADC_CHANNEL(4, "adc4"), + IPROC_ADC_CHANNEL(5, "adc5"), + IPROC_ADC_CHANNEL(6, "adc6"), + IPROC_ADC_CHANNEL(7, "adc7"), +}; + +static int iproc_adc_probe(struct platform_device *pdev) +{ + struct iproc_adc_priv *adc_priv; + struct iio_dev *indio_dev = NULL; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, + sizeof(*adc_priv)); + if (!indio_dev) { + dev_err(&pdev->dev, "failed to allocate iio device\n"); + return -ENOMEM; + } + + adc_priv = iio_priv(indio_dev); + platform_set_drvdata(pdev, indio_dev); + + mutex_init(&adc_priv->mutex); + + init_completion(&adc_priv->completion); + + adc_priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "adc-syscon"); + if (IS_ERR(adc_priv->regmap)) { + dev_err(&pdev->dev, "failed to get handle for tsc syscon\n"); + ret = PTR_ERR(adc_priv->regmap); + return ret; + } + + adc_priv->adc_clk = devm_clk_get(&pdev->dev, "tsc_clk"); + if (IS_ERR(adc_priv->adc_clk)) { + dev_err(&pdev->dev, + "failed getting clock tsc_clk\n"); + ret = PTR_ERR(adc_priv->adc_clk); + return ret; + } + + adc_priv->irqno = platform_get_irq(pdev, 0); + if (adc_priv->irqno <= 0) { + dev_err(&pdev->dev, "platform_get_irq failed\n"); + ret = -ENODEV; + return ret; + } + + ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2, + IPROC_ADC_AUXIN_SCAN_ENA, 0); + if (ret) { + dev_err(&pdev->dev, "failed to write IPROC_REGCTL2 %d\n", ret); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, + iproc_adc_interrupt_thread, + iproc_adc_interrupt_handler, + IRQF_SHARED, "iproc-adc", indio_dev); + if (ret) { + dev_err(&pdev->dev, "request_irq error %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(adc_priv->adc_clk); + if (ret) { + dev_err(&pdev->dev, + "clk_prepare_enable failed %d\n", ret); + return ret; + } + + ret = iproc_adc_enable(indio_dev); + if (ret) { + dev_err(&pdev->dev, "failed to enable adc %d\n", ret); + goto err_adc_enable; + } + + indio_dev->name = "iproc-static-adc"; + indio_dev->dev.parent = &pdev->dev; + indio_dev->dev.of_node = pdev->dev.of_node; + indio_dev->info = &iproc_adc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = iproc_adc_iio_channels; + indio_dev->num_channels = ARRAY_SIZE(iproc_adc_iio_channels); + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&pdev->dev, "iio_device_register failed:err %d\n", ret); + goto err_clk; + } + + return 0; + +err_clk: + iproc_adc_disable(indio_dev); +err_adc_enable: + clk_disable_unprepare(adc_priv->adc_clk); + + return ret; +} + +static int iproc_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct iproc_adc_priv *adc_priv = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + iproc_adc_disable(indio_dev); + clk_disable_unprepare(adc_priv->adc_clk); + + return 0; +} + +static const struct of_device_id iproc_adc_of_match[] = { + {.compatible = "brcm,iproc-static-adc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, iproc_adc_of_match); + +static struct platform_driver 
iproc_adc_driver = { + .probe = iproc_adc_probe, + .remove = iproc_adc_remove, + .driver = { + .name = "iproc-static-adc", + .of_match_table = of_match_ptr(iproc_adc_of_match), + }, +}; +module_platform_driver(iproc_adc_driver); + +MODULE_DESCRIPTION("Broadcom iProc ADC controller driver"); +MODULE_AUTHOR("Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>"); +MODULE_LICENSE("GPL v2"); @@ -186,7 +186,7 @@ done: if (!sample_invalid) iio_push_to_buffers_with_timestamp(indio_dev, data, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -400,7 +400,7 @@ static void hi8435_iio_push_event(struct iio_dev *idev, unsigned int val) iio_push_event(idev, IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i, IIO_EV_TYPE_THRESH, dir), - iio_get_time_ns()); + iio_get_time_ns(idev)); } } @@ -455,6 +455,7 @@ static int hi8435_probe(struct spi_device *spi) mutex_init(&priv->lock); idev->dev.parent = &spi->dev; + idev->dev.of_node = spi->dev.of_node; idev->name = spi_get_device_id(spi)->name; idev->modes = INDIO_DIRECT_MODE; idev->info = &hi8435_info; @@ -465,7 +465,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) s64 time_a, time_b; unsigned int alert; - time_a = iio_get_time_ns(); + time_a = iio_get_time_ns(indio_dev); /* * Because the timer thread and the chip conversion clock @@ -504,7 +504,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) data[i++] = val; } - time_b = iio_get_time_ns(); + time_b = iio_get_time_ns(indio_dev); iio_push_to_buffers_with_timestamp(indio_dev, (unsigned int *)data, time_a); @@ -554,7 +554,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev) dev_dbg(&indio_dev->dev, "Async readout mode: %d\n", chip->allow_async_readout); - chip->prev_ns = iio_get_time_ns(); + chip->prev_ns = iio_get_time_ns(indio_dev); chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev, "%s:%d-%uus", indio_dev->name, indio_dev->id, @@ -691,6 +691,7 @@ static int ina2xx_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE; indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->channels = ina2xx_channels; indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels); indio_dev->name = id->name; @@ -426,6 +426,7 @@ static int max1027_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->info = &max1027_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->info->channels; @@ -25,6 +25,8 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -788,7 +790,7 @@ static irqreturn_t max1363_event_handler(int irq, void *private) { struct iio_dev *indio_dev = private; struct max1363_state *st = iio_priv(indio_dev); - s64 timestamp = iio_get_time_ns(); + s64 timestamp = iio_get_time_ns(indio_dev); unsigned long mask, loc; u8 rx; u8 tx[2] = { st->setupbyte, @@ -1506,7 +1508,8 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p) if (b_sent < 0) goto done_free; - iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, + iio_get_time_ns(indio_dev)); done_free: kfree(rxbuf); @@ -1516,6 +1519,56 @@ done: return IRQ_HANDLED; } +#ifdef CONFIG_OF + +#define MAX1363_COMPATIBLE(of_compatible, cfg) { \ + .compatible = 
of_compatible, \ + .data = &max1363_chip_info_tbl[cfg], \ +} + +static const struct of_device_id max1363_of_match[] = { + MAX1363_COMPATIBLE("maxim,max1361", max1361), + MAX1363_COMPATIBLE("maxim,max1362", max1362), + MAX1363_COMPATIBLE("maxim,max1363", max1363), + MAX1363_COMPATIBLE("maxim,max1364", max1364), + MAX1363_COMPATIBLE("maxim,max1036", max1036), + MAX1363_COMPATIBLE("maxim,max1037", max1037), + MAX1363_COMPATIBLE("maxim,max1038", max1038), + MAX1363_COMPATIBLE("maxim,max1039", max1039), + MAX1363_COMPATIBLE("maxim,max1136", max1136), + MAX1363_COMPATIBLE("maxim,max1137", max1137), + MAX1363_COMPATIBLE("maxim,max1138", max1138), + MAX1363_COMPATIBLE("maxim,max1139", max1139), + MAX1363_COMPATIBLE("maxim,max1236", max1236), + MAX1363_COMPATIBLE("maxim,max1237", max1237), + MAX1363_COMPATIBLE("maxim,max1238", max1238), + MAX1363_COMPATIBLE("maxim,max1239", max1239), + MAX1363_COMPATIBLE("maxim,max11600", max11600), + MAX1363_COMPATIBLE("maxim,max11601", max11601), + MAX1363_COMPATIBLE("maxim,max11602", max11602), + MAX1363_COMPATIBLE("maxim,max11603", max11603), + MAX1363_COMPATIBLE("maxim,max11604", max11604), + MAX1363_COMPATIBLE("maxim,max11605", max11605), + MAX1363_COMPATIBLE("maxim,max11606", max11606), + MAX1363_COMPATIBLE("maxim,max11607", max11607), + MAX1363_COMPATIBLE("maxim,max11608", max11608), + MAX1363_COMPATIBLE("maxim,max11609", max11609), + MAX1363_COMPATIBLE("maxim,max11610", max11610), + MAX1363_COMPATIBLE("maxim,max11611", max11611), + MAX1363_COMPATIBLE("maxim,max11612", max11612), + MAX1363_COMPATIBLE("maxim,max11613", max11613), + MAX1363_COMPATIBLE("maxim,max11614", max11614), + MAX1363_COMPATIBLE("maxim,max11615", max11615), + MAX1363_COMPATIBLE("maxim,max11616", max11616), + MAX1363_COMPATIBLE("maxim,max11617", max11617), + MAX1363_COMPATIBLE("maxim,max11644", max11644), + MAX1363_COMPATIBLE("maxim,max11645", max11645), + MAX1363_COMPATIBLE("maxim,max11646", max11646), + MAX1363_COMPATIBLE("maxim,max11647", max11647), + { /* sentinel */ } +}; +#endif + static int max1363_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1523,6 +1576,7 @@ static int max1363_probe(struct i2c_client *client, struct max1363_state *st; struct iio_dev *indio_dev; struct regulator *vref; + const struct of_device_id *match; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(struct max1363_state)); @@ -1549,7 +1603,12 @@ static int max1363_probe(struct i2c_client *client, /* this is only used for device removal purposes */ i2c_set_clientdata(client, indio_dev); - st->chip_info = &max1363_chip_info_tbl[id->driver_data]; + match = of_match_device(of_match_ptr(max1363_of_match), + &client->dev); + if (match) + st->chip_info = of_device_get_match_data(&client->dev); + else + st->chip_info = &max1363_chip_info_tbl[id->driver_data]; st->client = client; st->vref_uv = st->chip_info->int_vref_mv * 1000; @@ -1587,6 +1646,7 @@ static int max1363_probe(struct i2c_client *client, /* Establish that the iio_dev is a child of the i2c device */ indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = id->name; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; @@ -1692,6 +1752,7 @@ MODULE_DEVICE_TABLE(i2c, max1363_id); static struct i2c_driver max1363_driver = { .driver = { .name = "max1363", + .of_match_table = of_match_ptr(max1363_of_match), }, .probe = max1363_probe, .remove = max1363_remove, @@ -308,6 +308,7 @@ static int mcp320x_probe(struct spi_device *spi) adc->spi = 
spi; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mcp320x_info; @@ -352,6 +352,7 @@ static int mcp3422_probe(struct i2c_client *client, mutex_init(&adc->lock); indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = dev_name(&client->dev); indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mcp3422_info; @@ -373,13 +373,6 @@ static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc) return LRADC_CTRL0_MX28_PLATE_MASK; } -static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc) -{ - if (lradc->soc == IMX23_LRADC) - return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK; - return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK; -} - static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) @@ -1120,18 +1113,16 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc) { struct input_dev *input; struct device *dev = lradc->dev; - int ret; if (!lradc->use_touchscreen) return 0; - input = input_allocate_device(); + input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = DRIVER_NAME; input->id.bustype = BUS_HOST; - input->dev.parent = dev; input->open = mxs_lradc_ts_open; input->close = mxs_lradc_ts_close; @@ -1146,20 +1137,8 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc) lradc->ts_input = input; input_set_drvdata(input, lradc); - ret = input_register_device(input); - if (ret) - input_free_device(lradc->ts_input); - - return ret; -} - -static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc) -{ - if (!lradc->use_touchscreen) - return; - mxs_lradc_disable_ts(lradc); - input_unregister_device(lradc->ts_input); + return input_register_device(input); } /* @@ -1510,7 +1489,9 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc) { int i; - mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, + lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, + LRADC_CTRL1); for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++) mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i)); @@ -1721,13 +1702,11 @@ static int mxs_lradc_probe(struct platform_device *pdev) ret = iio_device_register(iio); if (ret) { dev_err(dev, "Failed to register IIO device\n"); - goto err_ts; + return ret; } return 0; -err_ts: - mxs_lradc_ts_unregister(lradc); err_ts_register: mxs_lradc_hw_stop(lradc); err_dev: @@ -1745,7 +1724,6 @@ static int mxs_lradc_remove(struct platform_device *pdev) struct mxs_lradc *lradc = iio_priv(iio); iio_device_unregister(iio); - mxs_lradc_ts_unregister(lradc); mxs_lradc_hw_stop(lradc); mxs_lradc_trigger_remove(iio); iio_triggered_buffer_cleanup(iio); @@ -79,10 +79,29 @@ static const struct iio_chan_spec nau7802_chan_array[] = { static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80, 10, 10, 10, 320}; +static ssize_t nau7802_show_scales(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nau7802_state *st = iio_priv(dev_to_iio_dev(dev)); + int i, len = 0; + + for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) + len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09d ", + st->scale_avail[i]); + + buf[len-1] = '\n'; + + return len; +} + static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320"); +static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO, nau7802_show_scales, + NULL, 0); + static struct attribute *nau7802_attributes[] = { &iio_const_attr_sampling_frequency_available.dev_attr.attr, + 
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr, NULL }; @@ -414,6 +433,7 @@ static int nau7802_probe(struct i2c_client *client, i2c_set_clientdata(client, indio_dev); indio_dev->dev.parent = &client->dev; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = dev_name(&client->dev); indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &nau7802_info; @@ -22,6 +22,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/acpi.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> @@ -138,7 +139,8 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) if (ret < 0) goto out; buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -149,12 +151,24 @@ static int adc081c_probe(struct i2c_client *client, { struct iio_dev *iio; struct adc081c *adc; - struct adcxx1c_model *model = &adcxx1c_models[id->driver_data]; + struct adcxx1c_model *model; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EOPNOTSUPP; + if (ACPI_COMPANION(&client->dev)) { + const struct acpi_device_id *ad_id; + + ad_id = acpi_match_device(client->dev.driver->acpi_match_table, + &client->dev); + if (!ad_id) + return -ENODEV; + model = &adcxx1c_models[ad_id->driver_data]; + } else { + model = &adcxx1c_models[id->driver_data]; + } + iio = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!iio) return -ENOMEM; @@ -172,6 +186,7 @@ static int adc081c_probe(struct i2c_client *client, return err; iio->dev.parent = &client->dev; + iio->dev.of_node = client->dev.of_node; iio->name = dev_name(&client->dev); iio->modes = INDIO_DIRECT_MODE; iio->info = &adc081c_info; @@ -231,10 +246,21 @@ static const struct of_device_id adc081c_of_match[] = { MODULE_DEVICE_TABLE(of, adc081c_of_match); #endif +#ifdef CONFIG_ACPI +static const struct acpi_device_id adc081c_acpi_match[] = { + { "ADC081C", ADC081C }, + { "ADC101C", ADC101C }, + { "ADC121C", ADC121C }, + { } +}; +MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match); +#endif + static struct i2c_driver adc081c_driver = { .driver = { .name = "adc081c", .of_match_table = of_match_ptr(adc081c_of_match), + .acpi_match_table = ACPI_PTR(adc081c_acpi_match), }, .probe = adc081c_probe, .remove = adc081c_remove, @@ -194,6 +194,7 @@ static int adc0832_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->info = &adc0832_info; indio_dev->modes = INDIO_DIRECT_MODE; @@ -150,6 +150,7 @@ static int adc128_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &adc128_info; @@ -55,6 +55,11 @@ #define ADS1015_DEFAULT_DATA_RATE 4 #define ADS1015_DEFAULT_CHAN 0 +enum { + ADS1015, + ADS1115, +}; + enum ads1015_channels { ADS1015_AIN0_AIN1 = 0, ADS1015_AIN0_AIN3, @@ -71,6 +76,10 @@ static const unsigned int ads1015_data_rate[] = { 128, 250, 490, 920, 1600, 2400, 3300, 3300 }; +static const unsigned int ads1115_data_rate[] = { + 8, 16, 32, 64, 128, 250, 475, 860 +}; + static const struct { int scale; int uscale; @@ -101,6 +110,7 @@ static const struct { .shift = 4, \ .endianness = IIO_CPU, \ }, \ + .datasheet_name = "AIN"#_chan, \ 
} #define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \ @@ -121,6 +131,45 @@ static const struct { .shift = 4, \ .endianness = IIO_CPU, \ }, \ + .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ +} + +#define ADS1115_V_CHAN(_chan, _addr) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .address = _addr, \ + .channel = _chan, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = _addr, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ + .datasheet_name = "AIN"#_chan, \ +} + +#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \ + .type = IIO_VOLTAGE, \ + .differential = 1, \ + .indexed = 1, \ + .address = _addr, \ + .channel = _chan, \ + .channel2 = _chan2, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = _addr, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_CPU, \ + }, \ + .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \ } struct ads1015_data { @@ -131,6 +180,8 @@ struct ads1015_data { */ struct mutex lock; struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; + + unsigned int *data_rate; }; static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg) @@ -157,6 +208,18 @@ static const struct iio_chan_spec ads1015_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +static const struct iio_chan_spec ads1115_channels[] = { + ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), + ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), + ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), + ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), + ADS1115_V_CHAN(0, ADS1015_AIN0), + ADS1115_V_CHAN(1, ADS1015_AIN1), + ADS1115_V_CHAN(2, ADS1015_AIN2), + ADS1115_V_CHAN(3, ADS1015_AIN3), + IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), +}; + static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -196,7 +259,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) return ret; if (change) { - conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]); + conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); usleep_range(conv_time, conv_time + 1); } @@ -225,7 +288,8 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p) buf[0] = res; mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); err: iio_trigger_notify_done(indio_dev->trig); @@ -263,7 +327,7 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate) int i, ret, rindex = -1; for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) - if (ads1015_data_rate[i] == rate) { + if (data->data_rate[i] == rate) { rindex = i; break; } @@ -291,7 +355,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); mutex_lock(&data->lock); switch (mask) { - case IIO_CHAN_INFO_RAW: + case IIO_CHAN_INFO_RAW: { + int shift = chan->scan_type.shift; + if (iio_buffer_enabled(indio_dev)) { ret = -EBUSY; break; @@ -307,8 +373,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, break; } - /* 12 bit res, D0 is bit 4 in conversion register */ - *val = sign_extend32(*val >> 4, 11); + *val = sign_extend32(*val >> shift, 15 - shift); ret = ads1015_set_power_state(data, false); if (ret < 0) @@ -316,6 +381,7 @@ static int ads1015_read_raw(struct iio_dev 
*indio_dev, ret = IIO_VAL_INT; break; + } case IIO_CHAN_INFO_SCALE: idx = data->channel_data[chan->address].pga; *val = ads1015_scale[idx].scale; @@ -324,7 +390,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_SAMP_FREQ: idx = data->channel_data[chan->address].data_rate; - *val = ads1015_data_rate[idx]; + *val = data->data_rate[idx]; ret = IIO_VAL_INT; break; default: @@ -380,12 +446,15 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { }; static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125"); -static IIO_CONST_ATTR(sampling_frequency_available, - "128 250 490 920 1600 2400 3300"); + +static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available, + sampling_frequency_available, "128 250 490 920 1600 2400 3300"); +static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available, + sampling_frequency_available, "8 16 32 64 128 250 475 860"); static struct attribute *ads1015_attributes[] = { &iio_const_attr_scale_available.dev_attr.attr, - &iio_const_attr_sampling_frequency_available.dev_attr.attr, + &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr, NULL, }; @@ -393,11 +462,28 @@ static const struct attribute_group ads1015_attribute_group = { .attrs = ads1015_attributes, }; -static const struct iio_info ads1015_info = { +static struct attribute *ads1115_attributes[] = { + &iio_const_attr_scale_available.dev_attr.attr, + &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group ads1115_attribute_group = { + .attrs = ads1115_attributes, +}; + +static struct iio_info ads1015_info = { + .driver_module = THIS_MODULE, + .read_raw = ads1015_read_raw, + .write_raw = ads1015_write_raw, + .attrs = &ads1015_attribute_group, +}; + +static struct iio_info ads1115_info = { .driver_module = THIS_MODULE, .read_raw = ads1015_read_raw, .write_raw = ads1015_write_raw, - .attrs = &ads1015_attribute_group, + .attrs = &ads1115_attribute_group, }; #ifdef CONFIG_OF @@ -500,12 +586,25 @@ static int ads1015_probe(struct i2c_client *client, mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; - indio_dev->info = &ads1015_info; + indio_dev->dev.of_node = client->dev.of_node; indio_dev->name = ADS1015_DRV_NAME; - indio_dev->channels = ads1015_channels; - indio_dev->num_channels = ARRAY_SIZE(ads1015_channels); indio_dev->modes = INDIO_DIRECT_MODE; + switch (id->driver_data) { + case ADS1015: + indio_dev->channels = ads1015_channels; + indio_dev->num_channels = ARRAY_SIZE(ads1015_channels); + indio_dev->info = &ads1015_info; + data->data_rate = (unsigned int *) &ads1015_data_rate; + break; + case ADS1115: + indio_dev->channels = ads1115_channels; + indio_dev->num_channels = ARRAY_SIZE(ads1115_channels); + indio_dev->info = &ads1115_info; + data->data_rate = (unsigned int *) &ads1115_data_rate; + break; + } + /* we need to keep this ABI the same as used by hwmon ADS1015 driver */ ads1015_get_channels_config(client); @@ -590,7 +689,8 @@ static const struct dev_pm_ops ads1015_pm_ops = { }; static const struct i2c_device_id ads1015_id[] = { - {"ads1015", 0}, + {"ads1015", ADS1015}, + {"ads1115", ADS1115}, {} }; MODULE_DEVICE_TABLE(i2c, ads1015_id); @@ -421,6 +421,7 @@ static int ads8688_probe(struct spi_device *spi) indio_dev->name = spi_get_device_id(spi)->name; indio_dev->dev.parent = &spi->dev; + indio_dev->dev.of_node = spi->dev.of_node; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = 
st->chip_info->num_channels; @@ -326,8 +326,7 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels) int i; indio_dev->num_channels = channels; - chan_array = kcalloc(channels, - sizeof(struct iio_chan_spec), GFP_KERNEL); + chan_array = kcalloc(channels, sizeof(*chan_array), GFP_KERNEL); if (chan_array == NULL) return -ENOMEM; @@ -467,8 +466,7 @@ static int tiadc_probe(struct platform_device *pdev) return -EINVAL; } - indio_dev = devm_iio_device_alloc(&pdev->dev, - sizeof(struct tiadc_device)); + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); if (indio_dev == NULL) { dev_err(&pdev->dev, "failed to allocate iio device\n"); return -ENOMEM; @@ -531,8 +529,7 @@ static int tiadc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int tiadc_suspend(struct device *dev) +static int __maybe_unused tiadc_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct tiadc_device *adc_dev = iio_priv(indio_dev); @@ -550,7 +547,7 @@ static int tiadc_suspend(struct device *dev) return 0; } -static int tiadc_resume(struct device *dev) +static int __maybe_unused tiadc_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct tiadc_device *adc_dev = iio_priv(indio_dev); @@ -567,14 +564,7 @@ static int tiadc_resume(struct device *dev) return 0; } -static const struct dev_pm_ops tiadc_pm_ops = { - .suspend = tiadc_suspend, - .resume = tiadc_resume, -}; -#define TIADC_PM_OPS (&tiadc_pm_ops) -#else -#define TIADC_PM_OPS NULL -#endif +static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume); static const struct of_device_id ti_adc_dt_ids[] = { { .compatible = "ti,am3359-adc", }, @@ -585,7 +575,7 @@ MODULE_DEVICE_TABLE(of, ti_adc_dt_ids); static struct platform_driver tiadc_driver = { .driver = { .name = "TI-am335x-adc", - .pm = TIADC_PM_OPS, + .pm = &tiadc_pm_ops, .of_match_table = ti_adc_dt_ids, }, .probe = tiadc_probe, @@ -594,7 +594,8 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id) if (iio_buffer_enabled(indio_dev)) { info->buffer[0] = info->value; iio_push_to_buffers_with_timestamp(indio_dev, - info->buffer, iio_get_time_ns()); + info->buffer, + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); } else complete(&info->completion); @@ -46,7 +46,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event) iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } else { /* * For other channels we don't know whether it is an upper or @@ -56,7 +56,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event) iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(chan->type, chan->channel, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } } @@ -305,7 +305,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer) queue->fileio.active_block = NULL; spin_lock_irq(&queue->list_lock); - for (i = 0; i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { block = queue->fileio.blocks[i]; /* If we can't re-use it free it */ @@ -323,7 +323,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer) INIT_LIST_HEAD(&queue->incoming); - for (i = 0; i < 2; i++) { + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { if (queue->fileio.blocks[i]) { block = queue->fileio.blocks[i]; if (block->state == IIO_BLOCK_STATE_DEAD) { @@ -5,15 +5,17 @@ menu 
"Chemical Sensors" config ATLAS_PH_SENSOR - tristate "Atlas Scientific OEM pH-SM sensor" + tristate "Atlas Scientific OEM SM sensors" depends on I2C select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER select IRQ_WORK help - Say Y here to build I2C interface support for the Atlas - Scientific OEM pH-SM sensor. + Say Y here to build I2C interface support for the following + Atlas Scientific OEM SM sensors: + * pH SM sensor + * EC SM sensor To compile this driver as module, choose M here: the module will be called atlas-ph-sensor. @@ -24,6 +24,7 @@ #include <linux/irq_work.h> #include <linux/gpio.h> #include <linux/i2c.h> +#include <linux/of_device.h> #include <linux/regmap.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> @@ -43,29 +44,50 @@ #define ATLAS_REG_PWR_CONTROL 0x06 -#define ATLAS_REG_CALIB_STATUS 0x0d -#define ATLAS_REG_CALIB_STATUS_MASK 0x07 -#define ATLAS_REG_CALIB_STATUS_LOW BIT(0) -#define ATLAS_REG_CALIB_STATUS_MID BIT(1) -#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2) +#define ATLAS_REG_PH_CALIB_STATUS 0x0d +#define ATLAS_REG_PH_CALIB_STATUS_MASK 0x07 +#define ATLAS_REG_PH_CALIB_STATUS_LOW BIT(0) +#define ATLAS_REG_PH_CALIB_STATUS_MID BIT(1) +#define ATLAS_REG_PH_CALIB_STATUS_HIGH BIT(2) -#define ATLAS_REG_TEMP_DATA 0x0e +#define ATLAS_REG_EC_CALIB_STATUS 0x0f +#define ATLAS_REG_EC_CALIB_STATUS_MASK 0x0f +#define ATLAS_REG_EC_CALIB_STATUS_DRY BIT(0) +#define ATLAS_REG_EC_CALIB_STATUS_SINGLE BIT(1) +#define ATLAS_REG_EC_CALIB_STATUS_LOW BIT(2) +#define ATLAS_REG_EC_CALIB_STATUS_HIGH BIT(3) + +#define ATLAS_REG_PH_TEMP_DATA 0x0e #define ATLAS_REG_PH_DATA 0x16 +#define ATLAS_REG_EC_PROBE 0x08 +#define ATLAS_REG_EC_TEMP_DATA 0x10 +#define ATLAS_REG_EC_DATA 0x18 +#define ATLAS_REG_TDS_DATA 0x1c +#define ATLAS_REG_PSS_DATA 0x20 + #define ATLAS_PH_INT_TIME_IN_US 450000 +#define ATLAS_EC_INT_TIME_IN_US 650000 + +enum { + ATLAS_PH_SM, + ATLAS_EC_SM, +}; struct atlas_data { struct i2c_client *client; struct iio_trigger *trig; + struct atlas_device *chip; struct regmap *regmap; struct irq_work work; - __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */ + __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */ }; static const struct regmap_range atlas_volatile_ranges[] = { regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL), regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4), + regmap_reg_range(ATLAS_REG_EC_DATA, ATLAS_REG_PSS_DATA + 4), }; static const struct regmap_access_table atlas_volatile_table = { @@ -80,13 +102,14 @@ static const struct regmap_config atlas_regmap_config = { .val_bits = 8, .volatile_table = &atlas_volatile_table, - .max_register = ATLAS_REG_PH_DATA + 4, + .max_register = ATLAS_REG_PSS_DATA + 4, .cache_type = REGCACHE_RBTREE, }; -static const struct iio_chan_spec atlas_channels[] = { +static const struct iio_chan_spec atlas_ph_channels[] = { { .type = IIO_PH, + .address = ATLAS_REG_PH_DATA, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, @@ -100,7 +123,7 @@ static const struct iio_chan_spec atlas_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(1), { .type = IIO_TEMP, - .address = ATLAS_REG_TEMP_DATA, + .address = ATLAS_REG_PH_TEMP_DATA, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .output = 1, @@ -108,6 +131,142 @@ static const struct iio_chan_spec atlas_channels[] = { }, }; +#define ATLAS_EC_CHANNEL(_idx, _addr) \ + {\ + .type = IIO_CONCENTRATION, \ + .indexed = 1, \ + .channel = _idx, \ + .address = _addr, \ + .info_mask_separate = \ + 
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \ + .scan_index = _idx + 1, \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 32, \ + .storagebits = 32, \ + .endianness = IIO_BE, \ + }, \ + } + +static const struct iio_chan_spec atlas_ec_channels[] = { + { + .type = IIO_ELECTRICALCONDUCTIVITY, + .address = ATLAS_REG_EC_DATA, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_BE, + }, + }, + ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA), + ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA), + IIO_CHAN_SOFT_TIMESTAMP(3), + { + .type = IIO_TEMP, + .address = ATLAS_REG_EC_TEMP_DATA, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .output = 1, + .scan_index = -1 + }, +}; + +static int atlas_check_ph_calibration(struct atlas_data *data) +{ + struct device *dev = &data->client->dev; + int ret; + unsigned int val; + + ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val); + if (ret) + return ret; + + if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) { + dev_warn(dev, "device has not been calibrated\n"); + return 0; + } + + if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW)) + dev_warn(dev, "device missing low point calibration\n"); + + if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID)) + dev_warn(dev, "device missing mid point calibration\n"); + + if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH)) + dev_warn(dev, "device missing high point calibration\n"); + + return 0; +} + +static int atlas_check_ec_calibration(struct atlas_data *data) +{ + struct device *dev = &data->client->dev; + int ret; + unsigned int val; + + ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2); + if (ret) + return ret; + + dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100, + be16_to_cpu(val) % 100); + + ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val); + if (ret) + return ret; + + if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) { + dev_warn(dev, "device has not been calibrated\n"); + return 0; + } + + if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY)) + dev_warn(dev, "device missing dry point calibration\n"); + + if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) { + dev_warn(dev, "device using single point calibration\n"); + } else { + if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW)) + dev_warn(dev, "device missing low point calibration\n"); + + if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH)) + dev_warn(dev, "device missing high point calibration\n"); + } + + return 0; +} + +struct atlas_device { + const struct iio_chan_spec *channels; + int num_channels; + int data_reg; + + int (*calibration)(struct atlas_data *data); + int delay; +}; + +static struct atlas_device atlas_devices[] = { + [ATLAS_PH_SM] = { + .channels = atlas_ph_channels, + .num_channels = 3, + .data_reg = ATLAS_REG_PH_DATA, + .calibration = &atlas_check_ph_calibration, + .delay = ATLAS_PH_INT_TIME_IN_US, + }, + [ATLAS_EC_SM] = { + .channels = atlas_ec_channels, + .num_channels = 5, + .data_reg = ATLAS_REG_EC_DATA, + .calibration = &atlas_check_ec_calibration, + .delay = ATLAS_EC_INT_TIME_IN_US, + }, + +}; + static int atlas_set_powermode(struct atlas_data *data, int on) { return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on); @@ -178,12 +337,13 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private) struct atlas_data *data = iio_priv(indio_dev); int ret; - ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA, - (u8 *) &data->buffer, sizeof(data->buffer[0])); + ret = 
regmap_bulk_read(data->regmap, data->chip->data_reg, + (u8 *) &data->buffer, + sizeof(__be32) * (data->chip->num_channels - 2)); if (!ret) iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); @@ -200,7 +360,7 @@ static irqreturn_t atlas_interrupt_handler(int irq, void *private) return IRQ_HANDLED; } -static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val) +static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val) { struct device *dev = &data->client->dev; int suspended = pm_runtime_suspended(dev); @@ -213,11 +373,9 @@ static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val) } if (suspended) - usleep_range(ATLAS_PH_INT_TIME_IN_US, - ATLAS_PH_INT_TIME_IN_US + 100000); + usleep_range(data->chip->delay, data->chip->delay + 100000); - ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA, - (u8 *) val, sizeof(*val)); + ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val)); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); @@ -242,12 +400,15 @@ static int atlas_read_raw(struct iio_dev *indio_dev, (u8 *) &reg, sizeof(reg)); break; case IIO_PH: + case IIO_CONCENTRATION: + case IIO_ELECTRICALCONDUCTIVITY: mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) ret = -EBUSY; else - ret = atlas_read_ph_measurement(data, &reg); + ret = atlas_read_measurement(data, + chan->address, &reg); mutex_unlock(&indio_dev->mlock); break; @@ -271,6 +432,14 @@ static int atlas_read_raw(struct iio_dev *indio_dev, *val = 1; /* 0.001 */ *val2 = 1000; break; + case IIO_ELECTRICALCONDUCTIVITY: + *val = 1; /* 0.00001 */ + *val2 = 100000; + break; + case IIO_CONCENTRATION: + *val = 0; /* 0.000001 */ + *val2 = 1000; + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } @@ -303,37 +472,26 @@ static const struct iio_info atlas_info = { .write_raw = atlas_write_raw, }; -static int atlas_check_calibration(struct atlas_data *data) -{ - struct device *dev = &data->client->dev; - int ret; - unsigned int val; - - ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val); - if (ret) - return ret; - - if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) { - dev_warn(dev, "device has not been calibrated\n"); - return 0; - } - - if (!(val & ATLAS_REG_CALIB_STATUS_LOW)) - dev_warn(dev, "device missing low point calibration\n"); - - if (!(val & ATLAS_REG_CALIB_STATUS_MID)) - dev_warn(dev, "device missing mid point calibration\n"); - - if (!(val & ATLAS_REG_CALIB_STATUS_HIGH)) - dev_warn(dev, "device missing high point calibration\n"); +static const struct i2c_device_id atlas_id[] = { + { "atlas-ph-sm", ATLAS_PH_SM}, + { "atlas-ec-sm", ATLAS_EC_SM}, + {} +}; +MODULE_DEVICE_TABLE(i2c, atlas_id); - return 0; +static const struct of_device_id atlas_dt_ids[] = { + { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, }, + { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, }, + { } }; +MODULE_DEVICE_TABLE(of, atlas_dt_ids); static int atlas_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct atlas_data *data; + struct atlas_device *chip; + const struct of_device_id *of_id; struct iio_trigger *trig; struct iio_dev *indio_dev; int ret; @@ -342,10 +500,16 @@ static int atlas_probe(struct i2c_client *client, if (!indio_dev) return -ENOMEM; + of_id = of_match_device(atlas_dt_ids, &client->dev); + if (!of_id) + chip = &atlas_devices[id->driver_data]; + else + chip = &atlas_devices[(unsigned long)of_id->data]; + 
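+ /*
+ * chip now points at the per-variant atlas_device entry (channel
+ * table, data register, calibration check and conversion delay),
+ * chosen from the DT compatible when one matched and from the
+ * i2c_device_id table otherwise; the max1363 hunk above uses the
+ * same OF-match-data pattern via of_device_get_match_data().
+ */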
 	indio_dev->info = &atlas_info;
 	indio_dev->name = ATLAS_DRV_NAME;
-	indio_dev->channels = atlas_channels;
-	indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+	indio_dev->channels = chip->channels;
+	indio_dev->num_channels = chip->num_channels;
 	indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
 	indio_dev->dev.parent = &client->dev;
@@ -358,6 +522,7 @@ static int atlas_probe(struct i2c_client *client,
 	data = iio_priv(indio_dev);
 	data->client = client;
 	data->trig = trig;
+	data->chip = chip;
 	trig->dev.parent = indio_dev->dev.parent;
 	trig->ops = &atlas_interrupt_trigger_ops;
 	iio_trigger_set_drvdata(trig, indio_dev);
@@ -379,7 +544,7 @@ static int atlas_probe(struct i2c_client *client,
 		return -EINVAL;
 	}
 
-	ret = atlas_check_calibration(data);
+	ret = chip->calibration(data);
 	if (ret)
 		return ret;
@@ -480,18 +645,6 @@ static const struct dev_pm_ops atlas_pm_ops = {
 			   atlas_runtime_resume, NULL)
 };
 
-static const struct i2c_device_id atlas_id[] = {
-	{ "atlas-ph-sm", 0 },
-	{}
-};
-MODULE_DEVICE_TABLE(i2c, atlas_id);
-
-static const struct of_device_id atlas_dt_ids[] = {
-	{ .compatible = "atlas,ph-sm" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, atlas_dt_ids);
-
 static struct i2c_driver atlas_driver = {
 	.driver = {
 		.name = ATLAS_DRV_NAME,
@@ -22,34 +22,32 @@
 #include <linux/iio/common/st_sensors.h>
 
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
 {
-	int i, len;
-	int total = 0;
+	int i;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
 	unsigned int num_data_channels = sdata->num_data_channels;
 
-	for (i = 0; i < num_data_channels; i++) {
-		unsigned int bytes_to_read;
-
-		if (test_bit(i, indio_dev->active_scan_mask)) {
-			bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
-			len = sdata->tf->read_multiple_byte(&sdata->tb,
-				sdata->dev, indio_dev->channels[i].address,
-				bytes_to_read,
-				buf + total, sdata->multiread_bit);
-
-			if (len < bytes_to_read)
-				return -EIO;
-
-			/* Advance the buffer pointer */
-			total += len;
-		}
+	for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
+		const struct iio_chan_spec *channel = &indio_dev->channels[i];
+		unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+		unsigned int storage_bytes =
+			channel->scan_type.storagebits >> 3;
+
+		buf = PTR_ALIGN(buf, storage_bytes);
+		if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+						  channel->address,
+						  bytes_to_read, buf,
+						  sdata->multiread_bit) <
+		    bytes_to_read)
+			return -EIO;
+
+		/* Advance the buffer pointer */
+		buf += storage_bytes;
 	}
 
-	return total;
+	return 0;
 }
-EXPORT_SYMBOL(st_sensors_get_buffer_element);
 
 irqreturn_t st_sensors_trigger_handler(int irq, void *p)
 {
@@ -57,31 +55,25 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
 	struct iio_poll_func *pf = p;
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
-
-	/* If we have a status register, check if this IRQ came from us */
-	if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
-		u8 status;
-
-		len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-			   sdata->sensor_settings->drdy_irq.addr_stat_drdy,
-			   &status);
-		if (len < 0)
-			dev_err(sdata->dev, "could not read channel status\n");
-
-		/*
-		 * If this was not caused by any channels on this sensor,
-		 * return IRQ_NONE
-		 */
-		if (!(status & (u8)indio_dev->active_scan_mask[0]))
-			return IRQ_NONE;
-	}
+	s64 timestamp;
+
+	/*
+	 * If we do timestamping here, do it before reading the values, because
+	 * once we've
read the values, new interrupts can occur (when using
+	 * the hardware trigger) and the hw_timestamp may get updated.
+	 * By storing it in a local variable first, we are safe.
+	 */
+	if (sdata->hw_irq_trigger)
+		timestamp = sdata->hw_timestamp;
+	else
+		timestamp = iio_get_time_ns(indio_dev);
 
 	len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
 	if (len < 0)
 		goto st_sensors_get_buffer_element_error;
 
 	iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
-		pf->timestamp);
+					   timestamp);
 
 st_sensors_get_buffer_element_error:
 	iio_trigger_notify_done(indio_dev->trig);
@@ -228,7 +228,7 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
 }
 EXPORT_SYMBOL(st_sensors_set_axis_enable);
 
-void st_sensors_power_enable(struct iio_dev *indio_dev)
+int st_sensors_power_enable(struct iio_dev *indio_dev)
 {
 	struct st_sensor_data *pdata = iio_priv(indio_dev);
 	int err;
@@ -237,18 +237,37 @@ void st_sensors_power_enable(struct iio_dev *indio_dev)
 	pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
 	if (!IS_ERR(pdata->vdd)) {
 		err = regulator_enable(pdata->vdd);
-		if (err != 0)
+		if (err != 0) {
 			dev_warn(&indio_dev->dev,
 				 "Failed to enable specified Vdd supply\n");
+			return err;
+		}
+	} else {
+		err = PTR_ERR(pdata->vdd);
+		if (err != -ENODEV)
+			return err;
 	}
 
 	pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
 	if (!IS_ERR(pdata->vdd_io)) {
 		err = regulator_enable(pdata->vdd_io);
-		if (err != 0)
+		if (err != 0) {
 			dev_warn(&indio_dev->dev,
 				 "Failed to enable specified Vdd_IO supply\n");
+			goto st_sensors_disable_vdd;
+		}
+	} else {
+		err = PTR_ERR(pdata->vdd_io);
+		if (err != -ENODEV)
+			goto st_sensors_disable_vdd;
 	}
+
+	return 0;
+
+st_sensors_disable_vdd:
+	if (!IS_ERR_OR_NULL(pdata->vdd))
+		regulator_disable(pdata->vdd);
+	return err;
 }
 EXPORT_SYMBOL(st_sensors_power_enable);
@@ -256,10 +275,10 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
 {
 	struct st_sensor_data *pdata = iio_priv(indio_dev);
 
-	if (!IS_ERR(pdata->vdd))
+	if (!IS_ERR_OR_NULL(pdata->vdd))
 		regulator_disable(pdata->vdd);
 
-	if (!IS_ERR(pdata->vdd_io))
+	if (!IS_ERR_OR_NULL(pdata->vdd_io))
 		regulator_disable(pdata->vdd_io);
 }
 EXPORT_SYMBOL(st_sensors_power_disable);
@@ -363,6 +382,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
 	if (err < 0)
 		return err;
 
+	/* Disable DRDY, this might still be enabled after reboot. */
+	err = st_sensors_set_dataready_irq(indio_dev, false);
+	if (err < 0)
+		return err;
+
 	if (sdata->current_fullscale) {
 		err = st_sensors_set_fullscale(indio_dev,
 					       sdata->current_fullscale->num);
@@ -424,6 +448,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
 	else
 		drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
 
+	/* Flag to the poll function that the hardware trigger is in use */
+	sdata->hw_irq_trigger = enable;
+
 	/* Enable/Disable the interrupt generator for data ready.
*/
 	err = st_sensors_write_data_with_mask(indio_dev,
 					sdata->sensor_settings->drdy_irq.addr,
@@ -463,7 +490,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
 	int err;
 	u8 *outdata;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
-	unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
+	unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
 
 	outdata = kmalloc(byte_for_channel, GFP_KERNEL);
 	if (!outdata)
@@ -523,7 +550,7 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
 			int num_sensors_list,
 			const struct st_sensor_settings *sensor_settings)
 {
-	int i, n, err;
+	int i, n, err = 0;
 	u8 wai;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -543,17 +570,21 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
 		return -ENODEV;
 	}
 
-	err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-					sensor_settings[i].wai_addr, &wai);
-	if (err < 0) {
-		dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
-		return err;
-	}
+	if (sensor_settings[i].wai_addr) {
+		err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+					   sensor_settings[i].wai_addr, &wai);
+		if (err < 0) {
+			dev_err(&indio_dev->dev,
+				"failed to read Who-Am-I register.\n");
+			return err;
+		}
 
-	if (sensor_settings[i].wai != wai) {
-		dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
-			indio_dev->name, wai);
-		return -EINVAL;
+		if (sensor_settings[i].wai != wai) {
+			dev_err(&indio_dev->dev,
+				"%s: WhoAmI mismatch (0x%x).\n",
+				indio_dev->name, wai);
+			return -EINVAL;
+		}
 	}
 
 	sdata->sensor_settings =
@@ -48,8 +48,8 @@ static int st_sensors_i2c_read_multiple_byte(
 	if (multiread_bit)
 		reg_addr |= ST_SENSORS_I2C_MULTIREAD;
 
-	return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
-					     reg_addr, len, data);
+	return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev),
+							 reg_addr, len, data);
 }
 
 static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
@@ -17,6 +17,116 @@
 #include <linux/iio/common/st_sensors.h>
 #include "st_sensors_core.h"
 
+/**
+ * st_sensors_new_samples_available() - check if more samples came in
+ * returns:
+ * 0 - no new samples available
+ * 1 - new samples available
+ * negative - error or unknown
+ */
+static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
+					    struct st_sensor_data *sdata)
+{
+	u8 status;
+	int ret;
+
+	/* How would I know if I can't check it? */
+	if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+		return -EINVAL;
+
+	/* No scan mask, no interrupt */
+	if (!indio_dev->active_scan_mask)
+		return 0;
+
+	ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+			sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+			&status);
+	if (ret < 0) {
+		dev_err(sdata->dev,
+			"error checking samples available\n");
+		return ret;
+	}
+	/*
+	 * the lower bits of .active_scan_mask[0] are directly mapped
+	 * to the channels on the sensor: either bit 0 for
+	 * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+	 * gyroscopes or magnetometers. No sensor uses more than 3
+	 * channels, so cut the other status bits here.
+	 */
+	status &= 0x07;
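/*
 * Worked example, assuming a three-axis sensor such as a gyroscope: with
 * only the x and z channels enabled, active_scan_mask[0] = 0b101, so a
 * masked status of 0b100 (new z sample) reports new samples available,
 * while 0b010 (y only) does not.
 */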
+
+	if (status & (u8)indio_dev->active_scan_mask[0])
+		return 1;
+
+	return 0;
+}
+
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+	struct iio_trigger *trig = p;
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+	/* Get the time stamp as close in time as possible */
+	sdata->hw_timestamp = iio_get_time_ns(indio_dev);
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+	struct iio_trigger *trig = p;
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+	/*
+	 * If this trigger is backed by a hardware interrupt and we have a
+	 * status register, check if this IRQ came from us. Notice that
+	 * we will process also if st_sensors_new_samples_available()
+	 * returns negative: if we can't check status, then poll
+	 * unconditionally.
+	 */
+	if (sdata->hw_irq_trigger &&
+	    st_sensors_new_samples_available(indio_dev, sdata)) {
+		iio_trigger_poll_chained(p);
+	} else {
+		dev_dbg(sdata->dev, "spurious IRQ\n");
+		return IRQ_NONE;
+	}
+
+	/*
+	 * If we have proper level IRQs the handler will be re-entered if
+	 * the line is still active, so return here and come back in through
+	 * the top half if need be.
+	 */
+	if (!sdata->edge_irq)
+		return IRQ_HANDLED;
+
+	/*
+	 * If we are using edge IRQs, new samples arrived while processing
+	 * the IRQ and those may be missed unless we pick them here, so poll
+	 * again. If the sensor delivery frequency is very high, this thread
+	 * turns into a polled loop handler.
+	 */
+	while (sdata->hw_irq_trigger &&
+	       st_sensors_new_samples_available(indio_dev, sdata)) {
+		dev_dbg(sdata->dev, "more samples came in during polling\n");
+		sdata->hw_timestamp = iio_get_time_ns(indio_dev);
+		iio_trigger_poll_chained(p);
+	}
+
+	return IRQ_HANDLED;
+}
+
 int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 				const struct iio_trigger_ops *trigger_ops)
 {
@@ -30,19 +140,28 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 		return -ENOMEM;
 	}
 
+	iio_trigger_set_drvdata(sdata->trig, indio_dev);
+	sdata->trig->ops = trigger_ops;
+	sdata->trig->dev.parent = sdata->dev;
+
 	irq = sdata->get_irq_data_ready(indio_dev);
 	irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
 	/*
 	 * If the IRQ is triggered on falling edge, we need to mark the
 	 * interrupt as active low, if the hardware supports this.
 	 */
-	if (irq_trig == IRQF_TRIGGER_FALLING) {
+	switch (irq_trig) {
+	case IRQF_TRIGGER_FALLING:
+	case IRQF_TRIGGER_LOW:
 		if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
 			dev_err(&indio_dev->dev,
-				"falling edge specified for IRQ but hardware "
-				"only support rising edge, will request "
-				"rising edge\n");
-			irq_trig = IRQF_TRIGGER_RISING;
+				"falling/low specified for IRQ "
+				"but hardware only supports rising/high: "
+				"will request rising/high\n");
+			if (irq_trig == IRQF_TRIGGER_FALLING)
+				irq_trig = IRQF_TRIGGER_RISING;
+			if (irq_trig == IRQF_TRIGGER_LOW)
+				irq_trig = IRQF_TRIGGER_HIGH;
 		} else {
 			/* Set up INT active low i.e.
falling edge */ err = st_sensors_write_data_with_mask(indio_dev, @@ -51,20 +170,39 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, if (err < 0) goto iio_trigger_free; dev_info(&indio_dev->dev, - "interrupts on the falling edge\n"); + "interrupts on the falling edge or " + "active low level\n"); } - } else if (irq_trig == IRQF_TRIGGER_RISING) { + break; + case IRQF_TRIGGER_RISING: dev_info(&indio_dev->dev, "interrupts on the rising edge\n"); - - } else { + break; + case IRQF_TRIGGER_HIGH: + dev_info(&indio_dev->dev, + "interrupts active high level\n"); + break; + default: + /* This is the most preferred mode, if possible */ dev_err(&indio_dev->dev, - "unsupported IRQ trigger specified (%lx), only " - "rising and falling edges supported, enforce " + "unsupported IRQ trigger specified (%lx), enforce " "rising edge\n", irq_trig); irq_trig = IRQF_TRIGGER_RISING; } + /* Tell the interrupt handler that we're dealing with edges */ + if (irq_trig == IRQF_TRIGGER_FALLING || + irq_trig == IRQF_TRIGGER_RISING) + sdata->edge_irq = true; + else + /* + * If we're not using edges (i.e. level interrupts) we + * just mask off the IRQ, handle one interrupt, then + * if the line is still low, we return to the + * interrupt handler top half again and start over. + */ + irq_trig |= IRQF_ONESHOT; + /* * If the interrupt pin is Open Drain, by definition this * means that the interrupt line may be shared with other @@ -77,9 +215,9 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->sensor_settings->drdy_irq.addr_stat_drdy) irq_trig |= IRQF_SHARED; - err = request_threaded_irq(irq, - iio_trigger_generic_data_rdy_poll, - NULL, + err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), + st_sensors_irq_handler, + st_sensors_irq_thread, irq_trig, sdata->trig->name, sdata->trig); @@ -88,10 +226,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, goto iio_trigger_free; } - iio_trigger_set_drvdata(sdata->trig, indio_dev); - sdata->trig->ops = trigger_ops; - sdata->trig->dev.parent = sdata->dev; - err = iio_trigger_register(sdata->trig); if (err < 0) { dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); @@ -119,6 +253,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) } EXPORT_SYMBOL(st_sensors_deallocate_trigger); +int st_sensors_validate_device(struct iio_trigger *trig, + struct iio_dev *indio_dev) +{ + struct iio_dev *indio = iio_trigger_get_drvdata(trig); + + if (indio != indio_dev) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(st_sensors_validate_device); + MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); MODULE_LICENSE("GPL v2"); @@ -247,12 +247,13 @@ config MCP4922 config STX104 tristate "Apex Embedded Systems STX104 DAC driver" - depends on X86 && ISA + depends on X86 && ISA_BUS_API + select GPIOLIB help - Say yes here to build support for the 2-channel DAC on the Apex - Embedded Systems STX104 integrated analog PC/104 card. The base port - addresses for the devices may be configured via the "base" module - parameter array. + Say yes here to build support for the 2-channel DAC and GPIO on the + Apex Embedded Systems STX104 integrated analog PC/104 card. The base + port addresses for the devices may be configured via the base array + module parameter. 
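For context, the base array module parameter named in the STX104 help text above follows the usual ISA-driver pattern; a minimal sketch, with the array bound assumed rather than taken from the patch:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	#define STX104_MAX_DEV 16			/* assumed bound */

	/* e.g. insmod stx104.ko base=0x300,0x310 */
	static unsigned int base[STX104_MAX_DEV];
	static unsigned int num_stx104;
	module_param_array(base, uint, &num_stx104, 0);
	MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");

The same num_stx104 count is what the driver hands to module_isa_driver() further down, so only as many devices are probed as base addresses were supplied.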
config VF610_DAC
 	tristate "Vybrid vf610 DAC driver"
@@ -242,7 +242,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_RISING),
-			       iio_get_time_ns());
+			       iio_get_time_ns(indio_dev));
 	}
 
 	if (events & AD5421_FAULT_UNDER_CURRENT) {
@@ -251,7 +251,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_FALLING),
-			       iio_get_time_ns());
+			       iio_get_time_ns(indio_dev));
 	}
 
 	if (events & AD5421_FAULT_TEMP_OVER_140) {
@@ -260,7 +260,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
 					    0,
 					    IIO_EV_TYPE_MAG,
 					    IIO_EV_DIR_RISING),
-			       iio_get_time_ns());
+			       iio_get_time_ns(indio_dev));
 	}
 
 	old_fault = fault;
@@ -223,7 +223,7 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_RISING),
-		       iio_get_time_ns());
+		       iio_get_time_ns((struct iio_dev *)private));
 
 	return IRQ_HANDLED;
 }
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
 	device_for_each_child_node(st->dev, child) {
 		ret = fwnode_property_read_u32(child, "reg", &reg);
-		if (ret || reg > ARRAY_SIZE(st->channel_modes))
+		if (ret || reg >= ARRAY_SIZE(st->channel_modes))
 			continue;
 
 		ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/platform_data/ad5755.h>
@@ -109,6 +110,51 @@ enum ad5755_type {
 	ID_AD5737,
 };
 
+#ifdef CONFIG_OF
+static const int ad5755_dcdc_freq_table[][2] = {
+	{ 250000, AD5755_DC_DC_FREQ_250kHZ },
+	{ 410000, AD5755_DC_DC_FREQ_410kHZ },
+	{ 650000, AD5755_DC_DC_FREQ_650kHZ }
+};
+
+static const int ad5755_dcdc_maxv_table[][2] = {
+	{ 23000000, AD5755_DC_DC_MAXV_23V },
+	{ 24500000, AD5755_DC_DC_MAXV_24V5 },
+	{ 27000000, AD5755_DC_DC_MAXV_27V },
+	{ 29500000, AD5755_DC_DC_MAXV_29V5 },
+};
+
+static const int ad5755_slew_rate_table[][2] = {
+	{ 64000, AD5755_SLEW_RATE_64k },
+	{ 32000, AD5755_SLEW_RATE_32k },
+	{ 16000, AD5755_SLEW_RATE_16k },
+	{ 8000, AD5755_SLEW_RATE_8k },
+	{ 4000, AD5755_SLEW_RATE_4k },
+	{ 2000, AD5755_SLEW_RATE_2k },
+	{ 1000, AD5755_SLEW_RATE_1k },
+	{ 500, AD5755_SLEW_RATE_500 },
+	{ 250, AD5755_SLEW_RATE_250 },
+	{ 125, AD5755_SLEW_RATE_125 },
+	{ 64, AD5755_SLEW_RATE_64 },
+	{ 32, AD5755_SLEW_RATE_32 },
+	{ 16, AD5755_SLEW_RATE_16 },
+	{ 8, AD5755_SLEW_RATE_8 },
+	{ 4, AD5755_SLEW_RATE_4 },
+	{ 0, AD5755_SLEW_RATE_0_5 },
+};
+
+static const int ad5755_slew_step_table[][2] = {
+	{ 256, AD5755_SLEW_STEP_SIZE_256 },
+	{ 128, AD5755_SLEW_STEP_SIZE_128 },
+	{ 64, AD5755_SLEW_STEP_SIZE_64 },
+	{ 32, AD5755_SLEW_STEP_SIZE_32 },
+	{ 16, AD5755_SLEW_STEP_SIZE_16 },
+	{ 4, AD5755_SLEW_STEP_SIZE_4 },
+	{ 2, AD5755_SLEW_STEP_SIZE_2 },
+	{ 1, AD5755_SLEW_STEP_SIZE_1 },
+};
+#endif
+
 static int ad5755_write_unlocked(struct iio_dev *indio_dev,
 				 unsigned int reg, unsigned int val)
 {
@@ -556,6 +602,129 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
 	},
 };
 
+#ifdef CONFIG_OF
+static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *pp;
+	struct ad5755_platform_data *pdata;
+	unsigned int tmp;
+	unsigned int tmparray[3];
+	int devnr, i;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->ext_dc_dc_compenstation_resistor =
+	    of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+
+	if (!of_property_read_u32(np,
"adi,dc-dc-phase", &tmp)) + pdata->dc_dc_phase = tmp; + else + pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE; + + pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ; + if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) { + for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) { + if (tmp == ad5755_dcdc_freq_table[i][0]) { + pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1]; + break; + } + } + + if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) { + dev_err(dev, + "adi,dc-dc-freq out of range selecting 410kHz"); + } + } + + pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V; + if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) { + for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) { + if (tmp == ad5755_dcdc_maxv_table[i][0]) { + pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1]; + break; + } + } + if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) { + dev_err(dev, + "adi,dc-dc-maxv out of range selecting 23V"); + } + } + + devnr = 0; + for_each_child_of_node(np, pp) { + if (devnr > AD5755_NUM_CHANNELS) { + dev_err(dev, + "There is to many channels defined in DT\n"); + goto error_out; + } + + if (!of_property_read_u32(pp, "adi,mode", &tmp)) + pdata->dac[devnr].mode = tmp; + else + pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA; + + pdata->dac[devnr].ext_current_sense_resistor = + of_property_read_bool(pp, "adi,ext-current-sense-resistor"); + + pdata->dac[devnr].enable_voltage_overrange = + of_property_read_bool(pp, "adi,enable-voltage-overrange"); + + if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) { + pdata->dac[devnr].slew.enable = tmparray[0]; + + pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; + for (i = 0; i < ARRAY_SIZE(ad5755_slew_rate_table); i++) { + if (tmparray[1] == ad5755_slew_rate_table[i][0]) { + pdata->dac[devnr].slew.rate = + ad5755_slew_rate_table[i][1]; + break; + } + } + if (i == ARRAY_SIZE(ad5755_slew_rate_table)) { + dev_err(dev, + "channel %d slew rate out of range selecting 64kHz", + devnr); + } + + pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1; + for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) { + if (tmparray[2] == ad5755_slew_step_table[i][0]) { + pdata->dac[devnr].slew.step_size = + ad5755_slew_step_table[i][1]; + break; + } + } + if (i == ARRAY_SIZE(ad5755_slew_step_table)) { + dev_err(dev, + "channel %d slew step size out of range selecting 1 LSB", + devnr); + } + } else { + pdata->dac[devnr].slew.enable = false; + pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k; + pdata->dac[devnr].slew.step_size = + AD5755_SLEW_STEP_SIZE_1; + } + devnr++; + } + + return pdata; + + error_out: + devm_kfree(dev, pdata); + return NULL; +} +#else +static +struct ad5755_platform_data *ad5755_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + static int ad5755_probe(struct spi_device *spi) { enum ad5755_type type = spi_get_device_id(spi)->driver_data; @@ -583,8 +752,15 @@ static int ad5755_probe(struct spi_device *spi) indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->num_channels = AD5755_NUM_CHANNELS; - if (!pdata) + if (spi->dev.of_node) + pdata = ad5755_parse_dt(&spi->dev); + else + pdata = spi->dev.platform_data; + + if (!pdata) { + dev_warn(&spi->dev, "no platform data? 
using default\n"); pdata = &ad5755_default_pdata; + } ret = ad5755_init_channels(indio_dev, pdata); if (ret) @@ -607,6 +783,16 @@ static const struct spi_device_id ad5755_id[] = { }; MODULE_DEVICE_TABLE(spi, ad5755_id); +static const struct of_device_id ad5755_of_match[] = { + { .compatible = "adi,ad5755" }, + { .compatible = "adi,ad5755-1" }, + { .compatible = "adi,ad5757" }, + { .compatible = "adi,ad5735" }, + { .compatible = "adi,ad5737" }, + { } +}; +MODULE_DEVICE_TABLE(of, ad5755_of_match); + static struct spi_driver ad5755_driver = { .driver = { .name = "ad5755", @@ -14,6 +14,7 @@ #include <linux/bitops.h> #include <linux/device.h> #include <linux/errno.h> +#include <linux/gpio/driver.h> #include <linux/iio/iio.h> #include <linux/iio/types.h> #include <linux/io.h> @@ -21,6 +22,7 @@ #include <linux/isa.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/spinlock.h> #define STX104_NUM_CHAN 2 @@ -49,6 +51,20 @@ struct stx104_iio { unsigned base; }; +/** + * struct stx104_gpio - GPIO device private data structure + * @chip: instance of the gpio_chip + * @lock: synchronization lock to prevent I/O race conditions + * @base: base port address of the GPIO device + * @out_state: output bits state + */ +struct stx104_gpio { + struct gpio_chip chip; + spinlock_t lock; + unsigned int base; + unsigned int out_state; +}; + static int stx104_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { @@ -88,15 +104,81 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = { STX104_CHAN(1) }; +static int stx104_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + if (offset < 4) + return 1; + + return 0; +} + +static int stx104_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + if (offset >= 4) + return -EINVAL; + + return 0; +} + +static int stx104_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + if (offset < 4) + return -EINVAL; + + chip->set(chip, offset, value); + return 0; +} + +static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); + + if (offset >= 4) + return -EINVAL; + + return !!(inb(stx104gpio->base) & BIT(offset)); +} + +static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); + const unsigned int mask = BIT(offset) >> 4; + unsigned long flags; + + if (offset < 4) + return; + + spin_lock_irqsave(&stx104gpio->lock, flags); + + if (value) + stx104gpio->out_state |= mask; + else + stx104gpio->out_state &= ~mask; + + outb(stx104gpio->out_state, stx104gpio->base); + + spin_unlock_irqrestore(&stx104gpio->lock, flags); +} + static int stx104_probe(struct device *dev, unsigned int id) { struct iio_dev *indio_dev; struct stx104_iio *priv; + struct stx104_gpio *stx104gpio; + int err; indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); if (!indio_dev) return -ENOMEM; + stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL); + if (!stx104gpio) + return -ENOMEM; + if (!devm_request_region(dev, base[id], STX104_EXTENT, dev_name(dev))) { dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", @@ -117,14 +199,53 @@ static int stx104_probe(struct device *dev, unsigned int id) outw(0, base[id] + 4); outw(0, base[id] + 6); - return devm_iio_device_register(dev, indio_dev); + err = devm_iio_device_register(dev, indio_dev); + if (err) { + dev_err(dev, 
"IIO device registering failed (%d)\n", err); + return err; + } + + stx104gpio->chip.label = dev_name(dev); + stx104gpio->chip.parent = dev; + stx104gpio->chip.owner = THIS_MODULE; + stx104gpio->chip.base = -1; + stx104gpio->chip.ngpio = 8; + stx104gpio->chip.get_direction = stx104_gpio_get_direction; + stx104gpio->chip.direction_input = stx104_gpio_direction_input; + stx104gpio->chip.direction_output = stx104_gpio_direction_output; + stx104gpio->chip.get = stx104_gpio_get; + stx104gpio->chip.set = stx104_gpio_set; + stx104gpio->base = base[id] + 3; + stx104gpio->out_state = 0x0; + + spin_lock_init(&stx104gpio->lock); + + dev_set_drvdata(dev, stx104gpio); + + err = gpiochip_add_data(&stx104gpio->chip, stx104gpio); + if (err) { + dev_err(dev, "GPIO registering failed (%d)\n", err); + return err; + } + + return 0; +} + +static int stx104_remove(struct device *dev, unsigned int id) +{ + struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev); + + gpiochip_remove(&stx104gpio->chip); + + return 0; } static struct isa_driver stx104_driver = { .probe = stx104_probe, .driver = { .name = "stx104" - } + }, + .remove = stx104_remove }; module_isa_driver(stx104_driver, num_stx104); @@ -10,6 +10,7 @@ config IIO_DUMMY_EVGEN config IIO_SIMPLE_DUMMY tristate "An example driver with no hardware requirements" + depends on IIO_SW_DEVICE help Driver intended mainly as documentation for how to write a driver. May also be useful for testing userspace code @@ -17,26 +17,18 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/string.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/events.h> #include <linux/iio/buffer.h> +#include <linux/iio/sw_device.h> #include "iio_simple_dummy.h" -/* - * A few elements needed to fake a bus for this driver - * Note instances parameter controls how many of these - * dummy devices are registered. - */ -static unsigned instances = 1; -module_param(instances, uint, 0); - -/* Pointer array used to fake bus elements */ -static struct iio_dev **iio_dummy_devs; - -/* Fake a name for the part number, usually obtained from the id table */ -static const char *iio_dummy_part_number = "iio_dummy_part_no"; +static struct config_item_type iio_dummy_type = { + .ct_owner = THIS_MODULE, +}; /** * struct iio_dummy_accel_calibscale - realworld to register mapping @@ -572,12 +564,18 @@ static int iio_dummy_init_device(struct iio_dev *indio_dev) * const struct i2c_device_id *id) * SPI: iio_dummy_probe(struct spi_device *spi) */ -static int iio_dummy_probe(int index) +static struct iio_sw_device *iio_dummy_probe(const char *name) { int ret; struct iio_dev *indio_dev; struct iio_dummy_state *st; + struct iio_sw_device *swd; + swd = kzalloc(sizeof(*swd), GFP_KERNEL); + if (!swd) { + ret = -ENOMEM; + goto error_kzalloc; + } /* * Allocate an IIO device. * @@ -608,7 +606,7 @@ static int iio_dummy_probe(int index) * i2c_set_clientdata(client, indio_dev); * spi_set_drvdata(spi, indio_dev); */ - iio_dummy_devs[index] = indio_dev; + swd->device = indio_dev; /* * Set the device name. 
@@ -619,7 +617,7 @@ static int iio_dummy_probe(int index) * indio_dev->name = id->name; * indio_dev->name = spi_get_device_id(spi)->name; */ - indio_dev->name = iio_dummy_part_number; + indio_dev->name = kstrdup(name, GFP_KERNEL); /* Provide description of available channels */ indio_dev->channels = iio_dummy_channels; @@ -646,7 +644,9 @@ static int iio_dummy_probe(int index) if (ret < 0) goto error_unconfigure_buffer; - return 0; + iio_swd_group_init_type_name(swd, name, &iio_dummy_type); + + return swd; error_unconfigure_buffer: iio_simple_dummy_unconfigure_buffer(indio_dev); error_unregister_events: @@ -654,16 +654,18 @@ error_unregister_events: error_free_device: iio_device_free(indio_dev); error_ret: - return ret; + kfree(swd); +error_kzalloc: + return ERR_PTR(ret); } /** * iio_dummy_remove() - device instance removal function - * @index: device index. + * @swd: pointer to software IIO device abstraction * * Parameters follow those of iio_dummy_probe for buses. */ -static void iio_dummy_remove(int index) +static int iio_dummy_remove(struct iio_sw_device *swd) { /* * Get a pointer to the device instance iio_dev structure @@ -671,7 +673,7 @@ static void iio_dummy_remove(int index) * struct iio_dev *indio_dev = i2c_get_clientdata(client); * struct iio_dev *indio_dev = spi_get_drvdata(spi); */ - struct iio_dev *indio_dev = iio_dummy_devs[index]; + struct iio_dev *indio_dev = swd->device; /* Unregister the device */ iio_device_unregister(indio_dev); @@ -684,11 +686,13 @@ static void iio_dummy_remove(int index) iio_simple_dummy_events_unregister(indio_dev); /* Free all structures */ + kfree(indio_dev->name); iio_device_free(indio_dev); -} + return 0; +} /** - * iio_dummy_init() - device driver registration + * module_iio_sw_device_driver() - device driver registration * * Varies depending on bus type of the device. As there is no device * here, call probe directly. For information on device registration @@ -697,50 +701,18 @@ static void iio_dummy_remove(int index) * spi: * Documentation/spi/spi-summary */ -static __init int iio_dummy_init(void) -{ - int i, ret; - - if (instances > 10) { - instances = 1; - return -EINVAL; - } - - /* Fake a bus */ - iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs), - GFP_KERNEL); - /* Here we have no actual device so call probe */ - for (i = 0; i < instances; i++) { - ret = iio_dummy_probe(i); - if (ret < 0) - goto error_remove_devs; - } - return 0; - -error_remove_devs: - while (i--) - iio_dummy_remove(i); - - kfree(iio_dummy_devs); - return ret; -} -module_init(iio_dummy_init); +static const struct iio_sw_device_ops iio_dummy_device_ops = { + .probe = iio_dummy_probe, + .remove = iio_dummy_remove, +}; -/** - * iio_dummy_exit() - device driver removal - * - * Varies depending on bus type of the device. - * As there is no device here, call remove directly. 
- */
-static __exit void iio_dummy_exit(void)
-{
-	int i;
+static struct iio_sw_device_type iio_dummy_device = {
+	.name = "dummy",
+	.owner = THIS_MODULE,
+	.ops = &iio_dummy_device_ops,
+};
 
-	for (i = 0; i < instances; i++)
-		iio_dummy_remove(i);
-	kfree(iio_dummy_devs);
-}
-module_exit(iio_dummy_exit);
+module_iio_sw_device_driver(iio_dummy_device);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
 MODULE_DESCRIPTION("IIO dummy driver");
@@ -85,7 +85,8 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
 		}
 	}
 
-	iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
+	iio_push_to_buffers_with_timestamp(indio_dev, data,
+					   iio_get_time_ns(indio_dev));
 
 	kfree(data);
@@ -158,7 +158,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
 	struct iio_dev *indio_dev = private;
 	struct iio_dummy_state *st = iio_priv(indio_dev);
 
-	st->event_timestamp = iio_get_time_ns();
+	st->event_timestamp = iio_get_time_ns(indio_dev);
 
 	return IRQ_WAKE_THREAD;
 }
@@ -50,6 +50,7 @@
 #define BMG160_REG_PMU_BW		0x10
 #define BMG160_NO_FILTER		0
 #define BMG160_DEF_BW			100
+#define BMG160_REG_PMU_BW_RES		BIT(7)
 
 #define BMG160_REG_INT_MAP_0		0x17
 #define BMG160_INT_MAP_0_BIT_ANY	BIT(1)
@@ -100,7 +101,6 @@ struct bmg160_data {
 	struct iio_trigger *motion_trig;
 	struct mutex mutex;
 	s16 buffer[8];
-	u8 bw_bits;
 	u32 dps_range;
 	int ev_enable_state;
 	int slope_thres;
@@ -117,13 +117,16 @@ enum bmg160_axis {
 };
 
 static const struct {
-	int val;
+	int odr;
+	int filter;
 	int bw_bits;
-} bmg160_samp_freq_table[] = { {100, 0x07},
-			       {200, 0x06},
-			       {400, 0x03},
-			       {1000, 0x02},
-			       {2000, 0x01} };
+} bmg160_samp_freq_table[] = { {100, 32, 0x07},
+			       {200, 64, 0x06},
+			       {100, 12, 0x05},
+			       {200, 23, 0x04},
+			       {400, 47, 0x03},
+			       {1000, 116, 0x02},
+			       {2000, 230, 0x01} };
 
 static const struct {
 	int scale;
@@ -153,7 +156,7 @@ static int bmg160_convert_freq_to_bit(int val)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
-		if (bmg160_samp_freq_table[i].val == val)
+		if (bmg160_samp_freq_table[i].odr == val)
 			return bmg160_samp_freq_table[i].bw_bits;
 	}
 
@@ -176,7 +179,53 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
 		return ret;
 	}
 
-	data->bw_bits = bw_bits;
+	return 0;
+}
+
+static int bmg160_get_filter(struct bmg160_data *data, int *val)
+{
+	struct device *dev = regmap_get_device(data->regmap);
+	int ret;
+	int i;
+	unsigned int bw_bits;
+
+	ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+	if (ret < 0) {
+		dev_err(dev, "Error reading reg_pmu_bw\n");
+		return ret;
+	}
+
+	/* Ignore the readonly reserved bit. */
+	bw_bits &= ~BMG160_REG_PMU_BW_RES;
+
+	for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+		if (bmg160_samp_freq_table[i].bw_bits == bw_bits)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(bmg160_samp_freq_table))
+		return -EINVAL;
+
+	*val = bmg160_samp_freq_table[i].filter;
+
+	return ret ? ret : IIO_VAL_INT;
+}
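/*
 * Worked example of the table above: bw_bits 0x07 and 0x05 both select a
 * 100 Hz ODR but different low-pass filters (32 Hz and 12 Hz), so an ODR
 * alone no longer identifies the register value; reads therefore match on
 * bw_bits as read back from the chip, while bmg160_set_filter() below
 * matches on the requested filter bandwidth.
 */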
+
+static int bmg160_set_filter(struct bmg160_data *data, int val)
+{
+	struct device *dev = regmap_get_device(data->regmap);
+	int ret;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+		if (bmg160_samp_freq_table[i].filter == val)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(bmg160_samp_freq_table))
+		return -EINVAL;
+
+	ret = regmap_write(data->regmap, BMG160_REG_PMU_BW,
+			   bmg160_samp_freq_table[i].bw_bits);
+	if (ret < 0) {
+		dev_err(dev, "Error writing reg_pmu_bw\n");
+		return ret;
+	}
 
 	return 0;
 }
@@ -386,11 +435,23 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
 
 static int bmg160_get_bw(struct bmg160_data *data, int *val)
 {
+	struct device *dev = regmap_get_device(data->regmap);
 	int i;
+	unsigned int bw_bits;
+	int ret;
+
+	ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+	if (ret < 0) {
+		dev_err(dev, "Error reading reg_pmu_bw\n");
+		return ret;
+	}
+
+	/* Ignore the readonly reserved bit. */
+	bw_bits &= ~BMG160_REG_PMU_BW_RES;
 
 	for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
-		if (bmg160_samp_freq_table[i].bw_bits == data->bw_bits) {
-			*val = bmg160_samp_freq_table[i].val;
+		if (bmg160_samp_freq_table[i].bw_bits == bw_bits) {
+			*val = bmg160_samp_freq_table[i].odr;
 			return IIO_VAL_INT;
 		}
 	}
@@ -507,6 +568,8 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
 			return IIO_VAL_INT;
 		} else
 			return -EINVAL;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		return bmg160_get_filter(data, val);
 	case IIO_CHAN_INFO_SCALE:
 		*val = 0;
 		switch (chan->type) {
@@ -571,6 +634,26 @@ static int bmg160_write_raw(struct iio_dev *indio_dev,
 		ret = bmg160_set_power_state(data, false);
 		mutex_unlock(&data->mutex);
 		return ret;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		if (val2)
+			return -EINVAL;
+
+		mutex_lock(&data->mutex);
+		ret = bmg160_set_power_state(data, true);
+		if (ret < 0) {
+			bmg160_set_power_state(data, false);
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_filter(data, val);
+		if (ret < 0) {
+			bmg160_set_power_state(data, false);
+			mutex_unlock(&data->mutex);
+			return ret;
+		}
+		ret = bmg160_set_power_state(data, false);
+		mutex_unlock(&data->mutex);
+		return ret;
 	case IIO_CHAN_INFO_SCALE:
 		if (val)
 			return -EINVAL;
@@ -728,7 +811,8 @@ static const struct iio_event_spec bmg160_event = {
 	.channel2 = IIO_MOD_##_axis,					\
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
-				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+				    BIT(IIO_CHAN_INFO_SAMP_FREQ) |	\
+		BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),	\
 	.scan_index = AXIS_##_axis,					\
 	.scan_type = {							\
 		.sign = 's',						\
@@ -885,25 +969,25 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
 	if (val & BMG160_ANY_MOTION_BIT_X)
 		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-							0,
-							IIO_MOD_X,
-							IIO_EV_TYPE_ROC,
-							dir),
-							iio_get_time_ns());
+							     0,
+							     IIO_MOD_X,
+							     IIO_EV_TYPE_ROC,
+							     dir),
+			       iio_get_time_ns(indio_dev));
 
 	if (val & BMG160_ANY_MOTION_BIT_Y)
 		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-							0,
-							IIO_MOD_Y,
-							IIO_EV_TYPE_ROC,
-							dir),
-							iio_get_time_ns());
+							     0,
+							     IIO_MOD_Y,
+							     IIO_EV_TYPE_ROC,
+							     dir),
+			       iio_get_time_ns(indio_dev));
 
 	if (val & BMG160_ANY_MOTION_BIT_Z)
 		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-							0,
-							IIO_MOD_Z,
-							IIO_EV_TYPE_ROC,
-							dir),
-							iio_get_time_ns());
+							     0,
+							     IIO_MOD_Z,
+							     IIO_EV_TYPE_ROC,
+							     dir),
+			       iio_get_time_ns(indio_dev));
 
 ack_intr_status:
 	if (!data->dready_trigger_on) {
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
 
 int
st_gyro_allocate_ring(struct iio_dev *indio_dev) { - return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + return iio_triggered_buffer_setup(indio_dev, NULL, &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); } @@ -409,6 +409,7 @@ static const struct iio_info gyro_info = { static const struct iio_trigger_ops st_gyro_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, + .validate_device = st_sensors_validate_device, }; #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) #else @@ -425,13 +426,15 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) indio_dev->info = &gyro_info; mutex_init(&gdata->tb.buf_lock); - st_sensors_power_enable(indio_dev); + err = st_sensors_power_enable(indio_dev); + if (err) + return err; err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_gyro_sensors_settings), st_gyro_sensors_settings); if (err < 0) - return err; + goto st_gyro_power_off; gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS; gdata->multiread_bit = gdata->sensor_settings->multi_read_bit; @@ -445,11 +448,11 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) err = st_sensors_init_sensor(indio_dev, (struct st_sensors_platform_data *)&gyro_pdata); if (err < 0) - return err; + goto st_gyro_power_off; err = st_gyro_allocate_ring(indio_dev); if (err < 0) - return err; + goto st_gyro_power_off; if (irq > 0) { err = st_sensors_allocate_trigger(indio_dev, @@ -472,6 +475,8 @@ st_gyro_device_register_error: st_sensors_deallocate_trigger(indio_dev); st_gyro_probe_trigger_error: st_gyro_deallocate_ring(indio_dev); +st_gyro_power_off: + st_sensors_power_disable(indio_dev); return err; } @@ -1,7 +1,7 @@ /* * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. 
Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or modify @@ -39,127 +39,90 @@ #define AFE4403_TIAGAIN 0x20 #define AFE4403_TIA_AMB_GAIN 0x21 -/* AFE4403 GAIN register fields */ -#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0) -#define AFE4403_TIAGAIN_RES_SHIFT 0 -#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3) -#define AFE4403_TIAGAIN_CAP_SHIFT 3 - -/* AFE4403 LEDCNTRL register fields */ -#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8) -#define AFE440X_LEDCNTRL_LED1_SHIFT 8 -#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0) -#define AFE440X_LEDCNTRL_LED2_SHIFT 0 -#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16) -#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16 - -/* AFE4403 CONTROL2 register fields */ -#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2) -#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8) -#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10) -#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11) -#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17) -#define AFE440X_CONTROL2_TX_REF_SHIFT 17 - -/* AFE4404 NULL fields */ -#define NULL_MASK 0 -#define NULL_SHIFT 0 - -/* AFE4403 LEDCNTRL values */ -#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1 -#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2 -#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3 - -/* AFE4403 CONTROL2 values */ -#define AFE440X_CONTROL2_TX_REF_025 0x0 -#define AFE440X_CONTROL2_TX_REF_050 0x1 -#define AFE440X_CONTROL2_TX_REF_100 0x2 -#define AFE440X_CONTROL2_TX_REF_075 0x3 - -/* AFE4403 CONTROL3 values */ -#define AFE440X_CONTROL3_CLK_DIV_2 0x0 -#define AFE440X_CONTROL3_CLK_DIV_4 0x2 -#define AFE440X_CONTROL3_CLK_DIV_6 0x3 -#define AFE440X_CONTROL3_CLK_DIV_8 0x4 -#define AFE440X_CONTROL3_CLK_DIV_12 0x5 -#define AFE440X_CONTROL3_CLK_DIV_1 0x7 - -/* AFE4403 TIAGAIN_CAP values */ -#define AFE4403_TIAGAIN_CAP_5_P 0x0 -#define AFE4403_TIAGAIN_CAP_10_P 0x1 -#define AFE4403_TIAGAIN_CAP_20_P 0x2 -#define AFE4403_TIAGAIN_CAP_30_P 0x3 -#define AFE4403_TIAGAIN_CAP_55_P 0x8 -#define AFE4403_TIAGAIN_CAP_155_P 0x10 - -/* AFE4403 TIAGAIN_RES values */ -#define AFE4403_TIAGAIN_RES_500_K 0x0 -#define AFE4403_TIAGAIN_RES_250_K 0x1 -#define AFE4403_TIAGAIN_RES_100_K 0x2 -#define AFE4403_TIAGAIN_RES_50_K 0x3 -#define AFE4403_TIAGAIN_RES_25_K 0x4 -#define AFE4403_TIAGAIN_RES_10_K 0x5 -#define AFE4403_TIAGAIN_RES_1_M 0x6 -#define AFE4403_TIAGAIN_RES_NONE 0x7 +enum afe4403_fields { + /* Gains */ + F_RF_LED1, F_CF_LED1, + F_RF_LED, F_CF_LED, + + /* LED Current */ + F_ILED1, F_ILED2, + + /* sentinel */ + F_MAX_FIELDS +}; + +static const struct reg_field afe4403_reg_fields[] = { + /* Gains */ + [F_RF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 0, 2), + [F_CF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 3, 7), + [F_RF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 0, 2), + [F_CF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 3, 7), + /* LED Current */ + [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 7), + [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 8, 15), +}; /** - * struct afe4403_data - * @dev - Device structure - * @spi - SPI device handle - * @regmap - Register map of the device - * @regulator - Pointer to the regulator for the IC - * @trig - IIO trigger for this device - * @irq - ADC_RDY line interrupt number + * struct afe4403_data - AFE4403 device instance data + * @dev: Device structure + * @spi: SPI device handle + * @regmap: Register map of the device + * @fields: Register fields of the device + * @regulator: Pointer to the regulator for the IC + * @trig: IIO trigger for this device + * @irq: ADC_RDY line interrupt number */ struct afe4403_data { struct device *dev; struct spi_device 
*spi;
 	struct regmap *regmap;
+	struct regmap_field *fields[F_MAX_FIELDS];
 	struct regulator *regulator;
 	struct iio_trigger *trig;
 	int irq;
 };
 
 enum afe4403_chan_id {
+	LED2 = 1,
+	ALED2,
 	LED1,
 	ALED1,
-	LED2,
-	ALED2,
-	LED1_ALED1,
 	LED2_ALED2,
-	ILED1,
-	ILED2,
+	LED1_ALED1,
 };
 
-static const struct afe440x_reg_info afe4403_reg_info[] = {
-	[LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
-	[ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
-	[LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
-	[ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
-	[LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
-	[LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
-	[ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
-	[ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+static const unsigned int afe4403_channel_values[] = {
+	[LED2] = AFE440X_LED2VAL,
+	[ALED2] = AFE440X_ALED2VAL,
+	[LED1] = AFE440X_LED1VAL,
+	[ALED1] = AFE440X_ALED1VAL,
+	[LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+	[LED1_ALED1] = AFE440X_LED1_ALED1VAL,
+};
+
+static const unsigned int afe4403_channel_leds[] = {
+	[LED2] = F_ILED2,
+	[LED1] = F_ILED1,
 };
 
 static const struct iio_chan_spec afe4403_channels[] = {
 	/* ADC values */
-	AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
-	AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
-	AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
-	AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
-	AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
-	AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+	AFE440X_INTENSITY_CHAN(LED2, 0),
+	AFE440X_INTENSITY_CHAN(ALED2, 0),
+	AFE440X_INTENSITY_CHAN(LED1, 0),
+	AFE440X_INTENSITY_CHAN(ALED1, 0),
+	AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+	AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
 	/* LED current */
-	AFE440X_CURRENT_CHAN(ILED1, "led1"),
-	AFE440X_CURRENT_CHAN(ILED2, "led2"),
+	AFE440X_CURRENT_CHAN(LED2),
+	AFE440X_CURRENT_CHAN(LED1),
 };
 
 static const struct afe440x_val_table afe4403_res_table[] = {
 	{ 500000 }, { 250000 }, { 100000 }, { 50000 },
 	{ 25000 }, { 10000 }, { 1000000 }, { 0 },
 };
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4403_res_table);
 
 static const struct afe440x_val_table afe4403_cap_table[] = {
 	{ 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
@@ -171,7 +134,7 @@ static const struct afe440x_val_table afe4403_cap_table[] = {
 	{ 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
 	{ 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
 };
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4403_cap_table);
 
 static ssize_t afe440x_show_register(struct device *dev,
 				     struct device_attribute *attr,
@@ -180,38 +143,21 @@ static ssize_t afe440x_show_register(struct device *dev,
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct afe4403_data *afe = iio_priv(indio_dev);
 	struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
-	unsigned int reg_val, type;
+	unsigned int reg_val;
 	int vals[2];
-	int ret, val_len;
+	int ret;
 
-	ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+	ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
 	if (ret)
 		return ret;
 
-	reg_val &= afe440x_attr->mask;
-	reg_val >>= afe440x_attr->shift;
-
-	switch (afe440x_attr->type) {
-	case SIMPLE:
-		type = IIO_VAL_INT;
-		val_len = 1;
-		vals[0] = reg_val;
-		break;
-	case RESISTANCE:
-	case CAPACITANCE:
-		type = IIO_VAL_INT_PLUS_MICRO;
- val_len = 2; - if (reg_val < afe440x_attr->table_size) { - vals[0] = afe440x_attr->val_table[reg_val].integer; - vals[1] = afe440x_attr->val_table[reg_val].fract; - break; - } - return -EINVAL; - default: + if (reg_val >= afe440x_attr->table_size) return -EINVAL; - } - return iio_format_value(buf, type, val_len, vals); + vals[0] = afe440x_attr->val_table[reg_val].integer; + vals[1] = afe440x_attr->val_table[reg_val].fract; + + return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals); } static ssize_t afe440x_store_register(struct device *dev, @@ -227,48 +173,43 @@ static ssize_t afe440x_store_register(struct device *dev, if (ret) return ret; - switch (afe440x_attr->type) { - case SIMPLE: - val = integer; - break; - case RESISTANCE: - case CAPACITANCE: - for (val = 0; val < afe440x_attr->table_size; val++) - if (afe440x_attr->val_table[val].integer == integer && - afe440x_attr->val_table[val].fract == fract) - break; - if (val == afe440x_attr->table_size) - return -EINVAL; - break; - default: + for (val = 0; val < afe440x_attr->table_size; val++) + if (afe440x_attr->val_table[val].integer == integer && + afe440x_attr->val_table[val].fract == fract) + break; + if (val == afe440x_attr->table_size) return -EINVAL; - } - ret = regmap_update_bits(afe->regmap, afe440x_attr->reg, - afe440x_attr->mask, - (val << afe440x_attr->shift)); + ret = regmap_field_write(afe->fields[afe440x_attr->field], val); if (ret) return ret; return count; } -static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0); +static AFE440X_ATTR(in_intensity1_resistance, F_RF_LED, afe4403_res_table); +static AFE440X_ATTR(in_intensity1_capacitance, F_CF_LED, afe4403_cap_table); + +static AFE440X_ATTR(in_intensity2_resistance, F_RF_LED, afe4403_res_table); +static AFE440X_ATTR(in_intensity2_capacitance, F_CF_LED, afe4403_cap_table); -static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table)); -static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table)); +static AFE440X_ATTR(in_intensity3_resistance, F_RF_LED1, afe4403_res_table); +static AFE440X_ATTR(in_intensity3_capacitance, F_CF_LED1, afe4403_cap_table); -static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table)); -static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table)); +static AFE440X_ATTR(in_intensity4_resistance, F_RF_LED1, afe4403_res_table); +static AFE440X_ATTR(in_intensity4_capacitance, F_CF_LED1, afe4403_cap_table); static struct attribute *afe440x_attributes[] = { - &afe440x_attr_tia_separate_en.dev_attr.attr, - &afe440x_attr_tia_resistance1.dev_attr.attr, - &afe440x_attr_tia_capacitance1.dev_attr.attr, - &afe440x_attr_tia_resistance2.dev_attr.attr, - &afe440x_attr_tia_capacitance2.dev_attr.attr, - &dev_attr_tia_resistance_available.attr, - &dev_attr_tia_capacitance_available.attr, + &dev_attr_in_intensity_resistance_available.attr, + &dev_attr_in_intensity_capacitance_available.attr, + &afe440x_attr_in_intensity1_resistance.dev_attr.attr, + &afe440x_attr_in_intensity1_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity2_resistance.dev_attr.attr, + &afe440x_attr_in_intensity2_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity3_resistance.dev_attr.attr, + 
&afe440x_attr_in_intensity3_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity4_resistance.dev_attr.attr, + &afe440x_attr_in_intensity4_capacitance.dev_attr.attr, NULL }; @@ -309,35 +250,26 @@ static int afe4403_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4403_data *afe = iio_priv(indio_dev); - const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address]; + unsigned int reg = afe4403_channel_values[chan->address]; + unsigned int field = afe4403_channel_leds[chan->address]; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: - ret = afe4403_read(afe, reg_info.reg, val); - if (ret) - return ret; - return IIO_VAL_INT; - case IIO_CHAN_INFO_OFFSET: - ret = regmap_read(afe->regmap, reg_info.offreg, - val); + ret = afe4403_read(afe, reg, val); if (ret) return ret; - *val &= reg_info.mask; - *val >>= reg_info.shift; return IIO_VAL_INT; } break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: - ret = regmap_read(afe->regmap, reg_info.reg, val); + ret = regmap_field_read(afe->fields[field], val); if (ret) return ret; - *val &= reg_info.mask; - *val >>= reg_info.shift; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = 0; @@ -357,25 +289,13 @@ static int afe4403_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct afe4403_data *afe = iio_priv(indio_dev); - const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address]; + unsigned int field = afe4403_channel_leds[chan->address]; switch (chan->type) { - case IIO_INTENSITY: - switch (mask) { - case IIO_CHAN_INFO_OFFSET: - return regmap_update_bits(afe->regmap, - reg_info.offreg, - reg_info.mask, - (val << reg_info.shift)); - } - break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: - return regmap_update_bits(afe->regmap, - reg_info.reg, - reg_info.mask, - (val << reg_info.shift)); + return regmap_field_write(afe->fields[field], val); } break; default: @@ -410,7 +330,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = spi_write_then_read(afe->spi, - &afe4403_reg_info[bit].reg, 1, + &afe4403_channel_values[bit], 1, rx, 3); if (ret) goto err; @@ -472,12 +392,8 @@ static const struct iio_trigger_ops afe4403_trigger_ops = { static const struct reg_sequence afe4403_reg_sequences[] = { AFE4403_TIMING_PAIRS, - { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007}, - { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M }, - { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) | - (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) }, - { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 << - AFE440X_CONTROL2_TX_REF_SHIFT }, + { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN }, + { AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN }, }; static const struct regmap_range afe4403_yes_ranges[] = { @@ -498,13 +414,11 @@ static const struct regmap_config afe4403_regmap_config = { .volatile_table = &afe4403_volatile_table, }; -#ifdef CONFIG_OF static const struct of_device_id afe4403_of_match[] = { { .compatible = "ti,afe4403", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, afe4403_of_match); -#endif static int __maybe_unused afe4403_suspend(struct device *dev) { @@ -553,7 +467,7 @@ static int afe4403_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct afe4403_data *afe; - int ret; + int i, ret; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe)); if (!indio_dev) @@ -572,6 +486,15 @@ static int afe4403_probe(struct spi_device 
*spi) return PTR_ERR(afe->regmap); } + for (i = 0; i < F_MAX_FIELDS; i++) { + afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap, + afe4403_reg_fields[i]); + if (IS_ERR(afe->fields[i])) { + dev_err(afe->dev, "Unable to allocate regmap fields\n"); + return PTR_ERR(afe->fields[i]); + } + } + afe->regulator = devm_regulator_get(afe->dev, "tx_sup"); if (IS_ERR(afe->regulator)) { dev_err(afe->dev, "Unable to get regulator\n"); @@ -694,7 +617,7 @@ MODULE_DEVICE_TABLE(spi, afe4403_ids); static struct spi_driver afe4403_spi_driver = { .driver = { .name = AFE4403_DRIVER_NAME, - .of_match_table = of_match_ptr(afe4403_of_match), + .of_match_table = afe4403_of_match, .pm = &afe4403_pm_ops, }, .probe = afe4403_probe, @@ -704,5 +627,5 @@ static struct spi_driver afe4403_spi_driver = { module_spi_driver(afe4403_spi_driver); MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); -MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter"); +MODULE_DESCRIPTION("TI AFE4403 Heart Rate Monitor and Pulse Oximeter AFE"); MODULE_LICENSE("GPL v2"); @@ -1,7 +1,7 @@ /* * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or modify @@ -48,118 +48,102 @@ #define AFE4404_AVG_LED2_ALED2VAL 0x3f #define AFE4404_AVG_LED1_ALED1VAL 0x40 -/* AFE4404 GAIN register fields */ -#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0) -#define AFE4404_TIA_GAIN_RES_SHIFT 0 -#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3) -#define AFE4404_TIA_GAIN_CAP_SHIFT 3 +/* AFE4404 CONTROL2 register fields */ +#define AFE440X_CONTROL2_OSC_ENABLE BIT(9) -/* AFE4404 LEDCNTRL register fields */ -#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0) -#define AFE4404_LEDCNTRL_ILED1_SHIFT 0 -#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6) -#define AFE4404_LEDCNTRL_ILED2_SHIFT 6 -#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12) -#define AFE4404_LEDCNTRL_ILED3_SHIFT 12 +enum afe4404_fields { + /* Gains */ + F_TIA_GAIN_SEP, F_TIA_CF_SEP, + F_TIA_GAIN, TIA_CF, -/* AFE4404 CONTROL2 register fields */ -#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17) -#define AFE440X_CONTROL2_ILED_2X_SHIFT 17 - -/* AFE4404 CONTROL3 register fields */ -#define AFE440X_CONTROL3_OSC_ENABLE BIT(9) - -/* AFE4404 OFFDAC register current fields */ -#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5) -#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5 -#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15) -#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15 -#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0) -#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0 -#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10) -#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10 -#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0) -#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0 - -/* AFE4404 NULL fields */ -#define NULL_MASK 0 -#define NULL_SHIFT 0 - -/* AFE4404 TIA_GAIN_CAP values */ -#define AFE4404_TIA_GAIN_CAP_5_P 0x0 -#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1 -#define AFE4404_TIA_GAIN_CAP_10_P 0x2 -#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3 -#define AFE4404_TIA_GAIN_CAP_20_P 0x4 -#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5 -#define AFE4404_TIA_GAIN_CAP_25_P 0x6 -#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7 - -/* AFE4404 TIA_GAIN_RES values */ -#define AFE4404_TIA_GAIN_RES_500_K 0x0 -#define AFE4404_TIA_GAIN_RES_250_K 0x1 -#define AFE4404_TIA_GAIN_RES_100_K 0x2 -#define 
AFE4404_TIA_GAIN_RES_50_K 0x3 -#define AFE4404_TIA_GAIN_RES_25_K 0x4 -#define AFE4404_TIA_GAIN_RES_10_K 0x5 -#define AFE4404_TIA_GAIN_RES_1_M 0x6 -#define AFE4404_TIA_GAIN_RES_2_M 0x7 + /* LED Current */ + F_ILED1, F_ILED2, F_ILED3, + + /* Offset DAC */ + F_OFFDAC_AMB2, F_OFFDAC_LED1, F_OFFDAC_AMB1, F_OFFDAC_LED2, + + /* sentinel */ + F_MAX_FIELDS +}; + +static const struct reg_field afe4404_reg_fields[] = { + /* Gains */ + [F_TIA_GAIN_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 0, 2), + [F_TIA_CF_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 3, 5), + [F_TIA_GAIN] = REG_FIELD(AFE4404_TIA_GAIN, 0, 2), + [TIA_CF] = REG_FIELD(AFE4404_TIA_GAIN, 3, 5), + /* LED Current */ + [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 5), + [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 6, 11), + [F_ILED3] = REG_FIELD(AFE440X_LEDCNTRL, 12, 17), + /* Offset DAC */ + [F_OFFDAC_AMB2] = REG_FIELD(AFE4404_OFFDAC, 0, 4), + [F_OFFDAC_LED1] = REG_FIELD(AFE4404_OFFDAC, 5, 9), + [F_OFFDAC_AMB1] = REG_FIELD(AFE4404_OFFDAC, 10, 14), + [F_OFFDAC_LED2] = REG_FIELD(AFE4404_OFFDAC, 15, 19), +}; /** - * struct afe4404_data - * @dev - Device structure - * @regmap - Register map of the device - * @regulator - Pointer to the regulator for the IC - * @trig - IIO trigger for this device - * @irq - ADC_RDY line interrupt number + * struct afe4404_data - AFE4404 device instance data + * @dev: Device structure + * @regmap: Register map of the device + * @fields: Register fields of the device + * @regulator: Pointer to the regulator for the IC + * @trig: IIO trigger for this device + * @irq: ADC_RDY line interrupt number */ struct afe4404_data { struct device *dev; struct regmap *regmap; + struct regmap_field *fields[F_MAX_FIELDS]; struct regulator *regulator; struct iio_trigger *trig; int irq; }; enum afe4404_chan_id { + LED2 = 1, + ALED2, LED1, ALED1, - LED2, - ALED2, - LED3, - LED1_ALED1, LED2_ALED2, - ILED1, - ILED2, - ILED3, + LED1_ALED1, +}; + +static const unsigned int afe4404_channel_values[] = { + [LED2] = AFE440X_LED2VAL, + [ALED2] = AFE440X_ALED2VAL, + [LED1] = AFE440X_LED1VAL, + [ALED1] = AFE440X_ALED1VAL, + [LED2_ALED2] = AFE440X_LED2_ALED2VAL, + [LED1_ALED1] = AFE440X_LED1_ALED1VAL, }; -static const struct afe440x_reg_info afe4404_reg_info[] = { - [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1), - [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1), - [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2), - [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2), - [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL), - [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL), - [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL), - [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1), - [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2), - [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3), +static const unsigned int afe4404_channel_leds[] = { + [LED2] = F_ILED2, + [ALED2] = F_ILED3, + [LED1] = F_ILED1, +}; + +static const unsigned int afe4404_channel_offdacs[] = { + [LED2] = F_OFFDAC_LED2, + [ALED2] = F_OFFDAC_AMB2, + [LED1] = F_OFFDAC_LED1, + [ALED1] = F_OFFDAC_AMB1, }; static const struct iio_chan_spec afe4404_channels[] = { /* ADC values */ - AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)), - AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)), - AFE440X_INTENSITY_CHAN(LED2, "led2", 
BIT(IIO_CHAN_INFO_OFFSET)), - AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)), - AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)), - AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0), - AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0), + AFE440X_INTENSITY_CHAN(LED2, BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(ALED2, BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(LED1, BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(ALED1, BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(LED2_ALED2, 0), + AFE440X_INTENSITY_CHAN(LED1_ALED1, 0), /* LED current */ - AFE440X_CURRENT_CHAN(ILED1, "led1"), - AFE440X_CURRENT_CHAN(ILED2, "led2"), - AFE440X_CURRENT_CHAN(ILED3, "led3"), + AFE440X_CURRENT_CHAN(LED2), + AFE440X_CURRENT_CHAN(ALED2), + AFE440X_CURRENT_CHAN(LED1), }; static const struct afe440x_val_table afe4404_res_table[] = { @@ -172,7 +156,7 @@ static const struct afe440x_val_table afe4404_res_table[] = { { .integer = 1000000, .fract = 0 }, { .integer = 2000000, .fract = 0 }, }; -AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table); +AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4404_res_table); static const struct afe440x_val_table afe4404_cap_table[] = { { .integer = 0, .fract = 5000 }, @@ -184,7 +168,7 @@ static const struct afe440x_val_table afe4404_cap_table[] = { { .integer = 0, .fract = 25000 }, { .integer = 0, .fract = 22500 }, }; -AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table); +AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4404_cap_table); static ssize_t afe440x_show_register(struct device *dev, struct device_attribute *attr, @@ -193,38 +177,21 @@ static ssize_t afe440x_show_register(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct afe4404_data *afe = iio_priv(indio_dev); struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr); - unsigned int reg_val, type; + unsigned int reg_val; int vals[2]; - int ret, val_len; + int ret; - ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val); + ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val); if (ret) return ret; - reg_val &= afe440x_attr->mask; - reg_val >>= afe440x_attr->shift; - - switch (afe440x_attr->type) { - case SIMPLE: - type = IIO_VAL_INT; - val_len = 1; - vals[0] = reg_val; - break; - case RESISTANCE: - case CAPACITANCE: - type = IIO_VAL_INT_PLUS_MICRO; - val_len = 2; - if (reg_val < afe440x_attr->table_size) { - vals[0] = afe440x_attr->val_table[reg_val].integer; - vals[1] = afe440x_attr->val_table[reg_val].fract; - break; - } - return -EINVAL; - default: + if (reg_val >= afe440x_attr->table_size) return -EINVAL; - } - return iio_format_value(buf, type, val_len, vals); + vals[0] = afe440x_attr->val_table[reg_val].integer; + vals[1] = afe440x_attr->val_table[reg_val].fract; + + return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals); } static ssize_t afe440x_store_register(struct device *dev, @@ -240,48 +207,43 @@ static ssize_t afe440x_store_register(struct device *dev, if (ret) return ret; - switch (afe440x_attr->type) { - case SIMPLE: - val = integer; - break; - case RESISTANCE: - case CAPACITANCE: - for (val = 0; val < afe440x_attr->table_size; val++) - if (afe440x_attr->val_table[val].integer == integer && - afe440x_attr->val_table[val].fract == fract) - break; - if (val == afe440x_attr->table_size) - return -EINVAL; - break; - default: + for (val = 0; val < afe440x_attr->table_size; val++) + if (afe440x_attr->val_table[val].integer == integer && + 
afe440x_attr->val_table[val].fract == fract) + break; + if (val == afe440x_attr->table_size) return -EINVAL; - } - ret = regmap_update_bits(afe->regmap, afe440x_attr->reg, - afe440x_attr->mask, - (val << afe440x_attr->shift)); + ret = regmap_field_write(afe->fields[afe440x_attr->field], val); if (ret) return ret; return count; } -static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0); +static AFE440X_ATTR(in_intensity1_resistance, F_TIA_GAIN_SEP, afe4404_res_table); +static AFE440X_ATTR(in_intensity1_capacitance, F_TIA_CF_SEP, afe4404_cap_table); + +static AFE440X_ATTR(in_intensity2_resistance, F_TIA_GAIN_SEP, afe4404_res_table); +static AFE440X_ATTR(in_intensity2_capacitance, F_TIA_CF_SEP, afe4404_cap_table); -static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table)); -static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table)); +static AFE440X_ATTR(in_intensity3_resistance, F_TIA_GAIN, afe4404_res_table); +static AFE440X_ATTR(in_intensity3_capacitance, TIA_CF, afe4404_cap_table); -static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table)); -static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table)); +static AFE440X_ATTR(in_intensity4_resistance, F_TIA_GAIN, afe4404_res_table); +static AFE440X_ATTR(in_intensity4_capacitance, TIA_CF, afe4404_cap_table); static struct attribute *afe440x_attributes[] = { - &afe440x_attr_tia_separate_en.dev_attr.attr, - &afe440x_attr_tia_resistance1.dev_attr.attr, - &afe440x_attr_tia_capacitance1.dev_attr.attr, - &afe440x_attr_tia_resistance2.dev_attr.attr, - &afe440x_attr_tia_capacitance2.dev_attr.attr, - &dev_attr_tia_resistance_available.attr, - &dev_attr_tia_capacitance_available.attr, + &dev_attr_in_intensity_resistance_available.attr, + &dev_attr_in_intensity_capacitance_available.attr, + &afe440x_attr_in_intensity1_resistance.dev_attr.attr, + &afe440x_attr_in_intensity1_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity2_resistance.dev_attr.attr, + &afe440x_attr_in_intensity2_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity3_resistance.dev_attr.attr, + &afe440x_attr_in_intensity3_capacitance.dev_attr.attr, + &afe440x_attr_in_intensity4_resistance.dev_attr.attr, + &afe440x_attr_in_intensity4_capacitance.dev_attr.attr, NULL }; @@ -294,35 +256,32 @@ static int afe4404_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address]; + unsigned int value_reg = afe4404_channel_values[chan->address]; + unsigned int led_field = afe4404_channel_leds[chan->address]; + unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; int ret; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_RAW: - ret = regmap_read(afe->regmap, reg_info.reg, val); + ret = regmap_read(afe->regmap, value_reg, val); if (ret) return ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - ret = regmap_read(afe->regmap, reg_info.offreg, - val); + ret = regmap_field_read(afe->fields[offdac_field], val); if (ret) return ret; - *val &= reg_info.mask; - *val >>= reg_info.shift; return IIO_VAL_INT; } break; case IIO_CURRENT: switch (mask) { case 
IIO_CHAN_INFO_RAW: - ret = regmap_read(afe->regmap, reg_info.reg, val); + ret = regmap_field_read(afe->fields[led_field], val); if (ret) return ret; - *val &= reg_info.mask; - *val >>= reg_info.shift; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = 0; @@ -342,25 +301,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct afe4404_data *afe = iio_priv(indio_dev); - const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address]; + unsigned int led_field = afe4404_channel_leds[chan->address]; + unsigned int offdac_field = afe4404_channel_offdacs[chan->address]; switch (chan->type) { case IIO_INTENSITY: switch (mask) { case IIO_CHAN_INFO_OFFSET: - return regmap_update_bits(afe->regmap, - reg_info.offreg, - reg_info.mask, - (val << reg_info.shift)); + return regmap_field_write(afe->fields[offdac_field], val); } break; case IIO_CURRENT: switch (mask) { case IIO_CHAN_INFO_RAW: - return regmap_update_bits(afe->regmap, - reg_info.reg, - reg_info.mask, - (val << reg_info.shift)); + return regmap_field_write(afe->fields[led_field], val); } break; default: @@ -387,7 +341,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { - ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg, + ret = regmap_read(afe->regmap, afe4404_channel_values[bit], &buffer[i++]); if (ret) goto err; @@ -443,11 +397,8 @@ static const struct iio_trigger_ops afe4404_trigger_ops = { static const struct reg_sequence afe4404_reg_sequences[] = { AFE4404_TIMING_PAIRS, { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN }, - { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K }, - { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) | - (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) | - (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) }, - { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE }, + { AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN }, + { AFE440X_CONTROL2, AFE440X_CONTROL2_OSC_ENABLE }, }; static const struct regmap_range afe4404_yes_ranges[] = { @@ -469,13 +420,11 @@ static const struct regmap_config afe4404_regmap_config = { .volatile_table = &afe4404_volatile_table, }; -#ifdef CONFIG_OF static const struct of_device_id afe4404_of_match[] = { { .compatible = "ti,afe4404", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, afe4404_of_match); -#endif static int __maybe_unused afe4404_suspend(struct device *dev) { @@ -525,7 +474,7 @@ static int afe4404_probe(struct i2c_client *client, { struct iio_dev *indio_dev; struct afe4404_data *afe; - int ret; + int i, ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe)); if (!indio_dev) @@ -543,6 +492,15 @@ static int afe4404_probe(struct i2c_client *client, return PTR_ERR(afe->regmap); } + for (i = 0; i < F_MAX_FIELDS; i++) { + afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap, + afe4404_reg_fields[i]); + if (IS_ERR(afe->fields[i])) { + dev_err(afe->dev, "Unable to allocate regmap fields\n"); + return PTR_ERR(afe->fields[i]); + } + } + afe->regulator = devm_regulator_get(afe->dev, "tx_sup"); if (IS_ERR(afe->regulator)) { dev_err(afe->dev, "Unable to get regulator\n"); @@ -665,7 +623,7 @@ MODULE_DEVICE_TABLE(i2c, afe4404_ids); static struct i2c_driver afe4404_i2c_driver = { .driver = { .name = AFE4404_DRIVER_NAME, - .of_match_table = of_match_ptr(afe4404_of_match), + .of_match_table = afe4404_of_match, .pm = &afe4404_pm_ops, }, .probe = afe4404_probe, @@ -675,5 +633,5 @@ static struct i2c_driver afe4404_i2c_driver = { 
module_i2c_driver(afe4404_i2c_driver); MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); -MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter"); +MODULE_DESCRIPTION("TI AFE4404 Heart Rate Monitor and Pulse Oximeter AFE"); MODULE_LICENSE("GPL v2"); @@ -71,8 +71,7 @@ #define AFE440X_CONTROL1_TIMEREN BIT(8) /* TIAGAIN register fields */ -#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15) -#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15 +#define AFE440X_TIAGAIN_ENSEPGAIN BIT(15) /* CONTROL2 register fields */ #define AFE440X_CONTROL2_PDN_AFE BIT(0) @@ -89,22 +88,7 @@ #define AFE440X_CONTROL0_WRITE 0x0 #define AFE440X_CONTROL0_READ 0x1 -struct afe440x_reg_info { - unsigned int reg; - unsigned int offreg; - unsigned int shift; - unsigned int mask; -}; - -#define AFE440X_REG_INFO(_reg, _offreg, _sm) \ - { \ - .reg = _reg, \ - .offreg = _offreg, \ - .shift = _sm ## _SHIFT, \ - .mask = _sm ## _MASK, \ - } - -#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \ +#define AFE440X_INTENSITY_CHAN(_index, _mask) \ { \ .type = IIO_INTENSITY, \ .channel = _index, \ @@ -116,29 +100,23 @@ struct afe440x_reg_info { .storagebits = 32, \ .endianness = IIO_CPU, \ }, \ - .extend_name = _name, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ _mask, \ + .indexed = true, \ } -#define AFE440X_CURRENT_CHAN(_index, _name) \ +#define AFE440X_CURRENT_CHAN(_index) \ { \ .type = IIO_CURRENT, \ .channel = _index, \ .address = _index, \ - .scan_index = _index, \ - .extend_name = _name, \ + .scan_index = -1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ + .indexed = true, \ .output = true, \ } -enum afe440x_reg_type { - SIMPLE, - RESISTANCE, - CAPACITANCE, -}; - struct afe440x_val_table { int integer; int fract; @@ -164,10 +142,7 @@ static DEVICE_ATTR_RO(_name) struct afe440x_attr { struct device_attribute dev_attr; - unsigned int reg; - unsigned int shift; - unsigned int mask; - enum afe440x_reg_type type; + unsigned int field; const struct afe440x_val_table *val_table; unsigned int table_size; }; @@ -175,17 +150,14 @@ struct afe440x_attr { #define to_afe440x_attr(_dev_attr) \ container_of(_dev_attr, struct afe440x_attr, dev_attr) -#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \ +#define AFE440X_ATTR(_name, _field, _table) \ struct afe440x_attr afe440x_attr_##_name = { \ .dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \ afe440x_show_register, \ afe440x_store_register), \ - .reg = _reg, \ - .shift = _field ## _SHIFT, \ - .mask = _field ## _MASK, \ - .type = _type, \ + .field = _field, \ .val_table = _table, \ - .table_size = _size, \ + .table_size = ARRAY_SIZE(_table), \ } #endif /* _AFE440X_H */ @@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p) struct am2315_sensor_data sensor_data; ret = am2315_read_data(data, &sensor_data); - if (ret < 0) { - mutex_unlock(&data->lock); + if (ret < 0) goto err; - } mutex_lock(&data->lock); if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { @@ -278,6 +276,7 @@ static const struct i2c_device_id am2315_i2c_id[] = { {"am2315", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, am2315_i2c_id); static const struct acpi_device_id am2315_acpi_id[] = { {"AOS2315", 0}, @@ -55,7 +55,7 @@ static const struct { }, { /* IIO_HUMIDITYRELATIVE channel */ .shift = 8, - .mask = 2, + .mask = 3, }, }; @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, dev_err(&client->dev, "cannot read high byte measurement"); return ret; } - val = ret << 6; + val = ret << 8; ret = i2c_smbus_read_byte(client); if 
(ret < 0) { dev_err(&client->dev, "cannot read low byte measurement"); return ret; } - val |= ret >> 2; + val |= ret; return val; } @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_TEMP) { - *val = 165; - *val2 = 65536 >> 2; + *val = 165000; + *val2 = 65536; return IIO_VAL_FRACTIONAL; } else { - *val = 0; - *val2 = 10000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 100; + *val2 = 65536; + return IIO_VAL_FRACTIONAL; } break; case IIO_CHAN_INFO_OFFSET: - *val = -3971; - *val2 = 879096; + *val = -15887; + *val2 = 515151; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; @@ -236,6 +236,7 @@ static const struct i2c_device_id htu21_id[] = { {"ms8607-humidity", MS8607}, {} }; +MODULE_DEVICE_TABLE(i2c, htu21_id); static struct i2c_driver htu21_driver = { .probe = htu21_probe, @@ -79,4 +79,7 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev); void iio_device_wakeup_eventset(struct iio_dev *indio_dev); int iio_event_getfd(struct iio_dev *indio_dev); +struct iio_event_interface; +bool iio_event_enabled(const struct iio_event_interface *ev_int); + #endif @@ -20,6 +20,7 @@ #include <linux/iio/triggered_buffer.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/buffer.h> +#include <linux/iio/sysfs.h> #include "bmi160.h" @@ -209,11 +210,11 @@ static const struct bmi160_scale_item bmi160_scale_table[] = { }; static const struct bmi160_odr bmi160_accel_odr[] = { - {0x01, 0, 78125}, - {0x02, 1, 5625}, - {0x03, 3, 125}, - {0x04, 6, 25}, - {0x05, 12, 5}, + {0x01, 0, 781250}, + {0x02, 1, 562500}, + {0x03, 3, 125000}, + {0x04, 6, 250000}, + {0x05, 12, 500000}, {0x06, 25, 0}, {0x07, 50, 0}, {0x08, 100, 0}, @@ -229,7 +230,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = { {0x08, 100, 0}, {0x09, 200, 0}, {0x0A, 400, 0}, - {0x0B, 8000, 0}, + {0x0B, 800, 0}, {0x0C, 1600, 0}, {0x0D, 3200, 0}, }; @@ -364,8 +365,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t, return regmap_update_bits(data->regmap, bmi160_regs[t].config, - bmi160_odr_table[t].tbl[i].bits, - bmi160_regs[t].config_odr_mask); + bmi160_regs[t].config_odr_mask, + bmi160_odr_table[t].tbl[i].bits); } static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, @@ -410,7 +411,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) buf[j++] = sample; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -466,10 +468,36 @@ static int bmi160_write_raw(struct iio_dev *indio_dev, return 0; } +static +IIO_CONST_ATTR(in_accel_sampling_frequency_available, + "0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600"); +static +IIO_CONST_ATTR(in_anglvel_sampling_frequency_available, + "25 50 100 200 400 800 1600 3200"); +static +IIO_CONST_ATTR(in_accel_scale_available, + "0.000598 0.001197 0.002394 0.004788"); +static +IIO_CONST_ATTR(in_anglvel_scale_available, + "0.001065 0.000532 0.000266 0.000133 0.000066"); + +static struct attribute *bmi160_attrs[] = { + &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr, + &iio_const_attr_in_anglvel_sampling_frequency_available.dev_attr.attr, + &iio_const_attr_in_accel_scale_available.dev_attr.attr, + &iio_const_attr_in_anglvel_scale_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group bmi160_attrs_group = { + .attrs = 
bmi160_attrs, +}; + static const struct iio_info bmi160_info = { .driver_module = THIS_MODULE, .read_raw = bmi160_read_raw, .write_raw = bmi160_write_raw, + .attrs = &bmi160_attrs_group, }; static const char *bmi160_match_acpi_device(struct device *dev) @@ -13,8 +13,8 @@ config INV_MPU6050_I2C select INV_MPU6050_IIO select REGMAP_I2C help - This driver supports the Invensense MPU6050/6500/9150 motion tracking - devices over I2C. + This driver supports the Invensense MPU6050/6500/9150 and ICM20608 + motion tracking devices over I2C. This driver can be built as a module. The module will be called inv-mpu6050-i2c. @@ -24,7 +24,7 @@ config INV_MPU6050_SPI select INV_MPU6050_IIO select REGMAP_SPI help - This driver supports the Invensense MPU6000/6500/9150 motion tracking - devices over SPI. + This driver supports the Invensense MPU6000/6500/9150 and ICM20608 + motion tracking devices over SPI. This driver can be built as a module. The module will be called inv-mpu6050-spi. @@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev, int i; acpi_status status; union acpi_object *cpm; + int ret; status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer); if (ACPI_FAILURE(status)) @@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev, } } } - + ret = cpm->package.count; kfree(buffer.pointer); - return cpm->package.count; + return ret; } static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data) @@ -113,6 +113,12 @@ static const struct inv_mpu6050_hw hw_info[] = { .reg = &reg_set_6050, .config = &chip_config_6050, }, + { + .whoami = INV_ICM20608_WHOAMI_VALUE, + .name = "ICM20608", + .reg = &reg_set_6500, + .config = &chip_config_6050, + }, }; int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask) @@ -170,6 +170,7 @@ static const struct i2c_device_id inv_mpu_id[] = { {"mpu6050", INV_MPU6050}, {"mpu6500", INV_MPU6500}, {"mpu9150", INV_MPU9150}, + {"icm20608", INV_ICM20608}, {} }; @@ -70,6 +70,7 @@ enum inv_devices { INV_MPU6500, INV_MPU6000, INV_MPU9150, + INV_ICM20608, INV_NUM_PARTS }; @@ -225,6 +226,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_WHOAMI_VALUE 0x68 #define INV_MPU6500_WHOAMI_VALUE 0x70 #define INV_MPU9150_WHOAMI_VALUE 0x68 +#define INV_ICM20608_WHOAMI_VALUE 0xAF /* scan element definition */ enum inv_mpu6050_scan { @@ -107,7 +107,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p) struct inv_mpu6050_state *st = iio_priv(indio_dev); s64 timestamp; - timestamp = iio_get_time_ns(); + timestamp = iio_get_time_ns(indio_dev); kfifo_in_spinlocked(&st->timestamps, &timestamp, 1, &st->time_stamp_lock); @@ -82,6 +82,7 @@ static const struct spi_device_id inv_mpu_id[] = { {"mpu6000", INV_MPU6000}, {"mpu6500", INV_MPU6500}, {"mpu9150", INV_MPU9150}, + {"icm20608", INV_ICM20608}, {} }; @@ -80,6 +80,7 @@ static const char * const iio_chan_type_name_spec[] = { [IIO_RESISTANCE] = "resistance", [IIO_PH] = "ph", [IIO_UVINDEX] = "uvindex", + [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", }; static const char * const iio_modifier_names[] = { @@ -177,6 +178,86 @@ ssize_t iio_read_const_attr(struct device *dev, } EXPORT_SYMBOL(iio_read_const_attr); +static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) +{ + int ret; + const struct iio_event_interface *ev_int = indio_dev->event_interface; + + ret = mutex_lock_interruptible(&indio_dev->mlock); + if (ret) + return ret; + if ((ev_int && iio_event_enabled(ev_int)) || + iio_buffer_enabled(indio_dev)) { + 
mutex_unlock(&indio_dev->mlock); + return -EBUSY; + } + indio_dev->clock_id = clock_id; + mutex_unlock(&indio_dev->mlock); + + return 0; +} + +/** + * iio_get_time_ns() - utility function to get a time stamp for events etc + * @indio_dev: device + */ +s64 iio_get_time_ns(const struct iio_dev *indio_dev) +{ + struct timespec tp; + + switch (iio_device_get_clock(indio_dev)) { + case CLOCK_REALTIME: + ktime_get_real_ts(&tp); + break; + case CLOCK_MONOTONIC: + ktime_get_ts(&tp); + break; + case CLOCK_MONOTONIC_RAW: + getrawmonotonic(&tp); + break; + case CLOCK_REALTIME_COARSE: + tp = current_kernel_time(); + break; + case CLOCK_MONOTONIC_COARSE: + tp = get_monotonic_coarse(); + break; + case CLOCK_BOOTTIME: + get_monotonic_boottime(&tp); + break; + case CLOCK_TAI: + timekeeping_clocktai(&tp); + break; + default: + BUG(); + } + + return timespec_to_ns(&tp); +} +EXPORT_SYMBOL(iio_get_time_ns); + +/** + * iio_get_time_res() - utility function to get time stamp clock resolution in + * nano seconds. + * @indio_dev: device + */ +unsigned int iio_get_time_res(const struct iio_dev *indio_dev) +{ + switch (iio_device_get_clock(indio_dev)) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + case CLOCK_MONOTONIC_RAW: + case CLOCK_BOOTTIME: + case CLOCK_TAI: + return hrtimer_resolution; + case CLOCK_REALTIME_COARSE: + case CLOCK_MONOTONIC_COARSE: + return LOW_RES_NSEC; + default: + BUG(); + } +} +EXPORT_SYMBOL(iio_get_time_res); + static int __init iio_init(void) { int ret; @@ -989,11 +1070,91 @@ static ssize_t iio_show_dev_name(struct device *dev, static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL); +static ssize_t iio_show_timestamp_clock(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + const struct iio_dev *indio_dev = dev_to_iio_dev(dev); + const clockid_t clk = iio_device_get_clock(indio_dev); + const char *name; + ssize_t sz; + + switch (clk) { + case CLOCK_REALTIME: + name = "realtime\n"; + sz = sizeof("realtime\n"); + break; + case CLOCK_MONOTONIC: + name = "monotonic\n"; + sz = sizeof("monotonic\n"); + break; + case CLOCK_MONOTONIC_RAW: + name = "monotonic_raw\n"; + sz = sizeof("monotonic_raw\n"); + break; + case CLOCK_REALTIME_COARSE: + name = "realtime_coarse\n"; + sz = sizeof("realtime_coarse\n"); + break; + case CLOCK_MONOTONIC_COARSE: + name = "monotonic_coarse\n"; + sz = sizeof("monotonic_coarse\n"); + break; + case CLOCK_BOOTTIME: + name = "boottime\n"; + sz = sizeof("boottime\n"); + break; + case CLOCK_TAI: + name = "tai\n"; + sz = sizeof("tai\n"); + break; + default: + BUG(); + } + + memcpy(buf, name, sz); + return sz; +} + +static ssize_t iio_store_timestamp_clock(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + clockid_t clk; + int ret; + + if (sysfs_streq(buf, "realtime")) + clk = CLOCK_REALTIME; + else if (sysfs_streq(buf, "monotonic")) + clk = CLOCK_MONOTONIC; + else if (sysfs_streq(buf, "monotonic_raw")) + clk = CLOCK_MONOTONIC_RAW; + else if (sysfs_streq(buf, "realtime_coarse")) + clk = CLOCK_REALTIME_COARSE; + else if (sysfs_streq(buf, "monotonic_coarse")) + clk = CLOCK_MONOTONIC_COARSE; + else if (sysfs_streq(buf, "boottime")) + clk = CLOCK_BOOTTIME; + else if (sysfs_streq(buf, "tai")) + clk = CLOCK_TAI; + else + return -EINVAL; + + ret = iio_device_set_clock(dev_to_iio_dev(dev), clk); + if (ret) + return ret; + + return len; +} + +static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR, + iio_show_timestamp_clock, iio_store_timestamp_clock); + static int iio_device_register_sysfs(struct iio_dev 
*indio_dev) { int i, ret = 0, attrcount, attrn, attrcount_orig = 0; struct iio_dev_attr *p; - struct attribute **attr; + struct attribute **attr, *clk = NULL; /* First count elements in any existing group */ if (indio_dev->info->attrs) { @@ -1008,16 +1169,25 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) */ if (indio_dev->channels) for (i = 0; i < indio_dev->num_channels; i++) { - ret = iio_device_add_channel_sysfs(indio_dev, - &indio_dev - ->channels[i]); + const struct iio_chan_spec *chan = + &indio_dev->channels[i]; + + if (chan->type == IIO_TIMESTAMP) + clk = &dev_attr_current_timestamp_clock.attr; + + ret = iio_device_add_channel_sysfs(indio_dev, chan); if (ret < 0) goto error_clear_attrs; attrcount += ret; } + if (indio_dev->event_interface) + clk = &dev_attr_current_timestamp_clock.attr; + if (indio_dev->name) attrcount++; + if (clk) + attrcount++; indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1, sizeof(indio_dev->chan_attr_group.attrs[0]), @@ -1038,6 +1208,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; if (indio_dev->name) indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; + if (clk) + indio_dev->chan_attr_group.attrs[attrn++] = clk; indio_dev->groups[indio_dev->groupcounter++] = &indio_dev->chan_attr_group; @@ -44,6 +44,11 @@ struct iio_event_interface { struct mutex read_lock; }; +bool iio_event_enabled(const struct iio_event_interface *ev_int) +{ + return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags); +} + /** * iio_push_event() - try to add event to the list for userspace reading * @indio_dev: IIO device structure @@ -60,7 +65,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) int copied; /* Does anyone care? */ - if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { + if (iio_event_enabled(ev_int)) { ev.id = ev_code; ev.timestamp = timestamp; @@ -180,8 +185,14 @@ int iio_event_getfd(struct iio_dev *indio_dev) if (ev_int == NULL) return -ENODEV; - if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) - return -EBUSY; + fd = mutex_lock_interruptible(&indio_dev->mlock); + if (fd) + return fd; + + if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { + fd = -EBUSY; + goto unlock; + } iio_device_get(indio_dev); @@ -194,6 +205,8 @@ int iio_event_getfd(struct iio_dev *indio_dev) kfifo_reset_out(&ev_int->det_events); } +unlock: + mutex_unlock(&indio_dev->mlock); return fd; } diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c new file mode 100644 index 000000000000..81b49cfca452 --- /dev/null +++ b/ drivers/iio/industrialio-sw-device.c@@ -0,0 +1,182 @@ +/* + * The Industrial I/O core, software IIO devices functions + * + * Copyright (c) 2016 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kmod.h> +#include <linux/list.h> +#include <linux/slab.h> + +#include <linux/iio/sw_device.h> +#include <linux/iio/configfs.h> +#include <linux/configfs.h> + +static struct config_group *iio_devices_group; +static struct config_item_type iio_device_type_group_type; + +static struct config_item_type iio_devices_group_type = { + .ct_owner = THIS_MODULE, +}; + +static LIST_HEAD(iio_device_types_list); +static DEFINE_MUTEX(iio_device_types_lock); + +static +struct iio_sw_device_type *__iio_find_sw_device_type(const char *name, + unsigned len) +{ + struct iio_sw_device_type *d = NULL, *iter; + + list_for_each_entry(iter, &iio_device_types_list, list) + if (!strcmp(iter->name, name)) { + d = iter; + break; + } + + return d; +} + +int iio_register_sw_device_type(struct iio_sw_device_type *d) +{ + struct iio_sw_device_type *iter; + int ret = 0; + + mutex_lock(&iio_device_types_lock); + iter = __iio_find_sw_device_type(d->name, strlen(d->name)); + if (iter) + ret = -EBUSY; + else + list_add_tail(&d->list, &iio_device_types_list); + mutex_unlock(&iio_device_types_lock); + + if (ret) + return ret; + + d->group = configfs_register_default_group(iio_devices_group, d->name, + &iio_device_type_group_type); + if (IS_ERR(d->group)) + ret = PTR_ERR(d->group); + + return ret; +} +EXPORT_SYMBOL(iio_register_sw_device_type); + +void iio_unregister_sw_device_type(struct iio_sw_device_type *dt) +{ + struct iio_sw_device_type *iter; + + mutex_lock(&iio_device_types_lock); + iter = __iio_find_sw_device_type(dt->name, strlen(dt->name)); + if (iter) + list_del(&dt->list); + mutex_unlock(&iio_device_types_lock); + + configfs_unregister_default_group(dt->group); +} +EXPORT_SYMBOL(iio_unregister_sw_device_type); + +static +struct iio_sw_device_type *iio_get_sw_device_type(const char *name) +{ + struct iio_sw_device_type *dt; + + mutex_lock(&iio_device_types_lock); + dt = __iio_find_sw_device_type(name, strlen(name)); + if (dt && !try_module_get(dt->owner)) + dt = NULL; + mutex_unlock(&iio_device_types_lock); + + return dt; +} + +struct iio_sw_device *iio_sw_device_create(const char *type, const char *name) +{ + struct iio_sw_device *d; + struct iio_sw_device_type *dt; + + dt = iio_get_sw_device_type(type); + if (!dt) { + pr_err("Invalid device type: %s\n", type); + return ERR_PTR(-EINVAL); + } + d = dt->ops->probe(name); + if (IS_ERR(d)) + goto out_module_put; + + d->device_type = dt; + + return d; +out_module_put: + module_put(dt->owner); + return d; +} +EXPORT_SYMBOL(iio_sw_device_create); + +void iio_sw_device_destroy(struct iio_sw_device *d) +{ + struct iio_sw_device_type *dt = d->device_type; + + dt->ops->remove(d); + module_put(dt->owner); +} +EXPORT_SYMBOL(iio_sw_device_destroy); + +static struct config_group *device_make_group(struct config_group *group, + const char *name) +{ + struct iio_sw_device *d; + + d = iio_sw_device_create(group->cg_item.ci_name, name); + if (IS_ERR(d)) + return ERR_CAST(d); + + config_item_set_name(&d->group.cg_item, "%s", name); + + return &d->group; +} + +static void device_drop_group(struct config_group *group, + struct config_item *item) +{ + struct iio_sw_device *d = to_iio_sw_device(item); + + iio_sw_device_destroy(d); + config_item_put(item); +} + +static struct configfs_group_operations device_ops = { + .make_group = &device_make_group, + .drop_item = &device_drop_group, +}; + +static struct config_item_type iio_device_type_group_type = { + .ct_group_ops = &device_ops, + .ct_owner = THIS_MODULE, +}; + 
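For context on the configfs plumbing being added here: a backend (for example a software-device counterpart to the software triggers) registers a device type with this core, and instances are then created from userspace. A minimal sketch of such a backend, assuming hypothetical my_probe()/my_remove() helpers that allocate and tear down an IIO device, a made-up "dummy" type name, and that the accompanying sw_device.h header provides a module_iio_sw_device_driver() helper analogous to the software-trigger one:

/* Hedged sketch of a hypothetical sw_device backend, not part of the patch. */
static const struct iio_sw_device_ops my_dummy_ops = {
	.probe = my_probe,	/* allocates and registers the iio_dev */
	.remove = my_remove,	/* unregisters and frees it again */
};

static struct iio_sw_device_type my_dummy_device_type = {
	.name = "dummy",
	.owner = THIS_MODULE,
	.ops = &my_dummy_ops,
};

module_iio_sw_device_driver(my_dummy_device_type);

With configfs mounted under /config, something like "mkdir /config/iio/devices/dummy/dev0" would then reach device_make_group() below, which hands the type name and instance name to iio_sw_device_create().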
+static int __init iio_sw_device_init(void) +{ + iio_devices_group = + configfs_register_default_group(&iio_configfs_subsys.su_group, + "devices", + &iio_devices_group_type); + return PTR_ERR_OR_ZERO(iio_devices_group); +} +module_init(iio_sw_device_init); + +static void __exit iio_sw_device_exit(void) +{ + configfs_unregister_default_group(iio_devices_group); +} +module_exit(iio_sw_device_exit); + +MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>"); +MODULE_DESCRIPTION("Industrial I/O software devices support"); +MODULE_LICENSE("GPL v2"); @@ -64,10 +64,16 @@ static struct attribute *iio_trig_dev_attrs[] = { }; ATTRIBUTE_GROUPS(iio_trig_dev); +static struct iio_trigger *__iio_trigger_find_by_name(const char *name); + int iio_trigger_register(struct iio_trigger *trig_info) { int ret; + /* trig_info->ops is required for the module member */ + if (!trig_info->ops) + return -EINVAL; + trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL); if (trig_info->id < 0) return trig_info->id; @@ -82,11 +88,19 @@ int iio_trigger_register(struct iio_trigger *trig_info) /* Add to list of available triggers held by the IIO core */ mutex_lock(&iio_trigger_list_lock); + if (__iio_trigger_find_by_name(trig_info->name)) { + pr_err("Duplicate trigger name '%s'\n", trig_info->name); + ret = -EEXIST; + goto error_device_del; + } list_add_tail(&trig_info->list, &iio_trigger_list); mutex_unlock(&iio_trigger_list_lock); return 0; +error_device_del: + mutex_unlock(&iio_trigger_list_lock); + device_del(&trig_info->dev); error_unregister_id: ida_simple_remove(&iio_trigger_ida, trig_info->id); return ret; @@ -105,6 +119,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info) } EXPORT_SYMBOL(iio_trigger_unregister); +/* Search for trigger by name, assuming iio_trigger_list_lock held */ +static struct iio_trigger *__iio_trigger_find_by_name(const char *name) +{ + struct iio_trigger *iter; + + list_for_each_entry(iter, &iio_trigger_list, list) + if (!strcmp(iter->name, name)) + return iter; + + return NULL; +} + static struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len) { @@ -164,8 +190,7 @@ EXPORT_SYMBOL(iio_trigger_poll_chained); void iio_trigger_notify_done(struct iio_trigger *trig) { - if (atomic_dec_and_test(&trig->use_count) && trig->ops && - trig->ops->try_reenable) + if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable) if (trig->ops->try_reenable(trig)) /* Missed an interrupt so launch new poll now */ iio_trigger_poll(trig); @@ -210,22 +235,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig, /* Prevent the module from being removed whilst attached to a trigger */ __module_get(pf->indio_dev->info->driver_module); + + /* Get irq number */ pf->irq = iio_trigger_get_irq(trig); + if (pf->irq < 0) + goto out_put_module; + + /* Request irq */ ret = request_threaded_irq(pf->irq, pf->h, pf->thread, pf->type, pf->name, pf); - if (ret < 0) { - module_put(pf->indio_dev->info->driver_module); - return ret; - } + if (ret < 0) + goto out_put_irq; - if (trig->ops && trig->ops->set_trigger_state && notinuse) { + /* Enable trigger in driver */ + if (trig->ops->set_trigger_state && notinuse) { ret = trig->ops->set_trigger_state(trig, true); if (ret < 0) - module_put(pf->indio_dev->info->driver_module); + goto out_free_irq; } return ret; + +out_free_irq: + free_irq(pf->irq, pf); +out_put_irq: + iio_trigger_put_irq(trig, pf->irq); +out_put_module: + module_put(pf->indio_dev->info->driver_module); + return ret; } static int 
iio_trigger_detach_poll_func(struct iio_trigger *trig, @@ -236,7 +274,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig, = (bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1); - if (trig->ops && trig->ops->set_trigger_state && no_other_users) { + if (trig->ops->set_trigger_state && no_other_users) { ret = trig->ops->set_trigger_state(trig, false); if (ret) return ret; @@ -251,7 +289,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig, irqreturn_t iio_pollfunc_store_time(int irq, void *p) { struct iio_poll_func *pf = p; - pf->timestamp = iio_get_time_ns(); + pf->timestamp = iio_get_time_ns(pf->indio_dev); return IRQ_WAKE_THREAD; } EXPORT_SYMBOL(iio_pollfunc_store_time); @@ -358,7 +396,7 @@ static ssize_t iio_trigger_write_current(struct device *dev, return ret; } - if (trig && trig->ops && trig->ops->validate_device) { + if (trig && trig->ops->validate_device) { ret = trig->ops->validate_device(trig, indio_dev); if (ret) return ret; @@ -118,7 +118,7 @@ static void acpi_als_notify(struct acpi_device *device, u32 event) struct iio_dev *indio_dev = acpi_driver_data(device); struct acpi_als *als = iio_priv(indio_dev); s32 *buffer = als->evt_buffer; - s64 time_ns = iio_get_time_ns(); + s64 time_ns = iio_get_time_ns(indio_dev); s32 val; int ret; @@ -118,7 +118,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adjd_s311_data *data = iio_priv(indio_dev); - s64 time_ns = iio_get_time_ns(); + s64 time_ns = iio_get_time_ns(indio_dev); int i, j = 0; int ret = adjd_s311_req_data(indio_dev); @@ -396,7 +396,7 @@ static irqreturn_t apds9300_interrupt_handler(int irq, void *private) IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(dev_info)); apds9300_clear_intr(data); @@ -807,7 +807,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private) IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); regmap_write(data->regmap, APDS9960_REG_CICLEAR, 1); } @@ -816,7 +816,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private) IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); regmap_write(data->regmap, APDS9960_REG_PICLEAR, 1); } @@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client, iio_device_attach_buffer(indio_dev, buffer); + indio_dev->dev.parent = &client->dev; indio_dev->info = &apds9960_info; indio_dev->name = APDS9960_DRV_NAME; indio_dev->channels = apds9960_channels; @@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev, int ret; if (!readval) - bh1780_write(bh1780, (u8)reg, (u8)writeval); + return bh1780_write(bh1780, (u8)reg, (u8)writeval); ret = bh1780_read(bh1780, (u8)reg); if (ret < 0) @@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client, indio_dev->dev.parent = &client->dev; indio_dev->info = &bh1780_info; - indio_dev->name = id->name; + indio_dev->name = "bh1780"; indio_dev->channels = bh1780_channels; indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); indio_dev->modes = INDIO_DIRECT_MODE; @@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client) static int bh1780_runtime_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - struct bh1780_data *bh1780 = i2c_get_clientdata(client); + 
struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct bh1780_data *bh1780 = iio_priv(indio_dev); int ret; ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); @@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev) static int bh1780_runtime_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - struct bh1780_data *bh1780 = i2c_get_clientdata(client); + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct bh1780_data *bh1780 = iio_priv(indio_dev); int ret; ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); @@ -268,7 +268,7 @@ static irqreturn_t cm36651_irq_handler(int irq, void *data) CM36651_CMD_READ_RAW_PROXIMITY, IIO_EV_TYPE_THRESH, ev_dir); - iio_push_event(indio_dev, ev_code, iio_get_time_ns()); + iio_push_event(indio_dev, ev_code, iio_get_time_ns(indio_dev)); return IRQ_HANDLED; } @@ -851,7 +851,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data) GP2AP020A00F_SCAN_MODE_PROXIMITY, IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } else { iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE( @@ -859,7 +859,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data) GP2AP020A00F_SCAN_MODE_PROXIMITY, IIO_EV_TYPE_ROC, IIO_EV_DIR_FALLING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } } @@ -925,7 +925,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data) IIO_MOD_LIGHT_CLEAR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) { @@ -939,7 +939,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data) IIO_MOD_LIGHT_CLEAR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } } @@ -1287,22 +1287,14 @@ static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev, struct gp2ap020a00f_data *data = iio_priv(indio_dev); int err = -EINVAL; - mutex_lock(&data->lock); - - switch (mask) { - case IIO_CHAN_INFO_RAW: - if (iio_buffer_enabled(indio_dev)) { - err = -EBUSY; - goto error_unlock; - } + if (mask == IIO_CHAN_INFO_RAW) { + err = iio_device_claim_direct_mode(indio_dev); + if (err) + return err; err = gp2ap020a00f_read_channel(data, chan, val); - break; + iio_device_release_direct_mode(indio_dev); } - -error_unlock: - mutex_unlock(&data->lock); - return err < 0 ? 
err : IIO_VAL_INT; } @@ -44,13 +44,15 @@ #define ISL29125_MODE_B 0x3 #define ISL29125_MODE_RGB 0x5 +#define ISL29125_SENSING_RANGE_0 5722 /* 375 lux full range */ +#define ISL29125_SENSING_RANGE_1 152590 /* 10k lux full range */ + #define ISL29125_MODE_RANGE BIT(3) #define ISL29125_STATUS_CONV BIT(1) struct isl29125_data { struct i2c_client *client; - struct mutex lock; u8 conf1; u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */ }; @@ -128,11 +130,11 @@ static int isl29125_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; - mutex_lock(&data->lock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; ret = isl29125_read_data(data, chan->scan_index); - mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; *val = ret; @@ -140,9 +142,9 @@ static int isl29125_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: *val = 0; if (data->conf1 & ISL29125_MODE_RANGE) - *val2 = 152590; /* 10k lux full range */ + *val2 = ISL29125_SENSING_RANGE_1; /*10k lux full range*/ else - *val2 = 5722; /* 375 lux full range */ + *val2 = ISL29125_SENSING_RANGE_0; /*375 lux full range*/ return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; @@ -158,9 +160,9 @@ static int isl29125_write_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SCALE: if (val != 0) return -EINVAL; - if (val2 == 152590) + if (val2 == ISL29125_SENSING_RANGE_1) data->conf1 |= ISL29125_MODE_RANGE; - else if (val2 == 5722) + else if (val2 == ISL29125_SENSING_RANGE_0) data->conf1 &= ~ISL29125_MODE_RANGE; else return -EINVAL; @@ -189,7 +191,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -259,7 +261,6 @@ static int isl29125_probe(struct i2c_client *client, data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; - mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; indio_dev->info = &isl29125_info; @@ -325,9 +325,6 @@ static int jsa1212_probe(struct i2c_client *client, struct regmap *regmap; int ret; - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -EOPNOTSUPP; - indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; @@ -267,7 +267,7 @@ static irqreturn_t lm3533_als_isr(int irq, void *dev_id) 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); out: return IRQ_HANDLED; } @@ -1256,7 +1256,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) buf[j++] = psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -1282,14 +1283,14 @@ static irqreturn_t ltr501_interrupt_handler(int irq, void *private) IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); if (status & LTR501_STATUS_PS_INTR) iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); return IRQ_HANDLED; } @@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = { { .type = IIO_PROXIMITY, .info_mask_separate = 
BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .scan_index = MAX44000_SCAN_INDEX_PRX, .scan_type = { .sign = 'u', @@ -512,7 +511,8 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; @@ -713,13 +713,13 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), - iio_get_time_ns()); + iio_get_time_ns(iio)); if (ret & OPT3001_CONFIGURATION_FL) iio_push_event(iio, IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), - iio_get_time_ns()); + iio_get_time_ns(iio)); } else if (ret & OPT3001_CONFIGURATION_CRF) { ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT); if (ret < 0) { @@ -528,7 +528,7 @@ static irqreturn_t stk3310_irq_handler(int irq, void *private) struct iio_dev *indio_dev = private; struct stk3310_data *data = iio_priv(indio_dev); - data->timestamp = iio_get_time_ns(); + data->timestamp = iio_get_time_ns(indio_dev); return IRQ_WAKE_THREAD; } @@ -53,7 +53,6 @@ struct tcs3414_data { struct i2c_client *client; - struct mutex lock; u8 control; u8 gain; u8 timing; @@ -134,16 +133,16 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; - mutex_lock(&data->lock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; ret = tcs3414_req_data(data); if (ret < 0) { - mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); return ret; } ret = i2c_smbus_read_word_data(data->client, chan->address); - mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; *val = ret; @@ -217,7 +216,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -288,7 +287,6 @@ static int tcs3414_probe(struct i2c_client *client, data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; - mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; indio_dev->info = &tcs3414_info; @@ -52,7 +52,6 @@ struct tcs3472_data { struct i2c_client *client; - struct mutex lock; u8 enable; u8 control; u8 atime; @@ -117,17 +116,16 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - if (iio_buffer_enabled(indio_dev)) - return -EBUSY; - - mutex_lock(&data->lock); + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; ret = tcs3472_req_data(data); if (ret < 0) { - mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); return ret; } ret = i2c_smbus_read_word_data(data->client, chan->address); - mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; *val = ret; @@ -204,7 +202,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -263,7 +261,6 @@ static int tcs3472_probe(struct i2c_client *client, data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); 
data->client = client; - mutex_init(&data->lock); indio_dev->dev.parent = &client->dev; indio_dev->info = &tcs3472_info; @@ -630,7 +630,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), - iio_get_time_ns()); + iio_get_time_ns(dev_info)); /* clear the interrupt and push the event */ i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT); @@ -833,7 +833,7 @@ static irqreturn_t us5182d_irq_thread_handler(int irq, void *private) dir = ret & US5182D_CFG0_PROX ? IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING; ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, dir); - iio_push_event(indio_dev, ev, iio_get_time_ns()); + iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev)); ret = i2c_smbus_write_byte_data(data->client, US5182D_REG_CFG0, ret & ~US5182D_CFG0_PX_IRQ); @@ -44,6 +44,7 @@ config BMC150_MAGN_I2C This driver is only implementing magnetometer part, which has its own address and register map. + This driver also supports I2C Bosch BMC156 and BMM150 chips. To compile this driver as a module, choose M here: the module will be called bmc150_magn_i2c. @@ -60,6 +61,7 @@ config BMC150_MAGN_SPI This driver is only implementing magnetometer part, which has its own address and register map. + This driver also supports SPI Bosch BMC156 and BMM150 chips. To compile this driver as a module, choose M here: the module will be called bmc150_magn_spi. @@ -33,6 +33,7 @@ #include <linux/of_gpio.h> #include <linux/acpi.h> #include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> @@ -379,37 +380,40 @@ struct ak8975_data { u8 cntl_cache; struct iio_mount_matrix orientation; struct regulator *vdd; + struct regulator *vid; }; /* Enable attached power regulator if any. */ -static int ak8975_power_on(struct i2c_client *client) +static int ak8975_power_on(const struct ak8975_data *data) { - const struct iio_dev *indio_dev = i2c_get_clientdata(client); - struct ak8975_data *data = iio_priv(indio_dev); int ret; - data->vdd = devm_regulator_get(&client->dev, "vdd"); - if (IS_ERR_OR_NULL(data->vdd)) { - ret = PTR_ERR(data->vdd); - if (ret == -ENODEV) - ret = 0; - } else { - ret = regulator_enable(data->vdd); + ret = regulator_enable(data->vdd); + if (ret) { + dev_warn(&data->client->dev, + "Failed to enable specified Vdd supply\n"); + return ret; } - - if (ret) - dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret); - return ret; + ret = regulator_enable(data->vid); + if (ret) { + dev_warn(&data->client->dev, + "Failed to enable specified Vid supply\n"); + return ret; + } + /* + * According to the datasheet the power supply rise time is 200us + * and the minimum wait time before mode setting is 100us, in + * total 300 us. Add some margin and say minimum 500us here. + */ + usleep_range(500, 1000); + return 0; } /* Disable attached power regulator if any. 
*/ -static void ak8975_power_off(const struct i2c_client *client) +static void ak8975_power_off(const struct ak8975_data *data) { - const struct iio_dev *indio_dev = i2c_get_clientdata(client); - const struct ak8975_data *data = iio_priv(indio_dev); - - if (!IS_ERR_OR_NULL(data->vdd)) - regulator_disable(data->vdd); + regulator_disable(data->vid); + regulator_disable(data->vdd); } /* @@ -430,8 +434,8 @@ static int ak8975_who_i_am(struct i2c_client *client, * AK8975 | DEVICE_ID | NA * AK8963 | DEVICE_ID | NA */ - ret = i2c_smbus_read_i2c_block_data(client, AK09912_REG_WIA1, - 2, wia_val); + ret = i2c_smbus_read_i2c_block_data_or_emulated( + client, AK09912_REG_WIA1, 2, wia_val); if (ret < 0) { dev_err(&client->dev, "Error reading WIA\n"); return ret; @@ -543,9 +547,9 @@ static int ak8975_setup(struct i2c_client *client) } /* Get asa data and store in the device data. */ - ret = i2c_smbus_read_i2c_block_data(client, - data->def->ctrl_regs[ASA_BASE], - 3, data->asa); + ret = i2c_smbus_read_i2c_block_data_or_emulated( + client, data->def->ctrl_regs[ASA_BASE], + 3, data->asa); if (ret < 0) { dev_err(&client->dev, "Not able to read asa data\n"); return ret; @@ -686,22 +690,31 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val) struct ak8975_data *data = iio_priv(indio_dev); const struct i2c_client *client = data->client; const struct ak_def *def = data->def; + u16 buff; int ret; + pm_runtime_get_sync(&data->client->dev); + mutex_lock(&data->lock); ret = ak8975_start_read_axis(data, client); if (ret) goto exit; - ret = i2c_smbus_read_word_data(client, def->data_regs[index]); + ret = i2c_smbus_read_i2c_block_data_or_emulated( + client, def->data_regs[index], + sizeof(buff), (u8*)&buff); if (ret < 0) goto exit; mutex_unlock(&data->lock); - /* Clamp to valid range. */ - *val = clamp_t(s16, ret, -def->range, def->range); + pm_runtime_mark_last_busy(&data->client->dev); + pm_runtime_put_autosuspend(&data->client->dev); + + /* Swap bytes and convert to valid range. */ + buff = le16_to_cpu(buff); + *val = clamp_t(s16, buff, -def->range, def->range); return IIO_VAL_INT; exit: @@ -825,7 +838,8 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range); buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_get_time_ns(indio_dev)); return; unlock: @@ -919,7 +933,15 @@ static int ak8975_probe(struct i2c_client *client, data->def = &ak_def_array[chipset]; - err = ak8975_power_on(client); + /* Fetch the regulators */ + data->vdd = devm_regulator_get(&client->dev, "vdd"); + if (IS_ERR(data->vdd)) + return PTR_ERR(data->vdd); + data->vid = devm_regulator_get(&client->dev, "vid"); + if (IS_ERR(data->vid)) + return PTR_ERR(data->vid); + + err = ak8975_power_on(data); if (err) return err; @@ -959,26 +981,93 @@ static int ak8975_probe(struct i2c_client *client, goto cleanup_buffer; } + /* Enable runtime PM */ + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + /* + * The device comes online in 500us, so add two orders of magnitude + * of delay before autosuspending: 50 ms. 
+ */ + pm_runtime_set_autosuspend_delay(&client->dev, 50); + pm_runtime_use_autosuspend(&client->dev); + pm_runtime_put(&client->dev); + return 0; cleanup_buffer: iio_triggered_buffer_cleanup(indio_dev); power_off: - ak8975_power_off(client); + ak8975_power_off(data); return err; } static int ak8975_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct ak8975_data *data = iio_priv(indio_dev); + pm_runtime_get_sync(&client->dev); + pm_runtime_put_noidle(&client->dev); + pm_runtime_disable(&client->dev); iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); - ak8975_power_off(client); + ak8975_set_mode(data, POWER_DOWN); + ak8975_power_off(data); return 0; } +#ifdef CONFIG_PM +static int ak8975_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct ak8975_data *data = iio_priv(indio_dev); + int ret; + + /* Set the device in power down if it wasn't already */ + ret = ak8975_set_mode(data, POWER_DOWN); + if (ret < 0) { + dev_err(&client->dev, "Error in setting power-down mode\n"); + return ret; + } + /* Next cut the regulators */ + ak8975_power_off(data); + + return 0; +} + +static int ak8975_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct ak8975_data *data = iio_priv(indio_dev); + int ret; + + /* Take up the regulators */ + ak8975_power_on(data); + /* + * We come up in powered down mode, the reading routines will + * put us in the mode to read values later. + */ + ret = ak8975_set_mode(data, POWER_DOWN); + if (ret < 0) { + dev_err(&client->dev, "Error in setting power-down mode\n"); + return ret; + } + + return 0; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops ak8975_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(ak8975_runtime_suspend, + ak8975_runtime_resume, NULL) +}; + static const struct i2c_device_id ak8975_id[] = { {"ak8975", AK8975}, {"ak8963", AK8963}, @@ -1006,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, ak8975_of_match); static struct i2c_driver ak8975_driver = { .driver = { .name = "ak8975", + .pm = &ak8975_dev_pm_ops, .of_match_table = of_match_ptr(ak8975_of_match), .acpi_match_table = ACPI_PTR(ak_acpi_match), }, @@ -2,6 +2,7 @@ * 3-axis magnetometer driver supporting following I2C Bosch-Sensortec chips: * - BMC150 * - BMC156 + * - BMM150 * * Copyright (c) 2016, Intel Corporation. * @@ -49,6 +50,7 @@ static int bmc150_magn_i2c_remove(struct i2c_client *client) static const struct acpi_device_id bmc150_magn_acpi_match[] = { {"BMC150B", 0}, {"BMC156B", 0}, + {"BMM150B", 0}, {}, }; MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match); @@ -56,6 +58,7 @@ MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match); static const struct i2c_device_id bmc150_magn_i2c_id[] = { {"bmc150_magn", 0}, {"bmc156_magn", 0}, + {"bmm150_magn", 0}, {} }; MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id); @@ -2,6 +2,7 @@ * 3-axis magnetometer driver support following SPI Bosch-Sensortec chips: * - BMC150 * - BMC156 + * - BMM150 * * Copyright (c) 2016, Intel Corporation. 
* @@ -41,6 +42,7 @@ static int bmc150_magn_spi_remove(struct spi_device *spi) static const struct spi_device_id bmc150_magn_spi_id[] = { {"bmc150_magn", 0}, {"bmc156_magn", 0}, + {"bmm150_magn", 0}, {} }; MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id); @@ -48,6 +50,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id); static const struct acpi_device_id bmc150_magn_acpi_match[] = { {"BMC150B", 0}, {"BMC156B", 0}, + {"BMM150B", 0}, {}, }; MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match); @@ -451,7 +451,7 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p) goto done; iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -261,7 +261,7 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { int st_magn_allocate_ring(struct iio_dev *indio_dev) { - return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + return iio_triggered_buffer_setup(indio_dev, NULL, &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); } @@ -572,6 +572,7 @@ static const struct iio_info magn_info = { static const struct iio_trigger_ops st_magn_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, + .validate_device = st_sensors_validate_device, }; #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) #else @@ -588,13 +589,15 @@ int st_magn_common_probe(struct iio_dev *indio_dev) indio_dev->info = &magn_info; mutex_init(&mdata->tb.buf_lock); - st_sensors_power_enable(indio_dev); + err = st_sensors_power_enable(indio_dev); + if (err) + return err; err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_magn_sensors_settings), st_magn_sensors_settings); if (err < 0) - return err; + goto st_magn_power_off; mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS; mdata->multiread_bit = mdata->sensor_settings->multi_read_bit; @@ -607,11 +610,11 @@ int st_magn_common_probe(struct iio_dev *indio_dev) err = st_sensors_init_sensor(indio_dev, NULL); if (err < 0) - return err; + goto st_magn_power_off; err = st_magn_allocate_ring(indio_dev); if (err < 0) - return err; + goto st_magn_power_off; if (irq > 0) { err = st_sensors_allocate_trigger(indio_dev, @@ -634,6 +637,8 @@ st_magn_device_register_error: st_sensors_deallocate_trigger(indio_dev); st_magn_probe_trigger_error: st_magn_deallocate_ring(indio_dev); +st_magn_power_off: + st_sensors_power_disable(indio_dev); return err; } @@ -10,11 +10,22 @@ config DS1803 depends on I2C help Say yes here to build support for the Maxim Integrated DS1803 - digital potentiomenter chip. + digital potentiometer chip. To compile this driver as a module, choose M here: the module will be called ds1803. +config MAX5487 + tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver" + depends on SPI + help + Say yes here to build support for the Maxim + MAX5487, MAX5488, MAX5489 digital potentiometer + chips. + + To compile this driver as a module, choose M here: the + module will be called max5487. + config MCP4131 tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver" depends on SPI @@ -28,7 +39,7 @@ config MCP4131 MCP4241, MCP4242, MCP4251, MCP4252, MCP4261, MCP4262, - digital potentiomenter chips. + digital potentiometer chips. 
To compile this driver as a module, choose M here: the module will be called mcp4131. @@ -38,9 +49,11 @@ config MCP4531 depends on I2C help Say yes here to build support for the Microchip - MCP4531, MCP4532, MCP4551, MCP4552, - MCP4631, MCP4632, MCP4651, MCP4652 - digital potentiomenter chips. + MCP4531, MCP4532, MCP4541, MCP4542, + MCP4551, MCP4552, MCP4561, MCP4562, + MCP4631, MCP4632, MCP4641, MCP4642, + MCP4651, MCP4652, MCP4661, MCP4662 + digital potentiometer chips. To compile this driver as a module, choose M here: the module will be called mcp4531. @@ -4,6 +4,7 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_DS1803) += ds1803.o +obj-$(CONFIG_MAX5487) += max5487.o obj-$(CONFIG_MCP4131) += mcp4131.o obj-$(CONFIG_MCP4531) += mcp4531.o obj-$(CONFIG_TPL0102) += tpl0102.o diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c new file mode 100644 index 000000000000..6c50939a2e83 --- /dev/null +++ b/ drivers/iio/potentiometer/max5487.c@@ -0,0 +1,161 @@ +/* + * max5487.c - Support for MAX5487, MAX5488, MAX5489 digital potentiometers + * + * Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/acpi.h> + +#include <linux/iio/sysfs.h> +#include <linux/iio/iio.h> + +#define MAX5487_WRITE_WIPER_A (0x01 << 8) +#define MAX5487_WRITE_WIPER_B (0x02 << 8) + +/* copy both wiper regs to NV regs */ +#define MAX5487_COPY_AB_TO_NV (0x23 << 8) +/* copy both NV regs to wiper regs */ +#define MAX5487_COPY_NV_TO_AB (0x33 << 8) + +#define MAX5487_MAX_POS 255 + +struct max5487_data { + struct spi_device *spi; + int kohms; +}; + +#define MAX5487_CHANNEL(ch, addr) { \ + .type = IIO_RESISTANCE, \ + .indexed = 1, \ + .output = 1, \ + .channel = ch, \ + .address = addr, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ +} + +static const struct iio_chan_spec max5487_channels[] = { + MAX5487_CHANNEL(0, MAX5487_WRITE_WIPER_A), + MAX5487_CHANNEL(1, MAX5487_WRITE_WIPER_B), +}; + +static int max5487_write_cmd(struct spi_device *spi, u16 cmd) +{ + return spi_write(spi, (const void *) &cmd, sizeof(u16)); +} + +static int max5487_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct max5487_data *data = iio_priv(indio_dev); + + if (mask != IIO_CHAN_INFO_SCALE) + return -EINVAL; + + *val = 1000 * data->kohms; + *val2 = MAX5487_MAX_POS; + + return IIO_VAL_FRACTIONAL; +} + +static int max5487_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct max5487_data *data = iio_priv(indio_dev); + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + if (val < 0 || val > MAX5487_MAX_POS) + return -EINVAL; + + return max5487_write_cmd(data->spi, chan->address | val); +} + +static const struct iio_info max5487_info = { + .read_raw = max5487_read_raw, + .write_raw = max5487_write_raw, + .driver_module = THIS_MODULE, +}; + +static int max5487_spi_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct max5487_data *data; + const struct spi_device_id *id = spi_get_device_id(spi); + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; 
+ + dev_set_drvdata(&spi->dev, indio_dev); + data = iio_priv(indio_dev); + + data->spi = spi; + data->kohms = id->driver_data; + + indio_dev->info = &max5487_info; + indio_dev->name = id->name; + indio_dev->dev.parent = &spi->dev; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = max5487_channels; + indio_dev->num_channels = ARRAY_SIZE(max5487_channels); + + /* restore both wiper regs from NV regs */ + ret = max5487_write_cmd(data->spi, MAX5487_COPY_NV_TO_AB); + if (ret < 0) + return ret; + + return iio_device_register(indio_dev); +} + +static int max5487_spi_remove(struct spi_device *spi) +{ + struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev); + + iio_device_unregister(indio_dev); + + /* save both wiper regs to NV regs */ + return max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV); +} + +static const struct spi_device_id max5487_id[] = { + { "MAX5487", 10 }, + { "MAX5488", 50 }, + { "MAX5489", 100 }, + { } +}; +MODULE_DEVICE_TABLE(spi, max5487_id); + +static const struct acpi_device_id max5487_acpi_match[] = { + { "MAX5487", 10 }, + { "MAX5488", 50 }, + { "MAX5489", 100 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, max5487_acpi_match); + +static struct spi_driver max5487_driver = { + .driver = { + .name = "max5487", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(max5487_acpi_match), + }, + .id_table = max5487_id, + .probe = max5487_spi_probe, + .remove = max5487_spi_remove +}; +module_spi_driver(max5487_driver); + +MODULE_AUTHOR("Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>"); +MODULE_DESCRIPTION("max5487 SPI driver"); +MODULE_LICENSE("GPL v2"); @@ -8,12 +8,20 @@ * DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address * mcp4531 1 129 5, 10, 50, 100 010111x * mcp4532 1 129 5, 10, 50, 100 01011xx + * mcp4541 1 129 5, 10, 50, 100 010111x + * mcp4542 1 129 5, 10, 50, 100 01011xx * mcp4551 1 257 5, 10, 50, 100 010111x * mcp4552 1 257 5, 10, 50, 100 01011xx + * mcp4561 1 257 5, 10, 50, 100 010111x + * mcp4562 1 257 5, 10, 50, 100 01011xx * mcp4631 2 129 5, 10, 50, 100 0101xxx * mcp4632 2 129 5, 10, 50, 100 01011xx + * mcp4641 2 129 5, 10, 50, 100 0101xxx + * mcp4642 2 129 5, 10, 50, 100 01011xx * mcp4651 2 257 5, 10, 50, 100 0101xxx * mcp4652 2 257 5, 10, 50, 100 01011xx + * mcp4661 2 257 5, 10, 50, 100 0101xxx + * mcp4662 2 257 5, 10, 50, 100 01011xx * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by @@ -23,6 +31,8 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/iio/iio.h> @@ -37,18 +47,34 @@ enum mcp4531_type { MCP453x_103, MCP453x_503, MCP453x_104, + MCP454x_502, + MCP454x_103, + MCP454x_503, + MCP454x_104, MCP455x_502, MCP455x_103, MCP455x_503, MCP455x_104, + MCP456x_502, + MCP456x_103, + MCP456x_503, + MCP456x_104, MCP463x_502, MCP463x_103, MCP463x_503, MCP463x_104, + MCP464x_502, + MCP464x_103, + MCP464x_503, + MCP464x_104, MCP465x_502, MCP465x_103, MCP465x_503, MCP465x_104, + MCP466x_502, + MCP466x_103, + MCP466x_503, + MCP466x_104, }; static const struct mcp4531_cfg mcp4531_cfg[] = { @@ -56,18 +82,34 @@ static const struct mcp4531_cfg mcp4531_cfg[] = { [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, }, [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, }, [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, }, + [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, }, + [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 
10, }, + [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, }, + [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, }, [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, }, [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, }, [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, }, [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, }, + [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, }, + [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, }, + [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, }, + [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, }, [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, }, [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, }, [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, }, [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, }, + [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, }, + [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, }, + [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, }, + [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, }, [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, }, [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, }, [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, }, [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, }, + [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, }, + [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, }, + [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, }, + [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, }, }; #define MCP4531_WRITE (0 << 2) @@ -148,12 +190,89 @@ static const struct iio_info mcp4531_info = { .driver_module = THIS_MODULE, }; +#ifdef CONFIG_OF + +#define MCP4531_COMPATIBLE(of_compatible, cfg) { \ + .compatible = of_compatible, \ + .data = &mcp4531_cfg[cfg], \ +} + +static const struct of_device_id mcp4531_of_match[] = { + MCP4531_COMPATIBLE("microchip,mcp4531-502", MCP453x_502), + MCP4531_COMPATIBLE("microchip,mcp4531-103", MCP453x_103), + MCP4531_COMPATIBLE("microchip,mcp4531-503", MCP453x_503), + MCP4531_COMPATIBLE("microchip,mcp4531-104", MCP453x_104), + MCP4531_COMPATIBLE("microchip,mcp4532-502", MCP453x_502), + MCP4531_COMPATIBLE("microchip,mcp4532-103", MCP453x_103), + MCP4531_COMPATIBLE("microchip,mcp4532-503", MCP453x_503), + MCP4531_COMPATIBLE("microchip,mcp4532-104", MCP453x_104), + MCP4531_COMPATIBLE("microchip,mcp4541-502", MCP454x_502), + MCP4531_COMPATIBLE("microchip,mcp4541-103", MCP454x_103), + MCP4531_COMPATIBLE("microchip,mcp4541-503", MCP454x_503), + MCP4531_COMPATIBLE("microchip,mcp4541-104", MCP454x_104), + MCP4531_COMPATIBLE("microchip,mcp4542-502", MCP454x_502), + MCP4531_COMPATIBLE("microchip,mcp4542-103", MCP454x_103), + MCP4531_COMPATIBLE("microchip,mcp4542-503", MCP454x_503), + MCP4531_COMPATIBLE("microchip,mcp4542-104", MCP454x_104), + MCP4531_COMPATIBLE("microchip,mcp4551-502", MCP455x_502), + MCP4531_COMPATIBLE("microchip,mcp4551-103", MCP455x_103), + MCP4531_COMPATIBLE("microchip,mcp4551-503", MCP455x_503), + MCP4531_COMPATIBLE("microchip,mcp4551-104", MCP455x_104), + MCP4531_COMPATIBLE("microchip,mcp4552-502", MCP455x_502), + MCP4531_COMPATIBLE("microchip,mcp4552-103", MCP455x_103), + MCP4531_COMPATIBLE("microchip,mcp4552-503", MCP455x_503), + MCP4531_COMPATIBLE("microchip,mcp4552-104", MCP455x_104), + MCP4531_COMPATIBLE("microchip,mcp4561-502", MCP456x_502), + MCP4531_COMPATIBLE("microchip,mcp4561-103", 
MCP456x_103), + MCP4531_COMPATIBLE("microchip,mcp4561-503", MCP456x_503), + MCP4531_COMPATIBLE("microchip,mcp4561-104", MCP456x_104), + MCP4531_COMPATIBLE("microchip,mcp4562-502", MCP456x_502), + MCP4531_COMPATIBLE("microchip,mcp4562-103", MCP456x_103), + MCP4531_COMPATIBLE("microchip,mcp4562-503", MCP456x_503), + MCP4531_COMPATIBLE("microchip,mcp4562-104", MCP456x_104), + MCP4531_COMPATIBLE("microchip,mcp4631-502", MCP463x_502), + MCP4531_COMPATIBLE("microchip,mcp4631-103", MCP463x_103), + MCP4531_COMPATIBLE("microchip,mcp4631-503", MCP463x_503), + MCP4531_COMPATIBLE("microchip,mcp4631-104", MCP463x_104), + MCP4531_COMPATIBLE("microchip,mcp4632-502", MCP463x_502), + MCP4531_COMPATIBLE("microchip,mcp4632-103", MCP463x_103), + MCP4531_COMPATIBLE("microchip,mcp4632-503", MCP463x_503), + MCP4531_COMPATIBLE("microchip,mcp4632-104", MCP463x_104), + MCP4531_COMPATIBLE("microchip,mcp4641-502", MCP464x_502), + MCP4531_COMPATIBLE("microchip,mcp4641-103", MCP464x_103), + MCP4531_COMPATIBLE("microchip,mcp4641-503", MCP464x_503), + MCP4531_COMPATIBLE("microchip,mcp4641-104", MCP464x_104), + MCP4531_COMPATIBLE("microchip,mcp4642-502", MCP464x_502), + MCP4531_COMPATIBLE("microchip,mcp4642-103", MCP464x_103), + MCP4531_COMPATIBLE("microchip,mcp4642-503", MCP464x_503), + MCP4531_COMPATIBLE("microchip,mcp4642-104", MCP464x_104), + MCP4531_COMPATIBLE("microchip,mcp4651-502", MCP465x_502), + MCP4531_COMPATIBLE("microchip,mcp4651-103", MCP465x_103), + MCP4531_COMPATIBLE("microchip,mcp4651-503", MCP465x_503), + MCP4531_COMPATIBLE("microchip,mcp4651-104", MCP465x_104), + MCP4531_COMPATIBLE("microchip,mcp4652-502", MCP465x_502), + MCP4531_COMPATIBLE("microchip,mcp4652-103", MCP465x_103), + MCP4531_COMPATIBLE("microchip,mcp4652-503", MCP465x_503), + MCP4531_COMPATIBLE("microchip,mcp4652-104", MCP465x_104), + MCP4531_COMPATIBLE("microchip,mcp4661-502", MCP466x_502), + MCP4531_COMPATIBLE("microchip,mcp4661-103", MCP466x_103), + MCP4531_COMPATIBLE("microchip,mcp4661-503", MCP466x_503), + MCP4531_COMPATIBLE("microchip,mcp4661-104", MCP466x_104), + MCP4531_COMPATIBLE("microchip,mcp4662-502", MCP466x_502), + MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103), + MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503), + MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104), + { /* sentinel */ } +}; +#endif + static int mcp4531_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; struct mcp4531_data *data; struct iio_dev *indio_dev; + const struct of_device_id *match; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { @@ -167,7 +286,12 @@ static int mcp4531_probe(struct i2c_client *client, data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; - data->cfg = &mcp4531_cfg[id->driver_data]; + + match = of_match_device(of_match_ptr(mcp4531_of_match), dev); + if (match) + data->cfg = of_device_get_match_data(dev); + else + data->cfg = &mcp4531_cfg[id->driver_data]; indio_dev->dev.parent = dev; indio_dev->info = &mcp4531_info; @@ -187,6 +311,14 @@ static const struct i2c_device_id mcp4531_id[] = { { "mcp4532-103", MCP453x_103 }, { "mcp4532-503", MCP453x_503 }, { "mcp4532-104", MCP453x_104 }, + { "mcp4541-502", MCP454x_502 }, + { "mcp4541-103", MCP454x_103 }, + { "mcp4541-503", MCP454x_503 }, + { "mcp4541-104", MCP454x_104 }, + { "mcp4542-502", MCP454x_502 }, + { "mcp4542-103", MCP454x_103 }, + { "mcp4542-503", MCP454x_503 }, + { "mcp4542-104", MCP454x_104 }, { "mcp4551-502", MCP455x_502 }, { "mcp4551-103", 
MCP455x_103 },
	{ "mcp4551-503", MCP455x_503 },
@@ -195,6 +327,14 @@ static const struct i2c_device_id mcp4531_id[] = {
	{ "mcp4552-103", MCP455x_103 },
	{ "mcp4552-503", MCP455x_503 },
	{ "mcp4552-104", MCP455x_104 },
+	{ "mcp4561-502", MCP456x_502 },
+	{ "mcp4561-103", MCP456x_103 },
+	{ "mcp4561-503", MCP456x_503 },
+	{ "mcp4561-104", MCP456x_104 },
+	{ "mcp4562-502", MCP456x_502 },
+	{ "mcp4562-103", MCP456x_103 },
+	{ "mcp4562-503", MCP456x_503 },
+	{ "mcp4562-104", MCP456x_104 },
	{ "mcp4631-502", MCP463x_502 },
	{ "mcp4631-103", MCP463x_103 },
	{ "mcp4631-503", MCP463x_503 },
@@ -203,6 +343,14 @@ static const struct i2c_device_id mcp4531_id[] = {
	{ "mcp4632-103", MCP463x_103 },
	{ "mcp4632-503", MCP463x_503 },
	{ "mcp4632-104", MCP463x_104 },
+	{ "mcp4641-502", MCP464x_502 },
+	{ "mcp4641-103", MCP464x_103 },
+	{ "mcp4641-503", MCP464x_503 },
+	{ "mcp4641-104", MCP464x_104 },
+	{ "mcp4642-502", MCP464x_502 },
+	{ "mcp4642-103", MCP464x_103 },
+	{ "mcp4642-503", MCP464x_503 },
+	{ "mcp4642-104", MCP464x_104 },
	{ "mcp4651-502", MCP465x_502 },
	{ "mcp4651-103", MCP465x_103 },
	{ "mcp4651-503", MCP465x_503 },
@@ -211,6 +359,14 @@ static const struct i2c_device_id mcp4531_id[] = {
	{ "mcp4652-103", MCP465x_103 },
	{ "mcp4652-503", MCP465x_503 },
	{ "mcp4652-104", MCP465x_104 },
+	{ "mcp4661-502", MCP466x_502 },
+	{ "mcp4661-103", MCP466x_103 },
+	{ "mcp4661-503", MCP466x_503 },
+	{ "mcp4661-104", MCP466x_104 },
+	{ "mcp4662-502", MCP466x_502 },
+	{ "mcp4662-103", MCP466x_103 },
+	{ "mcp4662-503", MCP466x_503 },
+	{ "mcp4662-104", MCP466x_104 },
	{}
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -218,6 +374,7 @@ MODULE_DEVICE_TABLE(i2c, mcp4531_id);
static struct i2c_driver mcp4531_driver = {
	.driver = {
		.name	= "mcp4531",
+		.of_match_table = of_match_ptr(mcp4531_of_match),
	},
	.probe		= mcp4531_probe,
	.id_table	= mcp4531_id,
@@ -116,10 +116,6 @@ static int tpl0102_probe(struct i2c_client *client,
	struct tpl0102_data *data;
	struct iio_dev *indio_dev;

-	if (!i2c_check_functionality(client->adapter,
-				     I2C_FUNC_SMBUS_WORD_DATA))
-		return -EOPNOTSUPP;
-
	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;
@@ -6,16 +6,33 @@ menu "Pressure sensors"

config BMP280
-	tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
-	depends on I2C
+	tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
+	depends on (I2C || SPI_MASTER)
	depends on !(BMP085_I2C=y || BMP085_I2C=m)
-	select REGMAP_I2C
+	depends on !(BMP085_SPI=y || BMP085_SPI=m)
+	select REGMAP
+	select BMP280_I2C if (I2C)
+	select BMP280_SPI if (SPI_MASTER)
	help
	  Say yes here to build support for Bosch Sensortec BMP180 and BMP280
-	  pressure and temperature sensors.
+	  pressure and temperature sensors. Also supports the BME280 with
+	  an additional humidity sensor channel.

-	  To compile this driver as a module, choose M here: the module
-	  will be called bmp280.
+	  To compile this driver as a module, choose M here: the core module
+	  will be called bmp280 and you will also get bmp280-i2c for I2C
+	  and/or bmp280-spi for SPI support.
+
+config BMP280_I2C
+	tristate
+	depends on BMP280
+	depends on I2C
+	select REGMAP_I2C
+
+config BMP280_SPI
+	tristate
+	depends on BMP280
+	depends on SPI_MASTER
+	select REGMAP

config HID_SENSOR_PRESS
	depends on HID_SENSOR_HUB
@@ -130,7 +147,7 @@ config IIO_ST_PRESS
	select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
	help
	  Say yes here to build support for STMicroelectronics pressure
-	  sensors: LPS001WP, LPS25H, LPS331AP.
+	  sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB.
This driver can also be built as a module. If so, these modules will be created: @@ -4,6 +4,9 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_BMP280) += bmp280.o +bmp280-objs := bmp280-core.o bmp280-regmap.o +obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o +obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o obj-$(CONFIG_HP03) += hp03.o obj-$(CONFIG_MPL115) += mpl115.o @@ -1,5 +1,9 @@ /* + * Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com> + * Copyright (c) 2012 Bosch Sensortec GmbH + * Copyright (c) 2012 Unixphere AB * Copyright (c) 2014 Intel Corporation + * Copyright (c) 2016 Linus Walleij <linus.walleij@linaro.org> * * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor. * @@ -10,99 +14,63 @@ * Datasheet: * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf + * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf */ #define pr_fmt(fmt) "bmp280: " fmt +#include <linux/device.h> #include <linux/module.h> -#include <linux/i2c.h> -#include <linux/acpi.h> #include <linux/regmap.h> #include <linux/delay.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> +#include <linux/interrupt.h> +#include <linux/irq.h> /* For irq_get_irq_data() */ +#include <linux/completion.h> +#include <linux/pm_runtime.h> +#include <linux/random.h> -/* BMP280 specific registers */ -#define BMP280_REG_TEMP_XLSB 0xFC -#define BMP280_REG_TEMP_LSB 0xFB -#define BMP280_REG_TEMP_MSB 0xFA -#define BMP280_REG_PRESS_XLSB 0xF9 -#define BMP280_REG_PRESS_LSB 0xF8 -#define BMP280_REG_PRESS_MSB 0xF7 - -#define BMP280_REG_CONFIG 0xF5 -#define BMP280_REG_STATUS 0xF3 - -#define BMP280_REG_COMP_TEMP_START 0x88 -#define BMP280_COMP_TEMP_REG_COUNT 6 - -#define BMP280_REG_COMP_PRESS_START 0x8E -#define BMP280_COMP_PRESS_REG_COUNT 18 - -#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2)) -#define BMP280_FILTER_OFF 0 -#define BMP280_FILTER_2X BIT(2) -#define BMP280_FILTER_4X BIT(3) -#define BMP280_FILTER_8X (BIT(3) | BIT(2)) -#define BMP280_FILTER_16X BIT(4) - -#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5)) -#define BMP280_OSRS_TEMP_SKIP 0 -#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5) -#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1) -#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2) -#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3) -#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4) -#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5) - -#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2)) -#define BMP280_OSRS_PRESS_SKIP 0 -#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2) -#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1) -#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2) -#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3) -#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4) -#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5) - -#define BMP280_MODE_MASK (BIT(1) | BIT(0)) -#define BMP280_MODE_SLEEP 0 -#define BMP280_MODE_FORCED BIT(0) -#define BMP280_MODE_NORMAL (BIT(1) | BIT(0)) - -/* BMP180 specific registers */ -#define BMP180_REG_OUT_XLSB 0xF8 -#define BMP180_REG_OUT_LSB 0xF7 -#define BMP180_REG_OUT_MSB 0xF6 - -#define BMP180_REG_CALIB_START 0xAA -#define BMP180_REG_CALIB_COUNT 22 - -#define BMP180_MEAS_SCO BIT(5) -#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO) -#define 
BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO) -#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0) -#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1) -#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2) -#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3) - -/* BMP180 and BMP280 common registers */ -#define BMP280_REG_CTRL_MEAS 0xF4 -#define BMP280_REG_RESET 0xE0 -#define BMP280_REG_ID 0xD0 - -#define BMP180_CHIP_ID 0x55 -#define BMP280_CHIP_ID 0x58 -#define BMP280_SOFT_RESET_VAL 0xB6 +#include "bmp280.h" + +/* + * These enums are used for indexing into the array of calibration + * coefficients for BMP180. + */ +enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD }; + +struct bmp180_calib { + s16 AC1; + s16 AC2; + s16 AC3; + u16 AC4; + u16 AC5; + u16 AC6; + s16 B1; + s16 B2; + s16 MB; + s16 MC; + s16 MD; +}; struct bmp280_data { - struct i2c_client *client; + struct device *dev; struct mutex lock; struct regmap *regmap; + struct completion done; + bool use_eoc; const struct bmp280_chip_info *chip_info; + struct bmp180_calib calib; + struct regulator *vddd; + struct regulator *vdda; + unsigned int start_up_time; /* in milliseconds */ /* log of base 2 of oversampling rate */ u8 oversampling_press; u8 oversampling_temp; + u8 oversampling_humid; /* * Carryover value from temperature conversion, used in pressure @@ -112,17 +80,19 @@ struct bmp280_data { }; struct bmp280_chip_info { - const struct regmap_config *regmap_config; - const int *oversampling_temp_avail; int num_oversampling_temp_avail; const int *oversampling_press_avail; int num_oversampling_press_avail; + const int *oversampling_humid_avail; + int num_oversampling_humid_avail; + int (*chip_config)(struct bmp280_data *); int (*read_temp)(struct bmp280_data *, int *); int (*read_press)(struct bmp280_data *, int *, int *); + int (*read_humid)(struct bmp280_data *, int *, int *); }; /* @@ -143,45 +113,75 @@ static const struct iio_chan_spec bmp280_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), }, + { + .type = IIO_HUMIDITYRELATIVE, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), + }, }; -static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg) -{ - switch (reg) { - case BMP280_REG_CONFIG: - case BMP280_REG_CTRL_MEAS: - case BMP280_REG_RESET: - return true; - default: - return false; - }; -} +/* + * Returns humidity in percent, resolution is 0.01 percent. Output value of + * "47445" represents 47445/1024 = 46.333 %RH. + * + * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula". 
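+ *
+ * The expression below is the 32-bit fixed-point reference implementation
+ * from the datasheet; it yields humidity in Q22.10 format (%RH * 1024),
+ * which bmp280_read_humid() then reports as IIO_VAL_FRACTIONAL with a
+ * denominator of 1024.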
+ */ -static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg) +static u32 bmp280_compensate_humidity(struct bmp280_data *data, + s32 adc_humidity) { - switch (reg) { - case BMP280_REG_TEMP_XLSB: - case BMP280_REG_TEMP_LSB: - case BMP280_REG_TEMP_MSB: - case BMP280_REG_PRESS_XLSB: - case BMP280_REG_PRESS_LSB: - case BMP280_REG_PRESS_MSB: - case BMP280_REG_STATUS: - return true; - default: - return false; + struct device *dev = data->dev; + unsigned int H1, H3, tmp; + int H2, H4, H5, H6, ret, var; + + ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1); + if (ret < 0) { + dev_err(dev, "failed to read H1 comp value\n"); + return ret; } -} -static const struct regmap_config bmp280_regmap_config = { - .reg_bits = 8, - .val_bits = 8, + ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2); + if (ret < 0) { + dev_err(dev, "failed to read H2 comp value\n"); + return ret; + } + H2 = sign_extend32(le16_to_cpu(tmp), 15); + + ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3); + if (ret < 0) { + dev_err(dev, "failed to read H3 comp value\n"); + return ret; + } + + ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2); + if (ret < 0) { + dev_err(dev, "failed to read H4 comp value\n"); + return ret; + } + H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) | + (be16_to_cpu(tmp) & 0xf), 11); + + ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2); + if (ret < 0) { + dev_err(dev, "failed to read H5 comp value\n"); + return ret; + } + H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11); + + ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp); + if (ret < 0) { + dev_err(dev, "failed to read H6 comp value\n"); + return ret; + } + H6 = sign_extend32(tmp, 7); - .max_register = BMP280_REG_TEMP_XLSB, - .cache_type = REGCACHE_RBTREE, + var = ((s32)data->t_fine) - 76800; + var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15) + * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10) + + 2097152) * H2 + 8192) >> 14); + var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4; - .writeable_reg = bmp280_is_writeable_reg, - .volatile_reg = bmp280_is_volatile_reg, + return var >> 12; }; /* @@ -201,7 +201,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data, ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START, buf, BMP280_COMP_TEMP_REG_COUNT); if (ret < 0) { - dev_err(&data->client->dev, + dev_err(data->dev, "failed to read temperature calibration parameters\n"); return ret; } @@ -241,7 +241,7 @@ static u32 bmp280_compensate_press(struct bmp280_data *data, ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START, buf, BMP280_COMP_PRESS_REG_COUNT); if (ret < 0) { - dev_err(&data->client->dev, + dev_err(data->dev, "failed to read pressure calibration parameters\n"); return ret; } @@ -276,7 +276,7 @@ static int bmp280_read_temp(struct bmp280_data *data, ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, (u8 *) &tmp, 3); if (ret < 0) { - dev_err(&data->client->dev, "failed to read temperature\n"); + dev_err(data->dev, "failed to read temperature\n"); return ret; } @@ -311,7 +311,7 @@ static int bmp280_read_press(struct bmp280_data *data, ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, (u8 *) &tmp, 3); if (ret < 0) { - dev_err(&data->client->dev, "failed to read pressure\n"); + dev_err(data->dev, "failed to read pressure\n"); return ret; } @@ -324,6 +324,34 @@ static int bmp280_read_press(struct bmp280_data *data, return IIO_VAL_FRACTIONAL; } +static int 
bmp280_read_humid(struct bmp280_data *data, int *val, int *val2) +{ + int ret; + __be16 tmp = 0; + s32 adc_humidity; + u32 comp_humidity; + + /* Read and compensate temperature so we get a reading of t_fine. */ + ret = bmp280_read_temp(data, NULL); + if (ret < 0) + return ret; + + ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, + (u8 *) &tmp, 2); + if (ret < 0) { + dev_err(data->dev, "failed to read humidity\n"); + return ret; + } + + adc_humidity = be16_to_cpu(tmp); + comp_humidity = bmp280_compensate_humidity(data, adc_humidity); + + *val = comp_humidity; + *val2 = 1024; + + return IIO_VAL_FRACTIONAL; +} + static int bmp280_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -331,11 +359,15 @@ static int bmp280_read_raw(struct iio_dev *indio_dev, int ret; struct bmp280_data *data = iio_priv(indio_dev); + pm_runtime_get_sync(data->dev); mutex_lock(&data->lock); switch (mask) { case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { + case IIO_HUMIDITYRELATIVE: + ret = data->chip_info->read_humid(data, val, val2); + break; case IIO_PRESSURE: ret = data->chip_info->read_press(data, val, val2); break; @@ -349,6 +381,10 @@ static int bmp280_read_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: switch (chan->type) { + case IIO_HUMIDITYRELATIVE: + *val = 1 << data->oversampling_humid; + ret = IIO_VAL_INT; + break; case IIO_PRESSURE: *val = 1 << data->oversampling_press; ret = IIO_VAL_INT; @@ -368,10 +404,29 @@ static int bmp280_read_raw(struct iio_dev *indio_dev, } mutex_unlock(&data->lock); + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); return ret; } +static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data, + int val) +{ + int i; + const int *avail = data->chip_info->oversampling_humid_avail; + const int n = data->chip_info->num_oversampling_humid_avail; + + for (i = 0; i < n; i++) { + if (avail[i] == val) { + data->oversampling_humid = ilog2(val); + + return data->chip_info->chip_config(data); + } + } + return -EINVAL; +} + static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data, int val) { @@ -415,8 +470,12 @@ static int bmp280_write_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + pm_runtime_get_sync(data->dev); mutex_lock(&data->lock); switch (chan->type) { + case IIO_HUMIDITYRELATIVE: + ret = bmp280_write_oversampling_ratio_humid(data, val); + break; case IIO_PRESSURE: ret = bmp280_write_oversampling_ratio_press(data, val); break; @@ -428,6 +487,8 @@ static int bmp280_write_raw(struct iio_dev *indio_dev, break; } mutex_unlock(&data->lock); + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); break; default: return -EINVAL; @@ -502,7 +563,7 @@ static int bmp280_chip_config(struct bmp280_data *data) BMP280_MODE_MASK, osrs | BMP280_MODE_NORMAL); if (ret < 0) { - dev_err(&data->client->dev, + dev_err(data->dev, "failed to write ctrl_meas register\n"); return ret; } @@ -511,7 +572,7 @@ static int bmp280_chip_config(struct bmp280_data *data) BMP280_FILTER_MASK, BMP280_FILTER_4X); if (ret < 0) { - dev_err(&data->client->dev, + dev_err(data->dev, "failed to write config register\n"); return ret; } @@ -522,8 +583,6 @@ static int bmp280_chip_config(struct bmp280_data *data) static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 }; static const struct bmp280_chip_info bmp280_chip_info = { - .regmap_config = &bmp280_regmap_config, - .oversampling_temp_avail = 
bmp280_oversampling_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail), @@ -535,39 +594,32 @@ static const struct bmp280_chip_info bmp280_chip_info = { .read_press = bmp280_read_press, }; -static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg) +static int bme280_chip_config(struct bmp280_data *data) { - switch (reg) { - case BMP280_REG_CTRL_MEAS: - case BMP280_REG_RESET: - return true; - default: - return false; - }; -} + int ret = bmp280_chip_config(data); + u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); -static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg) -{ - switch (reg) { - case BMP180_REG_OUT_XLSB: - case BMP180_REG_OUT_LSB: - case BMP180_REG_OUT_MSB: - case BMP280_REG_CTRL_MEAS: - return true; - default: - return false; - } + if (ret < 0) + return ret; + + return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, + BMP280_OSRS_HUMIDITY_MASK, osrs); } -static const struct regmap_config bmp180_regmap_config = { - .reg_bits = 8, - .val_bits = 8, +static const struct bmp280_chip_info bme280_chip_info = { + .oversampling_temp_avail = bmp280_oversampling_avail, + .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail), + + .oversampling_press_avail = bmp280_oversampling_avail, + .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail), - .max_register = BMP180_REG_OUT_XLSB, - .cache_type = REGCACHE_RBTREE, + .oversampling_humid_avail = bmp280_oversampling_avail, + .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail), - .writeable_reg = bmp180_is_writeable_reg, - .volatile_reg = bmp180_is_volatile_reg, + .chip_config = bme280_chip_config, + .read_temp = bmp280_read_temp, + .read_press = bmp280_read_press, + .read_humid = bmp280_read_humid, }; static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) @@ -577,16 +629,32 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) unsigned int delay_us; unsigned int ctrl; + if (data->use_eoc) + init_completion(&data->done); + ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); if (ret) return ret; - if (ctrl_meas == BMP180_MEAS_TEMP) - delay_us = 4500; - else - delay_us = conversion_time_max[data->oversampling_press]; - - usleep_range(delay_us, delay_us + 1000); + if (data->use_eoc) { + /* + * If we have a completion interrupt, use it, wait up to + * 100ms. The longest conversion time listed is 76.5 ms for + * advanced resolution mode. + */ + ret = wait_for_completion_timeout(&data->done, + 1 + msecs_to_jiffies(100)); + if (!ret) + dev_err(data->dev, "timeout waiting for completion\n"); + } else { + if (ctrl_meas == BMP180_MEAS_TEMP) + delay_us = 4500; + else + delay_us = + conversion_time_max[data->oversampling_press]; + + usleep_range(delay_us, delay_us + 1000); + } ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl); if (ret) @@ -617,26 +685,6 @@ static int bmp180_read_adc_temp(struct bmp280_data *data, int *val) return 0; } -/* - * These enums are used for indexing into the array of calibration - * coefficients for BMP180. 
- */ -enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD }; - -struct bmp180_calib { - s16 AC1; - s16 AC2; - s16 AC3; - u16 AC4; - u16 AC5; - u16 AC6; - s16 B1; - s16 B2; - s16 MB; - s16 MC; - s16 MD; -}; - static int bmp180_read_calib(struct bmp280_data *data, struct bmp180_calib *calib) { @@ -656,6 +704,9 @@ static int bmp180_read_calib(struct bmp280_data *data, return -EIO; } + /* Toss the calibration data into the entropy pool */ + add_device_randomness(buf, sizeof(buf)); + calib->AC1 = be16_to_cpu(buf[AC1]); calib->AC2 = be16_to_cpu(buf[AC2]); calib->AC3 = be16_to_cpu(buf[AC3]); @@ -679,19 +730,11 @@ static int bmp180_read_calib(struct bmp280_data *data, */ static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp) { - int ret; s32 x1, x2; - struct bmp180_calib calib; + struct bmp180_calib *calib = &data->calib; - ret = bmp180_read_calib(data, &calib); - if (ret < 0) { - dev_err(&data->client->dev, - "failed to read calibration coefficients\n"); - return ret; - } - - x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15; - x2 = (calib.MC << 11) / (x1 + calib.MD); + x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15; + x2 = (calib->MC << 11) / (x1 + calib->MD); data->t_fine = x1 + x2; return (data->t_fine + 8) >> 4; @@ -746,29 +789,21 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val) */ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press) { - int ret; s32 x1, x2, x3, p; s32 b3, b6; u32 b4, b7; s32 oss = data->oversampling_press; - struct bmp180_calib calib; - - ret = bmp180_read_calib(data, &calib); - if (ret < 0) { - dev_err(&data->client->dev, - "failed to read calibration coefficients\n"); - return ret; - } + struct bmp180_calib *calib = &data->calib; b6 = data->t_fine - 4000; - x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11; - x2 = calib.AC2 * b6 >> 11; + x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11; + x2 = calib->AC2 * b6 >> 11; x3 = x1 + x2; - b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4; - x1 = calib.AC3 * b6 >> 13; - x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16; + b3 = ((((s32)calib->AC1 * 4 + x3) << oss) + 2) / 4; + x1 = calib->AC3 * b6 >> 13; + x2 = (calib->B1 * ((b6 * b6) >> 12)) >> 16; x3 = (x1 + x2 + 2) >> 2; - b4 = calib.AC4 * (u32)(x3 + 32768) >> 15; + b4 = calib->AC4 * (u32)(x3 + 32768) >> 15; b7 = ((u32)adc_press - b3) * (50000 >> oss); if (b7 < 0x80000000) p = (b7 * 2) / b4; @@ -815,8 +850,6 @@ static const int bmp180_oversampling_temp_avail[] = { 1 }; static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 }; static const struct bmp280_chip_info bmp180_chip_info = { - .regmap_config = &bmp180_regmap_config, - .oversampling_temp_avail = bmp180_oversampling_temp_avail, .num_oversampling_temp_avail = ARRAY_SIZE(bmp180_oversampling_temp_avail), @@ -830,92 +863,254 @@ static const struct bmp280_chip_info bmp180_chip_info = { .read_press = bmp180_read_press, }; -static int bmp280_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static irqreturn_t bmp085_eoc_irq(int irq, void *d) +{ + struct bmp280_data *data = d; + + complete(&data->done); + + return IRQ_HANDLED; +} + +static int bmp085_fetch_eoc_irq(struct device *dev, + const char *name, + int irq, + struct bmp280_data *data) +{ + unsigned long irq_trig; + int ret; + + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + if (irq_trig != IRQF_TRIGGER_RISING) { + dev_err(dev, "non-rising trigger given for EOC interrupt, " + "trying to enforce it\n"); + irq_trig = IRQF_TRIGGER_RISING; + } + ret = devm_request_threaded_irq(dev, + irq, + 
bmp085_eoc_irq,
+					NULL,
+					irq_trig,
+					name,
+					data);
+	if (ret) {
+		/* Bail out without IRQ but keep the driver in place */
+		dev_err(dev, "unable to request DRDY IRQ\n");
+		return 0;
+	}
+
+	data->use_eoc = true;
+	return 0;
+}
+
+int bmp280_common_probe(struct device *dev,
+			struct regmap *regmap,
+			unsigned int chip,
+			const char *name,
+			int irq)
{
	int ret;
	struct iio_dev *indio_dev;
	struct bmp280_data *data;
	unsigned int chip_id;
+	struct gpio_desc *gpiod;

-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	mutex_init(&data->lock);
-	data->client = client;
+	data->dev = dev;

-	indio_dev->dev.parent = &client->dev;
-	indio_dev->name = id->name;
+	indio_dev->dev.parent = dev;
+	indio_dev->name = name;
	indio_dev->channels = bmp280_channels;
-	indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
	indio_dev->info = &bmp280_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

-	switch (id->driver_data) {
+	switch (chip) {
	case BMP180_CHIP_ID:
+		indio_dev->num_channels = 2;
		data->chip_info = &bmp180_chip_info;
		data->oversampling_press = ilog2(8);
		data->oversampling_temp = ilog2(1);
+		data->start_up_time = 10;
		break;
	case BMP280_CHIP_ID:
+		indio_dev->num_channels = 2;
		data->chip_info = &bmp280_chip_info;
		data->oversampling_press = ilog2(16);
		data->oversampling_temp = ilog2(2);
+		data->start_up_time = 2;
+		break;
+	case BME280_CHIP_ID:
+		indio_dev->num_channels = 3;
+		data->chip_info = &bme280_chip_info;
+		data->oversampling_press = ilog2(16);
+		data->oversampling_humid = ilog2(16);
+		data->oversampling_temp = ilog2(2);
+		data->start_up_time = 2;
		break;
	default:
		return -EINVAL;
	}

-	data->regmap = devm_regmap_init_i2c(client,
-					data->chip_info->regmap_config);
-	if (IS_ERR(data->regmap)) {
-		dev_err(&client->dev, "failed to allocate register map\n");
-		return PTR_ERR(data->regmap);
+	/* Bring up regulators */
+	data->vddd = devm_regulator_get(dev, "vddd");
+	if (IS_ERR(data->vddd)) {
+		dev_err(dev, "failed to get VDDD regulator\n");
+		return PTR_ERR(data->vddd);
+	}
+	ret = regulator_enable(data->vddd);
+	if (ret) {
+		dev_err(dev, "failed to enable VDDD regulator\n");
+		return ret;
+	}
+	data->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(data->vdda)) {
+		dev_err(dev, "failed to get VDDA regulator\n");
+		ret = PTR_ERR(data->vdda);
+		goto out_disable_vddd;
+	}
+	ret = regulator_enable(data->vdda);
+	if (ret) {
+		dev_err(dev, "failed to enable VDDA regulator\n");
+		goto out_disable_vddd;
+	}
+	/* Wait to make sure we started up properly */
+	mdelay(data->start_up_time);
+
+	/* Bring chip out of reset if there is an assigned GPIO line */
+	gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	/* Deassert the signal */
+	if (!IS_ERR(gpiod)) {
+		dev_info(dev, "release reset\n");
+		gpiod_set_value(gpiod, 0);
	}

-	ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+	data->regmap = regmap;
+	ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
	if (ret < 0)
-		return ret;
-	if (chip_id != id->driver_data) {
-		dev_err(&client->dev, "bad chip id. expected %x got %x\n",
-			BMP280_CHIP_ID, chip_id);
-		return -EINVAL;
+		goto out_disable_vdda;
+	if (chip_id != chip) {
+		dev_err(dev, "bad chip id: expected %x got %x\n",
+			chip, chip_id);
+		ret = -EINVAL;
+		goto out_disable_vdda;
	}

	ret = data->chip_info->chip_config(data);
	if (ret < 0)
-		return ret;
+		goto out_disable_vdda;
+
+	dev_set_drvdata(dev, indio_dev);
+
+	/*
+	 * The BMP085 and BMP180 have calibration in an E2PROM, read it out
+	 * at probe time. It will not change.
+	 */
+	if (chip_id == BMP180_CHIP_ID) {
+		ret = bmp180_read_calib(data, &data->calib);
+		if (ret < 0) {
+			dev_err(data->dev,
+				"failed to read calibration coefficients\n");
+			goto out_disable_vdda;
+		}
+	}
+
+	/*
+	 * Attempt to grab an optional EOC IRQ - only the BMP085 has this
+	 * however as it happens, the BMP085 shares the chip ID of BMP180
+	 * so we look for an IRQ if we have that.
+	 */
+	if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
+		ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+		if (ret)
+			goto out_disable_vdda;
+	}
+
+	/* Enable runtime PM */
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	/*
+	 * Set autosuspend to two orders of magnitude larger than the
+	 * start-up time.
+	 */
+	pm_runtime_set_autosuspend_delay(dev, data->start_up_time * 100);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_put(dev);
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto out_runtime_pm_disable;
+

-	return devm_iio_device_register(&client->dev, indio_dev);
+	return 0;
+
+out_runtime_pm_disable:
+	pm_runtime_get_sync(data->dev);
+	pm_runtime_put_noidle(data->dev);
+	pm_runtime_disable(data->dev);
+out_disable_vdda:
+	regulator_disable(data->vdda);
+out_disable_vddd:
+	regulator_disable(data->vddd);
+	return ret;
}
+EXPORT_SYMBOL(bmp280_common_probe);

-static const struct acpi_device_id bmp280_acpi_match[] = {
-	{"BMP0280", BMP280_CHIP_ID },
-	{"BMP0180", BMP180_CHIP_ID },
-	{"BMP0085", BMP180_CHIP_ID },
-	{ },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+int bmp280_common_remove(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bmp280_data *data = iio_priv(indio_dev);

-static const struct i2c_device_id bmp280_id[] = {
-	{"bmp280", BMP280_CHIP_ID },
-	{"bmp180", BMP180_CHIP_ID },
-	{"bmp085", BMP180_CHIP_ID },
-	{ },
-};
-MODULE_DEVICE_TABLE(i2c, bmp280_id);
+	iio_device_unregister(indio_dev);
+	pm_runtime_get_sync(data->dev);
+	pm_runtime_put_noidle(data->dev);
+	pm_runtime_disable(data->dev);
+	regulator_disable(data->vdda);
+	regulator_disable(data->vddd);
+	return 0;
+}
+EXPORT_SYMBOL(bmp280_common_remove);

-static struct i2c_driver bmp280_driver = {
-	.driver = {
-		.name	= "bmp280",
-		.acpi_match_table = ACPI_PTR(bmp280_acpi_match),
-	},
-	.probe		= bmp280_probe,
-	.id_table	= bmp280_id,
+#ifdef CONFIG_PM
+static int bmp280_runtime_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bmp280_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_disable(data->vdda);
+	if (ret)
+		return ret;
+	return regulator_disable(data->vddd);
+}
+
+static int bmp280_runtime_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct bmp280_data *data = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_enable(data->vddd);
+	if (ret)
+		return ret;
+	ret = regulator_enable(data->vdda);
+	if (ret)
+		return ret;
+	msleep(data->start_up_time);
+	return data->chip_info->chip_config(data);
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops bmp280_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+
SET_RUNTIME_PM_OPS(bmp280_runtime_suspend, + bmp280_runtime_resume, NULL) }; -module_i2c_driver(bmp280_driver); +EXPORT_SYMBOL(bmp280_dev_pm_ops); MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>"); MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor"); diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c new file mode 100644 index 000000000000..03742b15b72a --- /dev/null +++ b/ drivers/iio/pressure/bmp280-i2c.c@@ -0,0 +1,91 @@ +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include "bmp280.h" + +static int bmp280_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct regmap *regmap; + const struct regmap_config *regmap_config; + + switch (id->driver_data) { + case BMP180_CHIP_ID: + regmap_config = &bmp180_regmap_config; + break; + case BMP280_CHIP_ID: + case BME280_CHIP_ID: + regmap_config = &bmp280_regmap_config; + break; + default: + return -EINVAL; + } + + regmap = devm_regmap_init_i2c(client, regmap_config); + if (IS_ERR(regmap)) { + dev_err(&client->dev, "failed to allocate register map\n"); + return PTR_ERR(regmap); + } + + return bmp280_common_probe(&client->dev, + regmap, + id->driver_data, + id->name, + client->irq); +} + +static int bmp280_i2c_remove(struct i2c_client *client) +{ + return bmp280_common_remove(&client->dev); +} + +static const struct acpi_device_id bmp280_acpi_i2c_match[] = { + {"BMP0280", BMP280_CHIP_ID }, + {"BMP0180", BMP180_CHIP_ID }, + {"BMP0085", BMP180_CHIP_ID }, + {"BME0280", BME280_CHIP_ID }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match); + +#ifdef CONFIG_OF +static const struct of_device_id bmp280_of_i2c_match[] = { + { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID }, + { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID }, + { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID }, + { .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID }, + { }, +}; +MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match); +#else +#define bmp280_of_i2c_match NULL +#endif + +static const struct i2c_device_id bmp280_i2c_id[] = { + {"bmp280", BMP280_CHIP_ID }, + {"bmp180", BMP180_CHIP_ID }, + {"bmp085", BMP180_CHIP_ID }, + {"bme280", BME280_CHIP_ID }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id); + +static struct i2c_driver bmp280_i2c_driver = { + .driver = { + .name = "bmp280", + .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match), + .of_match_table = of_match_ptr(bmp280_of_i2c_match), + .pm = &bmp280_dev_pm_ops, + }, + .probe = bmp280_i2c_probe, + .remove = bmp280_i2c_remove, + .id_table = bmp280_i2c_id, +}; +module_i2c_driver(bmp280_i2c_driver); + +MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>"); +MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c new file mode 100644 index 000000000000..6807113ec09f --- /dev/null +++ b/ drivers/iio/pressure/bmp280-regmap.c@@ -0,0 +1,84 @@ +#include <linux/device.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include "bmp280.h" + +static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BMP280_REG_CTRL_MEAS: + case BMP280_REG_RESET: + return true; + default: + return false; + }; +} + +static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case 
BMP180_REG_OUT_XLSB: + case BMP180_REG_OUT_LSB: + case BMP180_REG_OUT_MSB: + case BMP280_REG_CTRL_MEAS: + return true; + default: + return false; + } +} + +const struct regmap_config bmp180_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = BMP180_REG_OUT_XLSB, + .cache_type = REGCACHE_RBTREE, + + .writeable_reg = bmp180_is_writeable_reg, + .volatile_reg = bmp180_is_volatile_reg, +}; +EXPORT_SYMBOL(bmp180_regmap_config); + +static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BMP280_REG_CONFIG: + case BMP280_REG_CTRL_HUMIDITY: + case BMP280_REG_CTRL_MEAS: + case BMP280_REG_RESET: + return true; + default: + return false; + }; +} + +static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BMP280_REG_HUMIDITY_LSB: + case BMP280_REG_HUMIDITY_MSB: + case BMP280_REG_TEMP_XLSB: + case BMP280_REG_TEMP_LSB: + case BMP280_REG_TEMP_MSB: + case BMP280_REG_PRESS_XLSB: + case BMP280_REG_PRESS_LSB: + case BMP280_REG_PRESS_MSB: + case BMP280_REG_STATUS: + return true; + default: + return false; + } +} + +const struct regmap_config bmp280_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = BMP280_REG_HUMIDITY_LSB, + .cache_type = REGCACHE_RBTREE, + + .writeable_reg = bmp280_is_writeable_reg, + .volatile_reg = bmp280_is_volatile_reg, +}; +EXPORT_SYMBOL(bmp280_regmap_config); diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c new file mode 100644 index 000000000000..17bc95586f9e --- /dev/null +++ b/ drivers/iio/pressure/bmp280-spi.c@@ -0,0 +1,125 @@ +/* + * SPI interface for the BMP280 driver + * + * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c + */ +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/err.h> +#include <linux/regmap.h> + +#include "bmp280.h" + +static int bmp280_regmap_spi_write(void *context, const void *data, + size_t count) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + u8 buf[2]; + + memcpy(buf, data, 2); + /* + * The SPI register address (= full register address without bit 7) and + * the write command (bit7 = RW = '0') + */ + buf[0] &= ~0x80; + + return spi_write_then_read(spi, buf, 2, NULL, 0); +} + +static int bmp280_regmap_spi_read(void *context, const void *reg, + size_t reg_size, void *val, size_t val_size) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + return spi_write_then_read(spi, reg, reg_size, val, val_size); +} + +static struct regmap_bus bmp280_regmap_bus = { + .write = bmp280_regmap_spi_write, + .read = bmp280_regmap_spi_read, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, +}; + +static int bmp280_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct regmap *regmap; + const struct regmap_config *regmap_config; + int ret; + + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "spi_setup failed!\n"); + return ret; + } + + switch (id->driver_data) { + case BMP180_CHIP_ID: + regmap_config = &bmp180_regmap_config; + break; + case BMP280_CHIP_ID: + case BME280_CHIP_ID: + regmap_config = &bmp280_regmap_config; + break; + default: + return -EINVAL; + } + + regmap = devm_regmap_init(&spi->dev, + &bmp280_regmap_bus, + &spi->dev, + regmap_config); + if (IS_ERR(regmap)) { + dev_err(&spi->dev, "failed to allocate register map\n"); + return PTR_ERR(regmap); + } + + return 
bmp280_common_probe(&spi->dev, + regmap, + id->driver_data, + id->name, + spi->irq); +} + +static int bmp280_spi_remove(struct spi_device *spi) +{ + return bmp280_common_remove(&spi->dev); +} + +static const struct of_device_id bmp280_of_spi_match[] = { + { .compatible = "bosch,bmp085", }, + { .compatible = "bosch,bmp180", }, + { .compatible = "bosch,bmp181", }, + { .compatible = "bosch,bmp280", }, + { .compatible = "bosch,bme280", }, + { }, +}; +MODULE_DEVICE_TABLE(of, bmp280_of_spi_match); + +static const struct spi_device_id bmp280_spi_id[] = { + { "bmp180", BMP180_CHIP_ID }, + { "bmp181", BMP180_CHIP_ID }, + { "bmp280", BMP280_CHIP_ID }, + { "bme280", BME280_CHIP_ID }, + { } +}; +MODULE_DEVICE_TABLE(spi, bmp280_spi_id); + +static struct spi_driver bmp280_spi_driver = { + .driver = { + .name = "bmp280", + .of_match_table = bmp280_of_spi_match, + .pm = &bmp280_dev_pm_ops, + }, + .id_table = bmp280_spi_id, + .probe = bmp280_spi_probe, + .remove = bmp280_spi_remove, +}; +module_spi_driver(bmp280_spi_driver); + +MODULE_DESCRIPTION("BMP280 SPI bus driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h new file mode 100644 index 000000000000..2c770e13be0e --- /dev/null +++ b/ drivers/iio/pressure/bmp280.h@@ -0,0 +1,112 @@ +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/regmap.h> + +/* BMP280 specific registers */ +#define BMP280_REG_HUMIDITY_LSB 0xFE +#define BMP280_REG_HUMIDITY_MSB 0xFD +#define BMP280_REG_TEMP_XLSB 0xFC +#define BMP280_REG_TEMP_LSB 0xFB +#define BMP280_REG_TEMP_MSB 0xFA +#define BMP280_REG_PRESS_XLSB 0xF9 +#define BMP280_REG_PRESS_LSB 0xF8 +#define BMP280_REG_PRESS_MSB 0xF7 + +#define BMP280_REG_CONFIG 0xF5 +#define BMP280_REG_CTRL_MEAS 0xF4 +#define BMP280_REG_STATUS 0xF3 +#define BMP280_REG_CTRL_HUMIDITY 0xF2 + +/* Due to non linear mapping, and data sizes we can't do a bulk read */ +#define BMP280_REG_COMP_H1 0xA1 +#define BMP280_REG_COMP_H2 0xE1 +#define BMP280_REG_COMP_H3 0xE3 +#define BMP280_REG_COMP_H4 0xE4 +#define BMP280_REG_COMP_H5 0xE5 +#define BMP280_REG_COMP_H6 0xE7 + +#define BMP280_REG_COMP_TEMP_START 0x88 +#define BMP280_COMP_TEMP_REG_COUNT 6 + +#define BMP280_REG_COMP_PRESS_START 0x8E +#define BMP280_COMP_PRESS_REG_COUNT 18 + +#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2)) +#define BMP280_FILTER_OFF 0 +#define BMP280_FILTER_2X BIT(2) +#define BMP280_FILTER_4X BIT(3) +#define BMP280_FILTER_8X (BIT(3) | BIT(2)) +#define BMP280_FILTER_16X BIT(4) + +#define BMP280_OSRS_HUMIDITY_MASK (BIT(2) | BIT(1) | BIT(0)) +#define BMP280_OSRS_HUMIDITIY_X(osrs_h) ((osrs_h) << 0) +#define BMP280_OSRS_HUMIDITY_SKIP 0 +#define BMP280_OSRS_HUMIDITY_1X BMP280_OSRS_HUMIDITIY_X(1) +#define BMP280_OSRS_HUMIDITY_2X BMP280_OSRS_HUMIDITIY_X(2) +#define BMP280_OSRS_HUMIDITY_4X BMP280_OSRS_HUMIDITIY_X(3) +#define BMP280_OSRS_HUMIDITY_8X BMP280_OSRS_HUMIDITIY_X(4) +#define BMP280_OSRS_HUMIDITY_16X BMP280_OSRS_HUMIDITIY_X(5) + +#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5)) +#define BMP280_OSRS_TEMP_SKIP 0 +#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5) +#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1) +#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2) +#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3) +#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4) +#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5) + +#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2)) +#define BMP280_OSRS_PRESS_SKIP 0 +#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2) +#define BMP280_OSRS_PRESS_1X 
BMP280_OSRS_PRESS_X(1) +#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2) +#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3) +#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4) +#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5) + +#define BMP280_MODE_MASK (BIT(1) | BIT(0)) +#define BMP280_MODE_SLEEP 0 +#define BMP280_MODE_FORCED BIT(0) +#define BMP280_MODE_NORMAL (BIT(1) | BIT(0)) + +/* BMP180 specific registers */ +#define BMP180_REG_OUT_XLSB 0xF8 +#define BMP180_REG_OUT_LSB 0xF7 +#define BMP180_REG_OUT_MSB 0xF6 + +#define BMP180_REG_CALIB_START 0xAA +#define BMP180_REG_CALIB_COUNT 22 + +#define BMP180_MEAS_SCO BIT(5) +#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO) +#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO) +#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0) +#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1) +#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2) +#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3) + +/* BMP180 and BMP280 common registers */ +#define BMP280_REG_CTRL_MEAS 0xF4 +#define BMP280_REG_RESET 0xE0 +#define BMP280_REG_ID 0xD0 + +#define BMP180_CHIP_ID 0x55 +#define BMP280_CHIP_ID 0x58 +#define BME280_CHIP_ID 0x60 +#define BMP280_SOFT_RESET_VAL 0xB6 + +/* Regmap configurations */ +extern const struct regmap_config bmp180_regmap_config; +extern const struct regmap_config bmp280_regmap_config; + +/* Probe called from different transports */ +int bmp280_common_probe(struct device *dev, + struct regmap *regmap, + unsigned int chip, + const char *name, + int irq); +int bmp280_common_remove(struct device *dev); + +/* PM ops */ +extern const struct dev_pm_ops bmp280_dev_pm_ops; @@ -401,6 +401,7 @@ static const struct i2c_device_id hp206c_id[] = { {"hp206c"}, {} }; +MODULE_DEVICE_TABLE(i2c, hp206c_id); #ifdef CONFIG_ACPI static const struct acpi_device_id hp206c_acpi_match[] = { @@ -171,7 +171,7 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p) mutex_unlock(&data->lock); iio_push_to_buffers_with_timestamp(indio_dev, buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); done: iio_trigger_notify_done(indio_dev->trig); @@ -224,7 +224,8 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_get_time_ns(indio_dev)); err: iio_trigger_notify_done(indio_dev->trig); @@ -1,6 +1,6 @@ /* - * ms5637.c - Support for Measurement-Specialties ms5637 and ms8607 - * pressure & temperature sensor + * ms5637.c - Support for Measurement-Specialties MS5637, MS5805 + * MS5837 and MS8607 pressure & temperature sensor * * Copyright (c) 2015 Measurement-Specialties * @@ -11,6 +11,10 @@ * Datasheet: * http://www.meas-spec.com/downloads/MS5637-02BA03.pdf * Datasheet: + * http://www.meas-spec.com/downloads/MS5805-02BA01.pdf + * Datasheet: + * http://www.meas-spec.com/downloads/MS5837-30BA.pdf + * Datasheet: * http://www.meas-spec.com/downloads/MS8607-02BA01.pdf */ @@ -170,9 +174,12 @@ static int ms5637_probe(struct i2c_client *client, static const struct i2c_device_id ms5637_id[] = { {"ms5637", 0}, - {"ms8607-temppressure", 1}, + {"ms5805", 0}, + {"ms5837", 0}, + {"ms8607-temppressure", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, ms5637_id); static struct i2c_driver ms5637_driver = { .probe = ms5637_probe, @@ -17,6 +17,7 @@ #define LPS001WP_PRESS_DEV_NAME "lps001wp" #define LPS25H_PRESS_DEV_NAME "lps25h" #define LPS331AP_PRESS_DEV_NAME "lps331ap" +#define LPS22HB_PRESS_DEV_NAME "lps22hb" 
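A number of hunks in this series (mpl3115 and ms5611 above, lidar and sx9500 further down) switch the buffer timestamp from the global iio_get_time_ns() to the per-device iio_get_time_ns(indio_dev), so the sample can be stamped with the clock selected for that device rather than a fixed one. A minimal sketch of the handler shape those drivers converge on (the foo_* names are placeholders, not part of this series):

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct foo_data *data = iio_priv(indio_dev);

	/* fill data->buffer with one scan worth of samples here ... */

	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
					   iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}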
/** * struct st_sensors_platform_data - default press platform data @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = { int st_press_allocate_ring(struct iio_dev *indio_dev) { - return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + return iio_triggered_buffer_setup(indio_dev, NULL, &st_sensors_trigger_handler, &st_press_buffer_setup_ops); } @@ -28,21 +28,95 @@ #include <linux/iio/common/st_sensors.h> #include "st_pressure.h" +/* + * About determining pressure scaling factors + * ------------------------------------------ + * + * Datasheets specify typical pressure sensitivity so that pressure is computed + * according to the following equation : + * pressure[mBar] = raw / sensitivity + * where : + * raw the 24 bits long raw sampled pressure + * sensitivity a scaling factor specified by the datasheet in LSB/mBar + * + * IIO ABI expects pressure to be expressed as kPascal, hence pressure should be + * computed according to : + * pressure[kPascal] = pressure[mBar] / 10 + * = raw / (sensitivity * 10) (1) + * + * Finally, st_press_read_raw() returns pressure scaling factor as an + * IIO_VAL_INT_PLUS_NANO with a zero integral part and "gain" as decimal part. + * Therefore, from (1), "gain" becomes : + * gain = 10^9 / (sensitivity * 10) + * = 10^8 / sensitivity + * + * About determining temperature scaling factors and offsets + * --------------------------------------------------------- + * + * Datasheets specify typical temperature sensitivity and offset so that + * temperature is computed according to the following equation : + * temp[Celsius] = offset[Celsius] + (raw / sensitivity) + * where : + * raw the 16 bits long raw sampled temperature + * offset a constant specified by the datasheet in degree Celsius + * (sometimes zero) + * sensitivity a scaling factor specified by the datasheet in LSB/Celsius + * + * IIO ABI expects temperature to be expressed as milli degree Celsius such as + * user space should compute temperature according to : + * temp[mCelsius] = temp[Celsius] * 10^3 + * = (offset[Celsius] + (raw / sensitivity)) * 10^3 + * = ((offset[Celsius] * sensitivity) + raw) * + * (10^3 / sensitivity) (2) + * + * IIO ABI expects user space to apply offset and scaling factors to raw samples + * according to : + * temp[mCelsius] = (OFFSET + raw) * SCALE + * where : + * OFFSET an arbitrary constant exposed by device + * SCALE an arbitrary scaling factor exposed by device + * + * Matching OFFSET and SCALE with members of (2) gives : + * OFFSET = offset[Celsius] * sensitivity (3) + * SCALE = 10^3 / sensitivity (4) + * + * st_press_read_raw() returns temperature scaling factor as an + * IIO_VAL_FRACTIONAL with a 10^3 numerator and "gain2" as denominator. + * Therefore, from (3), "gain2" becomes : + * gain2 = sensitivity + * + * When declared within channel, i.e. 
for a non zero specified offset, + * st_press_read_raw() will return the latter as an IIO_VAL_FRACTIONAL such as : + * numerator = OFFSET * 10^3 + * denominator = 10^3 + * giving from (4): + * numerator = offset[Celsius] * 10^3 * sensitivity + * = offset[mCelsius] * gain2 + */ + +#define MCELSIUS_PER_CELSIUS 1000 + +/* Default pressure sensitivity */ #define ST_PRESS_LSB_PER_MBAR 4096UL #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ ST_PRESS_LSB_PER_MBAR) + +/* Default temperature sensitivity */ #define ST_PRESS_LSB_PER_CELSIUS 480UL -#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ - ST_PRESS_LSB_PER_CELSIUS) -#define ST_PRESS_NUMBER_DATA_CHANNELS 1 +#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL /* FULLSCALE */ +#define ST_PRESS_FS_AVL_1100MB 1100 #define ST_PRESS_FS_AVL_1260MB 1260 #define ST_PRESS_1_OUT_XL_ADDR 0x28 #define ST_TEMP_1_OUT_L_ADDR 0x2b -/* CUSTOM VALUES FOR LPS331AP SENSOR */ +/* + * CUSTOM VALUES FOR LPS331AP SENSOR + * See LPS331AP datasheet: + * http://www2.st.com/resource/en/datasheet/lps331ap.pdf + */ #define ST_PRESS_LPS331AP_WAI_EXP 0xbb #define ST_PRESS_LPS331AP_ODR_ADDR 0x20 #define ST_PRESS_LPS331AP_ODR_MASK 0x70 @@ -54,9 +128,6 @@ #define ST_PRESS_LPS331AP_PW_MASK 0x80 #define ST_PRESS_LPS331AP_FS_ADDR 0x23 #define ST_PRESS_LPS331AP_FS_MASK 0x30 -#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00 -#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE -#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 #define ST_PRESS_LPS331AP_BDU_MASK 0x04 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 @@ -67,9 +138,16 @@ #define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 #define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 #define ST_PRESS_LPS331AP_MULTIREAD_BIT true -#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 -/* CUSTOM VALUES FOR LPS001WP SENSOR */ +/* + * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR + */ + +/* LPS001WP pressure resolution */ +#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL +/* LPS001WP temperature resolution */ +#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL + #define ST_PRESS_LPS001WP_WAI_EXP 0xba #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 #define ST_PRESS_LPS001WP_ODR_MASK 0x30 @@ -78,13 +156,19 @@ #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 #define ST_PRESS_LPS001WP_PW_ADDR 0x20 #define ST_PRESS_LPS001WP_PW_MASK 0x40 +#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \ + (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR) #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 #define ST_PRESS_LPS001WP_BDU_MASK 0x04 #define ST_PRESS_LPS001WP_MULTIREAD_BIT true #define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28 #define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a -/* CUSTOM VALUES FOR LPS25H SENSOR */ +/* + * CUSTOM VALUES FOR LPS25H SENSOR + * See LPS25H datasheet: + * http://www2.st.com/resource/en/datasheet/lps25h.pdf + */ #define ST_PRESS_LPS25H_WAI_EXP 0xbd #define ST_PRESS_LPS25H_ODR_ADDR 0x20 #define ST_PRESS_LPS25H_ODR_MASK 0x70 @@ -94,11 +178,6 @@ #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 #define ST_PRESS_LPS25H_PW_ADDR 0x20 #define ST_PRESS_LPS25H_PW_MASK 0x80 -#define ST_PRESS_LPS25H_FS_ADDR 0x00 -#define ST_PRESS_LPS25H_FS_MASK 0x00 -#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00 -#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE -#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE #define ST_PRESS_LPS25H_BDU_ADDR 0x20 #define ST_PRESS_LPS25H_BDU_MASK 0x04 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 @@ -109,31 +188,57 @@ #define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 #define 
ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 #define ST_PRESS_LPS25H_MULTIREAD_BIT true -#define ST_PRESS_LPS25H_TEMP_OFFSET 42500 #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b +/* + * CUSTOM VALUES FOR LPS22HB SENSOR + * See LPS22HB datasheet: + * http://www2.st.com/resource/en/datasheet/lps22hb.pdf + */ + +/* LPS22HB temperature sensitivity */ +#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL + +#define ST_PRESS_LPS22HB_WAI_EXP 0xb1 +#define ST_PRESS_LPS22HB_ODR_ADDR 0x10 +#define ST_PRESS_LPS22HB_ODR_MASK 0x70 +#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01 +#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02 +#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03 +#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04 +#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05 +#define ST_PRESS_LPS22HB_PW_ADDR 0x10 +#define ST_PRESS_LPS22HB_PW_MASK 0x70 +#define ST_PRESS_LPS22HB_BDU_ADDR 0x10 +#define ST_PRESS_LPS22HB_BDU_MASK 0x02 +#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12 +#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04 +#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08 +#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12 +#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80 +#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12 +#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40 +#define ST_PRESS_LPS22HB_MULTIREAD_BIT true + static const struct iio_chan_spec st_press_1_channels[] = { { .type = IIO_PRESSURE, - .channel2 = IIO_NO_MOD, .address = ST_PRESS_1_OUT_XL_ADDR, - .scan_index = ST_SENSORS_SCAN_X, + .scan_index = 0, .scan_type = { .sign = 'u', .realbits = 24, - .storagebits = 24, + .storagebits = 32, .endianness = IIO_LE, }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), - .modified = 0, }, { .type = IIO_TEMP, - .channel2 = IIO_NO_MOD, .address = ST_TEMP_1_OUT_L_ADDR, - .scan_index = -1, + .scan_index = 1, .scan_type = { .sign = 'u', .realbits = 16, @@ -144,31 +249,29 @@ static const struct iio_chan_spec st_press_1_channels[] = { BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), - .modified = 0, }, - IIO_CHAN_SOFT_TIMESTAMP(1) + IIO_CHAN_SOFT_TIMESTAMP(2) }; static const struct iio_chan_spec st_press_lps001wp_channels[] = { { .type = IIO_PRESSURE, - .channel2 = IIO_NO_MOD, .address = ST_PRESS_LPS001WP_OUT_L_ADDR, - .scan_index = ST_SENSORS_SCAN_X, + .scan_index = 0, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, .endianness = IIO_LE, }, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .modified = 0, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), }, { .type = IIO_TEMP, - .channel2 = IIO_NO_MOD, .address = ST_TEMP_LPS001WP_OUT_L_ADDR, - .scan_index = -1, + .scan_index = 1, .scan_type = { .sign = 'u', .realbits = 16, @@ -177,10 +280,43 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_OFFSET), - .modified = 0, + BIT(IIO_CHAN_INFO_SCALE), }, - IIO_CHAN_SOFT_TIMESTAMP(1) + IIO_CHAN_SOFT_TIMESTAMP(2) +}; + +static const struct iio_chan_spec st_press_lps22hb_channels[] = { + { + .type = IIO_PRESSURE, + .address = ST_PRESS_1_OUT_XL_ADDR, + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 24, + .storagebits = 32, + .endianness = IIO_LE, + }, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + }, + { + .type = IIO_TEMP, + .address = ST_TEMP_1_OUT_L_ADDR, + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 16, + .storagebits = 16, + 
.endianness = IIO_LE, + }, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + }, + IIO_CHAN_SOFT_TIMESTAMP(2) }; static const struct st_sensor_settings st_press_sensors_settings[] = { @@ -212,11 +348,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr = ST_PRESS_LPS331AP_FS_ADDR, .mask = ST_PRESS_LPS331AP_FS_MASK, .fs_avl = { + /* + * Pressure and temperature sensitivity values + * as defined in table 3 of LPS331AP datasheet. + */ [0] = { .num = ST_PRESS_FS_AVL_1260MB, - .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, - .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, - .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN, + .gain = ST_PRESS_KPASCAL_NANO_SCALE, + .gain2 = ST_PRESS_LSB_PER_CELSIUS, }, }, }, @@ -261,7 +400,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, }, .fs = { - .addr = 0, + .fs_avl = { + /* + * Pressure and temperature resolution values + * as defined in table 3 of LPS001WP datasheet. + */ + [0] = { + .num = ST_PRESS_FS_AVL_1100MB, + .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN, + .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS, + }, + }, }, .bdu = { .addr = ST_PRESS_LPS001WP_BDU_ADDR, @@ -298,14 +447,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, }, .fs = { - .addr = ST_PRESS_LPS25H_FS_ADDR, - .mask = ST_PRESS_LPS25H_FS_MASK, .fs_avl = { + /* + * Pressure and temperature sensitivity values + * as defined in table 3 of LPS25H datasheet. + */ [0] = { .num = ST_PRESS_FS_AVL_1260MB, - .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, - .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, - .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN, + .gain = ST_PRESS_KPASCAL_NANO_SCALE, + .gain2 = ST_PRESS_LSB_PER_CELSIUS, }, }, }, @@ -326,6 +476,59 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT, .bootime = 2, }, + { + .wai = ST_PRESS_LPS22HB_WAI_EXP, + .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, + .sensors_supported = { + [0] = LPS22HB_PRESS_DEV_NAME, + }, + .ch = (struct iio_chan_spec *)st_press_lps22hb_channels, + .num_ch = ARRAY_SIZE(st_press_lps22hb_channels), + .odr = { + .addr = ST_PRESS_LPS22HB_ODR_ADDR, + .mask = ST_PRESS_LPS22HB_ODR_MASK, + .odr_avl = { + { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, }, + { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, }, + { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, }, + { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, }, + { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, }, + }, + }, + .pw = { + .addr = ST_PRESS_LPS22HB_PW_ADDR, + .mask = ST_PRESS_LPS22HB_PW_MASK, + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, + }, + .fs = { + .fs_avl = { + /* + * Pressure and temperature sensitivity values + * as defined in table 3 of LPS22HB datasheet. 
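+					 * Worked example of the scaling
+					 * derivation at the top of this file:
+					 * the 4096 LSB/mBar pressure
+					 * sensitivity gives
+					 * gain = 10^8 / 4096 = 24414, and the
+					 * 100 LSB/Celsius temperature
+					 * sensitivity gives gain2 = 100, i.e.
+					 * a reported temperature scale of
+					 * 10^3 / 100 = 10 mCelsius per LSB.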
+ */ + [0] = { + .num = ST_PRESS_FS_AVL_1260MB, + .gain = ST_PRESS_KPASCAL_NANO_SCALE, + .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS, + }, + }, + }, + .bdu = { + .addr = ST_PRESS_LPS22HB_BDU_ADDR, + .mask = ST_PRESS_LPS22HB_BDU_MASK, + }, + .drdy_irq = { + .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR, + .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK, + .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR, + .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK, + .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR, + .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK, + .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + }, + .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT, + }, }; static int st_press_write_raw(struct iio_dev *indio_dev, @@ -364,26 +567,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - *val = 0; - switch (ch->type) { case IIO_PRESSURE: + *val = 0; *val2 = press_data->current_fullscale->gain; - break; + return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: + *val = MCELSIUS_PER_CELSIUS; *val2 = press_data->current_fullscale->gain2; - break; + return IIO_VAL_FRACTIONAL; default: err = -EINVAL; goto read_error; } - return IIO_VAL_INT_PLUS_NANO; case IIO_CHAN_INFO_OFFSET: switch (ch->type) { case IIO_TEMP: - *val = 425; - *val2 = 10; + *val = ST_PRESS_MILLI_CELSIUS_OFFSET * + press_data->current_fullscale->gain2; + *val2 = MCELSIUS_PER_CELSIUS; break; default: err = -EINVAL; @@ -425,6 +628,7 @@ static const struct iio_info press_info = { static const struct iio_trigger_ops st_press_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, + .validate_device = st_sensors_validate_device, }; #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) #else @@ -441,23 +645,30 @@ int st_press_common_probe(struct iio_dev *indio_dev) indio_dev->info = &press_info; mutex_init(&press_data->tb.buf_lock); - st_sensors_power_enable(indio_dev); + err = st_sensors_power_enable(indio_dev); + if (err) + return err; err = st_sensors_check_device_support(indio_dev, ARRAY_SIZE(st_press_sensors_settings), st_press_sensors_settings); if (err < 0) - return err; - - press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS; + goto st_press_power_off; + + /* + * Skip timestamping channel while declaring available channels to + * common st_sensor layer. Look at st_sensors_get_buffer_element() to + * see how timestamps are explicitly pushed as last samples block + * element. 
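+	 * For example, the LPS22HB above declares three channels (pressure,
+	 * temperature and the soft timestamp), so its num_data_channels
+	 * ends up as 2.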
+ */ + press_data->num_data_channels = press_data->sensor_settings->num_ch - 1; press_data->multiread_bit = press_data->sensor_settings->multi_read_bit; indio_dev->channels = press_data->sensor_settings->ch; indio_dev->num_channels = press_data->sensor_settings->num_ch; - if (press_data->sensor_settings->fs.addr != 0) - press_data->current_fullscale = - (struct st_sensor_fullscale_avl *) - &press_data->sensor_settings->fs.fs_avl[0]; + press_data->current_fullscale = + (struct st_sensor_fullscale_avl *) + &press_data->sensor_settings->fs.fs_avl[0]; press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz; @@ -469,11 +680,11 @@ int st_press_common_probe(struct iio_dev *indio_dev) err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); if (err < 0) - return err; + goto st_press_power_off; err = st_press_allocate_ring(indio_dev); if (err < 0) - return err; + goto st_press_power_off; if (irq > 0) { err = st_sensors_allocate_trigger(indio_dev, @@ -496,6 +707,8 @@ st_press_device_register_error: st_sensors_deallocate_trigger(indio_dev); st_press_probe_trigger_error: st_press_deallocate_ring(indio_dev); +st_press_power_off: + st_sensors_power_disable(indio_dev); return err; } @@ -32,6 +32,10 @@ static const struct of_device_id st_press_of_match[] = { .compatible = "st,lps331ap-press", .data = LPS331AP_PRESS_DEV_NAME, }, + { + .compatible = "st,lps22hb-press", + .data = LPS22HB_PRESS_DEV_NAME, + }, {}, }; MODULE_DEVICE_TABLE(of, st_press_of_match); @@ -50,6 +50,7 @@ static const struct spi_device_id st_press_id_table[] = { { LPS001WP_PRESS_DEV_NAME }, { LPS25H_PRESS_DEV_NAME }, { LPS331AP_PRESS_DEV_NAME }, + { LPS22HB_PRESS_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(spi, st_press_id_table); @@ -64,6 +64,7 @@ struct as3935_state { struct delayed_work work; u32 tune_cap; + u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ u8 buf[2] ____cacheline_aligned; }; @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = { .type = IIO_PROXIMITY, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_PROCESSED), + BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, .scan_type = { .sign = 'u', @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev, /* storm out of range */ if (*val == AS3935_DATA_MASK) return -EINVAL; - *val *= 1000; + + if (m == IIO_CHAN_INFO_PROCESSED) + *val *= 1000; + break; + case IIO_CHAN_INFO_SCALE: + *val = 1000; break; default: return -EINVAL; @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) ret = as3935_read(st, AS3935_DATA, &val); if (ret) goto err_read; - val &= AS3935_DATA_MASK; - val *= 1000; - iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); + st->buffer[0] = val & AS3935_DATA_MASK; + iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, + pf->timestamp); err_read: iio_trigger_notify_done(indio_dev->trig); @@ -224,10 +231,16 @@ static void as3935_event_work(struct work_struct *work) { struct as3935_state *st; int val; + int ret; st = container_of(work, struct as3935_state, work.work); - as3935_read(st, AS3935_INT, &val); + ret = as3935_read(st, AS3935_INT, &val); + if (ret) { + dev_warn(&st->spi->dev, "read error\n"); + return; + } + val &= AS3935_INT_MASK; switch (val) { @@ -235,7 +248,7 @@ static void as3935_event_work(struct work_struct *work) iio_trigger_poll(st->trig); break; case AS3935_NOISE_INT: - dev_warn(&st->spi->dev, "noise level is too high"); + dev_warn(&st->spi->dev, "noise level is too 
high\n"); break; } } @@ -339,7 +352,6 @@ static int as3935_probe(struct spi_device *spi) st = iio_priv(indio_dev); st->spi = spi; - st->tune_cap = 0; spi_set_drvdata(spi, indio_dev); mutex_init(&st->lock); @@ -461,4 +473,3 @@ module_spi_driver(as3935_driver); MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>"); MODULE_DESCRIPTION("AS3935 lightning sensor"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:as3935"); @@ -203,22 +203,19 @@ static int lidar_read_raw(struct iio_dev *indio_dev, struct lidar_data *data = iio_priv(indio_dev); int ret = -EINVAL; - mutex_lock(&indio_dev->mlock); - - if (iio_buffer_enabled(indio_dev) && mask == IIO_CHAN_INFO_RAW) { - ret = -EBUSY; - goto error_busy; - } - switch (mask) { case IIO_CHAN_INFO_RAW: { u16 reg; + if (iio_device_claim_direct_mode(indio_dev)) + return -EBUSY; + ret = lidar_get_measurement(data, ®); if (!ret) { *val = reg; ret = IIO_VAL_INT; } + iio_device_release_direct_mode(indio_dev); break; } case IIO_CHAN_INFO_SCALE: @@ -228,9 +225,6 @@ static int lidar_read_raw(struct iio_dev *indio_dev, break; } -error_busy: - mutex_unlock(&indio_dev->mlock); - return ret; } @@ -244,7 +238,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private) ret = lidar_get_measurement(data, data->buffer); if (!ret) { iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); } else if (ret != -EINVAL) { dev_err(&data->client->dev, "cannot read LIDAR measurement"); } @@ -492,7 +492,7 @@ static void sx9500_push_events(struct iio_dev *indio_dev) dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING; ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan, IIO_EV_TYPE_THRESH, dir); - iio_push_event(indio_dev, ev, iio_get_time_ns()); + iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev)); data->prox_stat[chan] = new_prox; } } @@ -669,7 +669,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private) } iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, - iio_get_time_ns()); + iio_get_time_ns(indio_dev)); out: mutex_unlock(&data->mutex); @@ -174,6 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = { {"tsys02d", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, tsys02d_id); static struct i2c_driver tsys02d_driver = { .probe = tsys02d_probe, @@ -24,6 +24,18 @@ config IIO_INTERRUPT_TRIGGER To compile this driver as a module, choose M here: the module will be called iio-trig-interrupt. +config IIO_TIGHTLOOP_TRIGGER + tristate "A kthread based hammering loop trigger" + depends on IIO_SW_TRIGGER + help + An experimental trigger, used to allow sensors to be sampled as fast + as possible under the limitations of whatever else is going on. + Uses a tight loop in a kthread. Will only work with lower half only + trigger consumers. + + To compile this driver as a module, choose M here: the + module will be called iio-trig-loop. + config IIO_SYSFS_TRIGGER tristate "SYSFS trigger" depends on SYSFS @@ -7,3 +7,4 @@ obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o +obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c new file mode 100644 index 000000000000..dc6be28f96fe --- /dev/null +++ b/ drivers/iio/trigger/iio-trig-loop.c@@ -0,0 +1,143 @@ +/* + * Copyright 2016 Jonathan Cameron <jic23@kernel.org> + * + * Licensed under the GPL-2. 
+ * + * Based on a mashup of the hrtimer trigger and continuous sampling proposal of + * Gregor Boirie <gregor.boirie@parrot.com> + * + * Note this is still rather experimental and may eat babies. + * + * Todo + * * Protect against connection of devices that 'need' the top half + * handler. + * * Work out how to run top half handlers in this context if it is + * safe to do so (timestamp grabbing for example) + * + * Tested against a max1363. Used about 33% cpu for the thread and 20% + * for generic_buffer piping to /dev/null. Watermark set at 64 on a 128 + * element kfifo buffer. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/irq_work.h> +#include <linux/kthread.h> +#include <linux/freezer.h> + +#include <linux/iio/iio.h> +#include <linux/iio/trigger.h> +#include <linux/iio/sw_trigger.h> + +struct iio_loop_info { + struct iio_sw_trigger swt; + struct task_struct *task; +}; + +static struct config_item_type iio_loop_type = { + .ct_owner = THIS_MODULE, +}; + +static int iio_loop_thread(void *data) +{ + struct iio_trigger *trig = data; + + set_freezable(); + + do { + iio_trigger_poll_chained(trig); + } while (likely(!kthread_freezable_should_stop(NULL))); + + return 0; +} + +static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state) +{ + struct iio_loop_info *loop_trig = iio_trigger_get_drvdata(trig); + + if (state) { + loop_trig->task = kthread_run(iio_loop_thread, + trig, trig->name); + if (unlikely(IS_ERR(loop_trig->task))) { + dev_err(&trig->dev, + "failed to create trigger loop thread\n"); + return PTR_ERR(loop_trig->task); + } + } else { + kthread_stop(loop_trig->task); + } + + return 0; +} + +static const struct iio_trigger_ops iio_loop_trigger_ops = { + .set_trigger_state = iio_loop_trigger_set_state, + .owner = THIS_MODULE, +}; + +static struct iio_sw_trigger *iio_trig_loop_probe(const char *name) +{ + struct iio_loop_info *trig_info; + int ret; + + trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); + if (!trig_info) + return ERR_PTR(-ENOMEM); + + trig_info->swt.trigger = iio_trigger_alloc("%s", name); + if (!trig_info->swt.trigger) { + ret = -ENOMEM; + goto err_free_trig_info; + } + + iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info); + trig_info->swt.trigger->ops = &iio_loop_trigger_ops; + + ret = iio_trigger_register(trig_info->swt.trigger); + if (ret) + goto err_free_trigger; + + iio_swt_group_init_type_name(&trig_info->swt, name, &iio_loop_type); + + return &trig_info->swt; + +err_free_trigger: + iio_trigger_free(trig_info->swt.trigger); +err_free_trig_info: + kfree(trig_info); + + return ERR_PTR(ret); +} + +static int iio_trig_loop_remove(struct iio_sw_trigger *swt) +{ + struct iio_loop_info *trig_info; + + trig_info = iio_trigger_get_drvdata(swt->trigger); + + iio_trigger_unregister(swt->trigger); + iio_trigger_free(swt->trigger); + kfree(trig_info); + + return 0; +} + +static const struct iio_sw_trigger_ops iio_trig_loop_ops = { + .probe = iio_trig_loop_probe, + .remove = iio_trig_loop_remove, +}; + +static struct iio_sw_trigger_type iio_trig_loop = { + .name = "loop", + .owner = THIS_MODULE, + .ops = &iio_trig_loop_ops, +}; + +module_iio_sw_trigger_driver(iio_trig_loop); + +MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); +MODULE_DESCRIPTION("Loop based trigger for the iio subsystem"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:iio-trig-loop"); @@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, { int ret = 0; struct 
net_device *old_net_dev; + enum ib_gid_type old_gid_type; /* in rdma_cap_roce_gid_table, this funciton should be protected by a * sleep-able lock. @@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, } old_net_dev = table->data_vec[ix].attr.ndev; + old_gid_type = table->data_vec[ix].attr.gid_type; if (old_net_dev && old_net_dev != attr->ndev) dev_put(old_net_dev); /* if modify_gid failed, just delete the old gid */ @@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port, attr = &zattr; table->data_vec[ix].context = NULL; } - if (default_gid) - table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; + memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); + if (default_gid) { + table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; + if (action == GID_TABLE_WRITE_ACTION_DEL) + table->data_vec[ix].attr.gid_type = old_gid_type; + } if (table->data_vec[ix].attr.ndev && table->data_vec[ix].attr.ndev != old_net_dev) dev_hold(table->data_vec[ix].attr.ndev); @@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, for (ix = 0; ix < table->sz; ix++) if (table->data_vec[ix].attr.ndev == ndev) - if (!del_gid(ib_dev, port, table, ix, false)) + if (!del_gid(ib_dev, port, table, ix, + !!(table->data_vec[ix].props & + GID_TABLE_ENTRY_DEFAULT))) deleted = true; write_unlock_irq(&table->rwlock); @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id) work->cm_event.event = IB_CM_USER_ESTABLISHED; /* Check if the device started its remove_one */ - spin_lock_irq(&cm.lock); + spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) { queue_delayed_work(cm.wq, &work->work, 0); } else { kfree(work); ret = -ENODEV; } - spin_unlock_irq(&cm.lock); + spin_unlock_irqrestore(&cm.lock, flags); out: return ret; @@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv) complete(&id_priv->comp); } -static int cma_disable_callback(struct rdma_id_private *id_priv, - enum rdma_cm_state state) -{ - mutex_lock(&id_priv->handler_mutex); - if (id_priv->state != state) { - mutex_unlock(&id_priv->handler_mutex); - return -EINVAL; - } - return 0; -} - struct rdma_cm_id *rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_port_space ps, @@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) struct rdma_cm_event event; int ret = 0; + mutex_lock(&id_priv->handler_mutex); if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && - cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || + id_priv->state != RDMA_CM_CONNECT) || (ib_event->event == IB_CM_TIMEWAIT_EXIT && - cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) - return 0; + id_priv->state != RDMA_CM_DISCONNECT)) + goto out; memset(&event, 0, sizeof event); switch (ib_event->event) { @@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) { - struct rdma_id_private *listen_id, *conn_id; + struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event; struct net_device *net_dev; int offset, ret; @@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) goto net_dev_put; } - if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { + mutex_lock(&listen_id->handler_mutex); + if (listen_id->state != RDMA_CM_LISTEN) { ret = -ECONNABORTED; - goto 
net_dev_put; + goto err1; } memset(&event, 0, sizeof event); @@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; - if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_CONNECT) + goto out; memset(&event, 0, sizeof event); switch (iw_event->event) { @@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) return ret; } +out: mutex_unlock(&id_priv->handler_mutex); return ret; } @@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, struct rdma_cm_id *new_cm_id; struct rdma_id_private *listen_id, *conn_id; struct rdma_cm_event event; - int ret; + int ret = -ECONNABORTED; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; listen_id = cm_id->context; - if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) - return -ECONNABORTED; + + mutex_lock(&listen_id->handler_mutex); + if (listen_id->state != RDMA_CM_LISTEN) + goto out; /* Create a new RDMA id for the new IW CM ID */ new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, @@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; int ret = 0; - if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_CONNECT) + goto out; memset(&event, 0, sizeof event); switch (ib_event->event) { @@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) struct rdma_id_private *id_priv; struct cma_multicast *mc = multicast->context; struct rdma_cm_event event; - int ret; + int ret = 0; id_priv = mc->id_priv; - if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && - cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) - return 0; + mutex_lock(&id_priv->handler_mutex); + if (id_priv->state != RDMA_CM_ADDR_BOUND && + id_priv->state != RDMA_CM_ADDR_RESOLVED) + goto out; if (!status) status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); @@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) return 0; } +out: mutex_unlock(&id_priv->handler_mutex); return 0; } @@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; if (addr->sa_family == AF_INET) { - if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { + mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, true); - if (!err) { - mc->igmp_joined = true; - mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; + if (!err) + mc->igmp_joined = true; } } else { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) @@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device, if (err || port_attr->subnet_prefix) return err; + if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) + return 0; + err = ib_query_gid(device, port_num, 0, &gid, NULL); if (err) return err; @@ -1024,7 +1027,8 @@ static int __init ib_core_init(void) goto err_mad; } - if (ib_add_ibnl_clients()) { + ret = 
ib_add_ibnl_clients(); + if (ret) { pr_warn("Couldn't register ibnl clients\n"); goto err_sa; } @@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, if (!nlmsg_request) { pr_info("%s: Could not find a matching request (seq = %u)\n", __func__, msg_seq); - return -EINVAL; + return -EINVAL; } pm_msg = nlmsg_request->req_buffer; local_sockaddr = (struct sockaddr_storage *) @@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) /* Now, check to see if there are any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */ - kfree(method); - class->method_table[mgmt_class] = NULL; - /* Any management classes left ? */ + kfree(method); + class->method_table[mgmt_class] = NULL; + /* Any management classes left ? */ if (!check_class_table(class)) { /* If not, release management class table */ kfree(class); @@ -530,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192); static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224); static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256); static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288); +static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320); /* * Counters added by extended set @@ -560,6 +561,7 @@ static struct attribute *pma_attrs[] = { &port_pma_attr_port_rcv_data.attr.attr, &port_pma_attr_port_xmit_packets.attr.attr, &port_pma_attr_port_rcv_packets.attr.attr, + &port_pma_attr_port_xmit_wait.attr.attr, NULL }; @@ -579,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = { &port_pma_attr_ext_port_xmit_data.attr.attr, &port_pma_attr_ext_port_rcv_data.attr.attr, &port_pma_attr_ext_port_xmit_packets.attr.attr, + &port_pma_attr_port_xmit_wait.attr.attr, &port_pma_attr_ext_port_rcv_packets.attr.attr, &port_pma_attr_ext_unicast_rcv_packets.attr.attr, &port_pma_attr_ext_unicast_xmit_packets.attr.attr, @@ -604,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = { &port_pma_attr_ext_port_rcv_data.attr.attr, &port_pma_attr_ext_port_xmit_packets.attr.attr, &port_pma_attr_ext_port_rcv_packets.attr.attr, + &port_pma_attr_port_xmit_wait.attr.attr, NULL }; @@ -889,9 +893,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) static void setup_hw_stats(struct ib_device *device, struct ib_port *port, u8 port_num) { - struct attribute_group *hsag = NULL; + struct attribute_group *hsag; struct rdma_hw_stats *stats; - int i = 0, ret; + int i, ret; stats = device->alloc_hw_stats(device, port_num); @@ -899,19 +903,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, return; if (!stats->names || stats->num_counters <= 0) - goto err; + goto err_free_stats; + /* + * Two extra attribue elements here, one for the lifespan entry and + * one to NULL terminate the list for the sysfs core code + */ hsag = kzalloc(sizeof(*hsag) + - // 1 extra for the lifespan config entry - sizeof(void *) * (stats->num_counters + 1), + sizeof(void *) * (stats->num_counters + 2), GFP_KERNEL); if (!hsag) - return; + goto err_free_stats; ret = device->get_hw_stats(device, stats, port_num, stats->num_counters); if (ret != stats->num_counters) - goto err; + goto err_free_hsag; stats->timestamp = jiffies; @@ -922,10 +929,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); if (!hsag->attrs[i]) goto err; + sysfs_attr_init(hsag->attrs[i]); } /* treat an error here as non-fatal */ hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); + if (hsag->attrs[i]) + 
sysfs_attr_init(hsag->attrs[i]); if (port) { struct kobject *kobj = &port->kobj; @@ -946,10 +956,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, return; err: - kfree(stats); for (; i >= 0; i--) kfree(hsag->attrs[i]); +err_free_hsag: kfree(hsag); +err_free_stats: + kfree(stats); return; } @@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file, struct ib_srq *srq = NULL; struct ib_qp *qp; char *buf; - struct ib_qp_init_attr attr; + struct ib_qp_init_attr attr = {}; struct ib_uverbs_ex_create_qp_resp resp; int ret; @@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, ah_attr->grh.dgid = sgid; if (!rdma_cap_eth_ah(device, port_num)) { - ret = ib_find_cached_gid_by_port(device, &dgid, - IB_GID_TYPE_IB, - port_num, NULL, - &gid_index); - if (ret) - return ret; + if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { + ret = ib_find_cached_gid_by_port(device, &dgid, + IB_GID_TYPE_IB, + port_num, NULL, + &gid_index); + if (ret) + return ret; + } else { + gid_index = 0; + } } ah_attr->grh.sgid_index = (u8) gid_index; @@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) const struct cpumask *node_mask, *proc_mask = tsk_cpus_allowed(current); struct cpu_mask_set *set = &dd->affinity->proc; - char buf[1024]; /* * check whether process/context affinity has already * been set */ if (cpumask_weight(proc_mask) == 1) { - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); - hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", - current->pid, current->comm, buf); + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", + current->pid, current->comm, + cpumask_pr_args(proc_mask)); /* * Mark the pre-set CPU as used. This is atomic so we don't * need the lock @@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) cpumask_set_cpu(cpu, &set->used); goto done; } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); - hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", - current->pid, current->comm, buf); + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", + current->pid, current->comm, + cpumask_pr_args(proc_mask)); goto done; } @@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? 
&dd->affinity->rcv_intr.mask : &dd->affinity->rcv_intr.used)); - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); - hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); + hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", + cpumask_pr_args(intrs)); /* * If we don't have a NUMA node requested, preference is towards @@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) if (node == -1) node = dd->node; node_mask = cpumask_of_node(node); - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); - hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); + hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node, + cpumask_pr_args(node_mask)); /* diff will hold all unused cpus */ cpumask_andnot(diff, &set->mask, &set->used); - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); - hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); + hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff)); /* get cpumask of available CPUs on preferred NUMA */ cpumask_and(mask, diff, node_mask); - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); - hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); + hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask)); /* * At first, we don't want to place processes on the same @@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) cpumask_andnot(diff, &set->mask, &set->used); cpumask_andnot(mask, diff, node_mask); } - scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); - hfi1_cdbg(PROC, "possible CPUs for process %s", buf); + hfi1_cdbg(PROC, "possible CPUs for process %*pbl", + cpumask_pr_args(mask)); cpu = cpumask_first(mask); if (cpu >= nr_cpu_ids) /* empty */ @@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); static void dc_start(struct hfi1_devdata *); static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, unsigned int *np); -static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); +static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); /* * Error interrupt table entry. 
This is used as input to the interrupt @@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work) } reset_neighbor_info(ppd); - if (ppd->mgmt_allowed) - remove_full_mgmt_pkey(ppd); /* disable the port */ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); @@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); ppd->pkeys[2] = FULL_MGMT_P_KEY; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + hfi1_event_pkey_change(ppd->dd, ppd->port); } -static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) +static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) { - ppd->pkeys[2] = 0; - (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + if (ppd->pkeys[2] != 0) { + ppd->pkeys[2] = 0; + (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); + hfi1_event_pkey_change(ppd->dd, ppd->port); + } } /* @@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) * save first 2 flits in the packet that caused * the error */ - dd->err_info_rcvport.packet_flit1 = hdr0; - dd->err_info_rcvport.packet_flit2 = hdr1; + dd->err_info_rcvport.packet_flit1 = hdr0; + dd->err_info_rcvport.packet_flit2 = hdr1; } switch (info) { case 1: @@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd) return 0; } + /* + * FULL_MGMT_P_KEY is cleared from the pkey table, so that the + * pkey table can be configured properly if the HFI unit is connected + * to switch port with MgmtAllowed=NO + */ + clear_full_mgmt_pkey(ppd); + return set_link_state(ppd, HLS_DN_POLL); } @@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd) u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) & SEND_LEN_CHECK1_LEN_VL15_MASK) << SEND_LEN_CHECK1_LEN_VL15_SHIFT; - int i; + int i, j; u32 thres; for (i = 0; i < ppd->vls_supported; i++) { @@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd) sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu, dd->rcd[0]->rcvhdrqentsize)); - sc_set_cr_threshold(dd->vld[i].sc, thres); + for (j = 0; j < INIT_SC_PER_VL; j++) + sc_set_cr_threshold( + pio_select_send_context_vl(dd, j, i), + thres); } thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), sc_mtu_to_threshold(dd->vld[15].sc, @@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque) hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); } -mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); + mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); } #define C_MAX_NAME 13 /* 12 chars + one for /0 */ @@ -14101,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd) { unsigned long flags; struct hfi1_devdata *tmp, *peer = NULL; + struct hfi1_asic_data *asic_data; int ret = 0; + /* pre-allocate the asic structure in case we are the first device */ + asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); + if (!asic_data) + return -ENOMEM; + spin_lock_irqsave(&hfi1_devs_lock, flags); /* Find our peer device */ list_for_each_entry(tmp, &hfi1_dev_list, list) { @@ -14114,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd) } if (peer) { + /* use already allocated structure */ dd->asic_data = peer->asic_data; + kfree(asic_data); } else { - dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); - if (!dd->asic_data) { - ret = -ENOMEM; - goto done; - } + dd->asic_data = asic_data; mutex_init(&dd->asic_data->asic_resource_mutex); } dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ - -done: 
spin_unlock_irqrestore(&hfi1_devs_lock, flags); return ret; } @@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, switch (cmd) { case HFI1_IOCTL_ASSIGN_CTXT: + if (uctxt) + return -EINVAL; + if (copy_from_user(&uinfo, (struct hfi1_user_info __user *)arg, sizeof(uinfo))) @@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) dma_free_coherent(&dd->pcidev->dev, sizeof(u64), (void *)dd->rcvhdrtail_dummy_kvaddr, dd->rcvhdrtail_dummy_physaddr); - dd->rcvhdrtail_dummy_kvaddr = NULL; + dd->rcvhdrtail_dummy_kvaddr = NULL; } for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { @@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = 0, j, pidx, initfail; - struct hfi1_devdata *dd = NULL; + struct hfi1_devdata *dd = ERR_PTR(-EINVAL); struct hfi1_pportdata *ppd; /* First, lock the non-writable module parameters */ @@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp) memset(data, 0, size); } +void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) +{ + struct ib_event event; + + event.event = IB_EVENT_PKEY_CHANGE; + event.device = &dd->verbs_dev.rdi.ibdev; + event.element.port_num = port; + ib_dispatch_event(&event); +} + static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) { struct ib_mad_send_buf *send_buf; @@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) } if (changed) { - struct ib_event event; - (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); - - event.event = IB_EVENT_PKEY_CHANGE; - event.device = &dd->verbs_dev.rdi.ibdev; - event.element.port_num = port; - ib_dispatch_event(&event); + hfi1_event_pkey_change(dd, port); } + return 0; } @@ -434,4 +434,6 @@ struct sc2vlnt { COUNTER_MASK(1, 3) | \ COUNTER_MASK(1, 4)) +void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); + #endif /* _HFI1_MAD_H */ @@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) /* counter is reset if occupancy count changes */ if (reg != reg_prev) loop = 0; - if (loop > 500) { + if (loop > 50000) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", @@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list) } /* + * Set credit return threshold for the kernel send context + */ +static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) +{ + u32 thres; + + thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], + 50), + sc_mtu_to_threshold(dd->kernel_send_context[scontext], + dd->vld[i].mtu, + dd->rcd[0]->rcvhdrqentsize)); + sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); +} + +/* * pio_map_init - called when #vls change * @dd: hfi1_devdata * @port: port number @@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) if (!newmap->map[i]) goto bail; newmap->map[i]->mask = (1 << ilog2(sz)) - 1; - /* assign send contexts */ + /* + * assign send contexts and + * adjust credit return threshold + */ for (j = 0; j < sz; j++) { - if (dd->kernel_send_context[scontext]) + if (dd->kernel_send_context[scontext]) { newmap->map[i]->ksc[j] = dd->kernel_send_context[scontext]; + set_threshold(dd, scontext, i); + } if (++scontext >= first_scontext + vl_scontexts[i]) /* wrap back to first send context */ @@ -579,7 +579,8 @@ int 
qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) if (ppd->qsfp_info.cache_valid) { if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) - sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); + snprintf(lenstr, sizeof(lenstr), "%dM ", + cache[QSFP_MOD_LEN_OFFS]); power_byte = cache[QSFP_MOD_PWR_OFFS]; sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", @@ -214,19 +214,6 @@ const char *print_u32_array( return ret; } -const char *print_u64_array( - struct trace_seq *p, - u64 *arr, int len) -{ - int i; - const char *ret = trace_seq_buffer_ptr(p); - - for (i = 0; i < len; i++) - trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]); - trace_seq_putc(p, 0); - return ret; -} - __hfi1_trace_fn(PKT); __hfi1_trace_fn(PROC); __hfi1_trace_fn(SDMA); @@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) u32 tlen = packet->tlen; struct rvt_qp *qp = packet->qp; bool has_grh = rcv_flags & HFI1_HAS_GRH; - bool sc4_bit = has_sc4_bit(packet); - u8 sc; + u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf); u32 bth1; int is_mcast; struct ib_grh *grh = NULL; @@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) */ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; - u8 sl, sc5; + u8 sl; - sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; - sc5 |= sc4_bit; sl = ibp->sc_to_sl[sc5]; process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD); @@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) { u16 slid = be16_to_cpu(hdr->lrh[3]); - u8 sc5; - - sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; - sc5 |= sc4_bit; return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh); } @@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) if (qp->ibqp.qp_num > 1) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u16 slid; - u8 sc5; - - sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; - sc5 |= sc4_bit; slid = be16_to_cpu(hdr->lrh[3]); if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) { @@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) /* Received on QP0, and so by definition, this is an SMP */ struct opa_smp *smp = (struct opa_smp *)data; u16 slid = be16_to_cpu(hdr->lrh[3]); - u8 sc5; - - sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; - sc5 |= sc4_bit; if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp)) goto drop; @@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } wc.slid = be16_to_cpu(hdr->lrh[3]); - sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; - sc |= sc4_bit; - wc.sl = ibp->sc_to_sl[sc]; + wc.sl = ibp->sc_to_sl[sc5]; /* * Save the LMC lower bits if the destination LID is a unicast LID. 
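The hfi1_ud_rcv() hunks above collapse four open-coded computations of the 5-bit service class (low nibble of LRH[0] plus the SC4 bit) into a single hdr2sc() call made once at the top of the function. A sketch of what that helper is expected to compute, assuming it matches the open-coded logic it replaces (the in-tree definition lives in the driver's hfi.h; rhf_dc_info() is the existing receive-header-flags accessor):

static inline u8 hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
{
	/* SC[3:0] from LRH[0] bits 15:12, SC[4] from the RHF DC info bit */
	return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
	       ((!!(rhf_dc_info(rhf))) << 4);
}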
@@ -183,7 +183,7 @@ struct user_sdma_iovec { struct sdma_mmu_node *node; }; -#define SDMA_CACHE_NODE_EVICT BIT(0) +#define SDMA_CACHE_NODE_EVICT 0 struct sdma_mmu_node { struct mmu_rb_node rb; @@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req, */ SDMA_DBG(req, "TID offset %ubytes %uunits om%u", req->tidoffset, req->tidoffset / req->omfactor, - !!(req->omfactor - KDETH_OM_SMALL)); + req->omfactor != KDETH_OM_SMALL); KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, req->tidoffset / req->omfactor); KDETH_SET(hdr->kdeth.ver_tid_offset, OM, - !!(req->omfactor - KDETH_OM_SMALL)); + req->omfactor != KDETH_OM_SMALL); } done: trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, @@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx) struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) + __must_hold(&qp->s_lock) { struct verbs_txreq *tx = ERR_PTR(-EBUSY); - unsigned long flags; - spin_lock_irqsave(&qp->s_lock, flags); write_seqlock(&dev->iowait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct hfi1_qp_priv *priv; @@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, } out: write_sequnlock(&dev->iowait_lock); - spin_unlock_irqrestore(&qp->s_lock, flags); return tx; } @@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) + __must_hold(&qp->slock) { struct verbs_txreq *tx; struct hfi1_qp_priv *priv = qp->priv; @@ -113,6 +113,8 @@ #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) #define IW_CFG_FPM_QP_COUNT 32768 +#define I40IW_MAX_PAGES_PER_FMR 512 +#define I40IW_MIN_PAGES_PER_FMR 1 #define I40IW_MTU_TO_MSS 40 #define I40IW_DEFAULT_MSS 1460 @@ -600,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev) cqp_init_info.scratch_array = cqp->scratch_array; status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info); if (status) { - i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n", - status, maj_err, min_err); + i40iw_pr_err("cqp init status %d\n", status); goto exit; } status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err); @@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev, props->max_qp_init_rd_atom = props->max_qp_rd_atom; props->atomic_cap = IB_ATOMIC_NONE; props->max_map_per_fmr = 1; + props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR; return 0; } @@ -1473,6 +1474,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; info->pd_id = iwpd->sc_pd.pd_id; info->total_len = iwmr->length; + info->remote_access = true; cqp_info->cqp_cmd = OP_ALLOC_STAG; cqp_info->post_sq = 1; cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev; @@ -1527,7 +1529,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, mutex_lock(&iwdev->pbl_mutex); status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); mutex_unlock(&iwdev->pbl_mutex); - if (!status) + if (status) goto err1; if (palloc->level != I40IW_LEVEL_1) @@ -2149,6 +2151,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; struct i40iw_fast_reg_stag_info info; + memset(&info, 0, sizeof(info)); info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; info.access_rights |= i40iw_get_user_access(flags); info.stag_key = reg_wr(ib_wr)->key & 0xff; @@ -2158,10 +2161,14 @@ static int i40iw_post_send(struct ib_qp *ibqp, 
info.addr_type = I40IW_ADDR_TYPE_VA_BASED; info.va = (void *)(uintptr_t)iwmr->ibmr.iova; info.total_len = iwmr->ibmr.length; + info.reg_addr_pa = *(u64 *)palloc->level1.addr; info.first_pm_pbl_index = palloc->level1.idx; info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; + if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) + info.chunk_size = 1; + if (page_shift == 21) info.page_size = 1; /* 2M page */ @@ -2327,13 +2334,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq, { struct i40iw_cq *iwcq; struct i40iw_cq_uk *ukcq; - enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; + unsigned long flags; + enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT; iwcq = (struct i40iw_cq *)ibcq; ukcq = &iwcq->sc_cq.cq_uk; - if (notify_flags == IB_CQ_NEXT_COMP) - cq_notify = IW_CQ_COMPL_EVENT; + if (notify_flags == IB_CQ_SOLICITED) + cq_notify = IW_CQ_COMPL_SOLICITED; + spin_lock_irqsave(&iwcq->lock, flags); ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); + spin_unlock_irqrestore(&iwcq->lock, flags); return 0; } @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); ah->av.ib.g_slid = ah_attr->src_path_bits; + ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); if (ah_attr->ah_flags & IB_AH_GRH) { ah->av.ib.g_slid |= 0x80; ah->av.ib.gid_index = ah_attr->grh.sgid_index; @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) --ah->av.ib.stat_rate; } - ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); return &ah->ibah; } @@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); spin_unlock(&tun_qp->tx_lock); if (ret) - goto out; + goto end; tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); if (tun_qp->tx_ring[tun_tx_ix].ah) @@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, wr.wr.send_flags = IB_SEND_SIGNALED; ret = ib_post_send(src_qp, &wr.wr, &bad_wr); -out: - if (ret) - ib_destroy_ah(ah); + if (!ret) + return 0; + out: + spin_lock(&tun_qp->tx_lock); + tun_qp->tx_ix_tail++; + spin_unlock(&tun_qp->tx_lock); + tun_qp->tx_ring[tun_tx_ix].ah = NULL; +end: + ib_destroy_ah(ah); return ret; } @@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, ret = ib_post_send(send_qp, &wr.wr, &bad_wr); + if (!ret) + return 0; + + spin_lock(&sqp->tx_lock); + sqp->tx_ix_tail++; + spin_unlock(&sqp->tx_lock); + sqp->tx_ring[wire_tx_ix].ah = NULL; out: - if (ret) - ib_destroy_ah(ah); + ib_destroy_ah(ah); return ret; } @@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; else props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; - if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) - props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; } + if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) + props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; @@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct mlx4_dev *dev = (to_mdev(qp->device))->dev; int is_bonded = mlx4_is_bonded(dev); + if (flow_attr->port 
< 1 || flow_attr->port > qp->device->phys_port_cnt) + return ERR_PTR(-EINVAL); + if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && (flow_attr->type != IB_FLOW_ATTR_NORMAL)) return ERR_PTR(-EOPNOTSUPP); @@ -139,7 +139,7 @@ struct mlx4_ib_mr { u32 max_pages; struct mlx4_mr mmr; struct ib_umem *umem; - void *pages_alloc; + size_t page_map_size; }; struct mlx4_ib_mw { @@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device, struct mlx4_ib_mr *mr, int max_pages) { - int size = max_pages * sizeof(u64); - int add_size; int ret; - add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0); + /* Ensure that size is aligned to DMA cacheline + * requirements. + * max_pages is limited to MLX4_MAX_FAST_REG_PAGES + * so page_map_size will never cross PAGE_SIZE. + */ + mr->page_map_size = roundup(max_pages * sizeof(u64), + MLX4_MR_PAGES_ALIGN); - mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); - if (!mr->pages_alloc) + /* Prevent cross page boundary allocation. */ + mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); + if (!mr->pages) return -ENOMEM; - mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); - mr->page_map = dma_map_single(device->dma_device, mr->pages, - size, DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); if (dma_mapping_error(device->dma_device, mr->page_map)) { ret = -ENOMEM; @@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device, } return 0; -err: - kfree(mr->pages_alloc); +err: + free_page((unsigned long)mr->pages); return ret; } @@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) { if (mr->pages) { struct ib_device *device = mr->ibmr.device; - int size = mr->max_pages * sizeof(u64); dma_unmap_single(device->dma_device, mr->page_map, - size, DMA_TO_DEVICE); - kfree(mr->pages_alloc); + mr->page_map_size, DMA_TO_DEVICE); + free_page((unsigned long)mr->pages); mr->pages = NULL; } } @@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, mr->npages = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, - sizeof(u64) * mr->max_pages, - DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); ib_dma_sync_single_for_device(ibmr->device, mr->page_map, - sizeof(u64) * mr->max_pages, - DMA_TO_DEVICE); + mr->page_map_size, DMA_TO_DEVICE); return rc; } @@ -232,7 +232,7 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) } } else { ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); - s = (ctrl->fence_size & 0x3f) << 4; + s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4; for (i = 64; i < s; i += 64) { wqe = buf + i; *wqe = cpu_to_be32(0xffffffff); @@ -264,7 +264,7 @@ static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); } ctrl->srcrb_flags = 0; - ctrl->fence_size = size / 16; + ctrl->qpn_vlan.fence_size = size / 16; /* * Make sure descriptor is fully written before setting ownership bit * (because HW can start executing as soon as we do). 
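The mlx4 mr.c rework above drops the kzalloc()+PTR_ALIGN() arrangement in favour of a single zeroed page, so the DMA-mapped descriptor array can never straddle a page boundary. A userspace analogue of that allocation pattern, assuming a power-of-two alignment constant and the driver's cap of 511 entries (MLX4_MAX_FAST_REG_PAGES); a sketch, not the driver code:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	#define MR_PAGES_ALIGN 64	/* stand-in for MLX4_MR_PAGES_ALIGN */

	/* Return a zeroed, page-aligned array for max_pages u64 entries.
	 * Because the backing store is exactly one page, the region that
	 * would later be DMA-mapped cannot cross a page boundary. */
	static uint64_t *alloc_page_list(size_t max_pages, size_t *map_size)
	{
		size_t page = (size_t)sysconf(_SC_PAGESIZE);
		size_t size = (max_pages * sizeof(uint64_t) + MR_PAGES_ALIGN - 1)
				& ~((size_t)MR_PAGES_ALIGN - 1);
		void *buf;

		if (size > page)
			return NULL;	/* caller caps max_pages, as mlx4 does */
		if (posix_memalign(&buf, page, page))
			return NULL;
		memset(buf, 0, page);	/* get_zeroed_page() analogue */
		*map_size = size;	/* the length handed to dma_map_single() */
		return (uint64_t *)buf;
	}

	int main(void)
	{
		size_t map_size;
		uint64_t *pages = alloc_page_list(511, &map_size);

		free(pages);
		return pages ? 0 : 1;
	}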
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_RC: return sizeof (struct mlx4_wqe_ctrl_seg) + - sizeof (struct mlx4_wqe_atomic_seg) + + sizeof (struct mlx4_wqe_masked_atomic_seg) + sizeof (struct mlx4_wqe_raddr_seg); case MLX4_IB_QPT_SMI: case MLX4_IB_QPT_GSI: @@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, { err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, &qp, gfp); - if (err) + if (err) { + kfree(qp); return ERR_PTR(err); + } qp->ibqp.qp_num = qp->mqp.qpn; qp->xrcdn = xrcdn; @@ -1990,7 +1992,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, ctrl = get_send_wqe(qp, i); ctrl->owner_opcode = cpu_to_be32(1 << 31); if (qp->sq_max_wqes_per_wr == 1) - ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); + ctrl->qpn_vlan.fence_size = + 1 << (qp->sq.wqe_shift - 4); stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); } @@ -3167,8 +3170,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, wmb(); *lso_wqe = lso_hdr_sz; - ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? - MLX4_WQE_CTRL_FENCE : 0) | size; + ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ? + MLX4_WQE_CTRL_FENCE : 0) | size; /* * Make sure descriptor is fully written before @@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int eqn; int err; - if (entries < 0) + if (entries < 0 || + (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) return ERR_PTR(-EINVAL); if (check_cq_create_flags(attr->flags)) @@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) return -ENOSYS; } - if (entries < 1) + if (entries < 1 || + entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { + mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", + entries, + 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); return -EINVAL; + } entries = roundup_pow_of_two(entries + 1); - if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) return -EINVAL; if (entries == ibcq->cqe + 1) @@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext, pma_cnt_ext->port_xmit_data = cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, transmitted_ib_multicast.octets) >> 2); - pma_cnt_ext->port_xmit_data = + pma_cnt_ext->port_rcv_data = cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, received_ib_multicast.octets) >> 2); pma_cnt_ext->port_xmit_packets = @@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, MLX5_CAP_ETH(dev->mdev, scatter_fcs)) props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; + if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) + props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; + props->vendor_part_id = mdev->pdev->device; props->hw_ver = mdev->pdev->revision; @@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); - resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); + if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) + resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); resp.cache_line_size = L1_CACHE_BYTES; resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); resp.max_rq_desc_sz 
= MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); @@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, if (field_avail(typeof(resp), cqe_version, udata->outlen)) resp.response_length += sizeof(resp.cqe_version); - if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { + /* + * We don't want to expose information from the PCI bar that is located + * after 4096 bytes, so if the arch only supports larger pages, let's + * pretend we don't support reading the HCA's core clock. This is also + * forced by mmap function. + */ + if (PAGE_SIZE <= 4096 && + field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; resp.hca_core_clock_offset = @@ -1517,21 +1528,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, { struct mlx5_flow_table *ft = ft_prio->flow_table; struct mlx5_ib_flow_handler *handler; + struct mlx5_flow_spec *spec; void *ib_flow = flow_attr + 1; - u8 match_criteria_enable = 0; unsigned int spec_index; - u32 *match_c; - u32 *match_v; u32 action; int err = 0; if (!is_valid_attr(flow_attr)) return ERR_PTR(-EINVAL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + spec = mlx5_vzalloc(sizeof(*spec)); handler = kzalloc(sizeof(*handler), GFP_KERNEL); - if (!handler || !match_c || !match_v) { + if (!handler || !spec) { err = -ENOMEM; goto free; } @@ -1539,7 +1547,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, INIT_LIST_HEAD(&handler->list); for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { - err = parse_flow_attr(match_c, match_v, ib_flow); + err = parse_flow_attr(spec->match_criteria, + spec->match_value, ib_flow); if (err < 0) goto free; @@ -1547,11 +1556,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, } /* Outer header support only */ - match_criteria_enable = (!outer_header_zero(match_c)) << 0; + spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)) + << 0; action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; - handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable, - match_c, match_v, + handler->rule = mlx5_add_flow_rule(ft, spec, action, MLX5_FS_DEFAULT_FLOW_TAG, dst); @@ -1567,8 +1576,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, free: if (err) kfree(handler); - kfree(match_c); - kfree(match_v); + kvfree(spec); return err ? 
ERR_PTR(err) : handler; } @@ -1798,7 +1806,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, { struct mlx5_ib_dev *dev = container_of(device, struct mlx5_ib_dev, ib_dev.dev); - return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), + return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); } @@ -1866,14 +1874,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, break; case MLX5_DEV_EVENT_PORT_DOWN: + case MLX5_DEV_EVENT_PORT_INITIALIZED: ibev.event = IB_EVENT_PORT_ERR; port = (u8)param; break; - case MLX5_DEV_EVENT_PORT_INITIALIZED: - /* not used by ULPs */ - return; - case MLX5_DEV_EVENT_LID_CHANGE: ibev.event = IB_EVENT_LID_CHANGE; port = (u8)param; @@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, qp->rq.max_gs = 0; qp->rq.wqe_cnt = 0; qp->rq.wqe_shift = 0; + cap->max_recv_wr = 0; + cap->max_recv_sge = 0; } else { if (ucmd) { qp->rq.wqe_cnt = ucmd->rq_wqe_count; @@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, - u32 path_flags, const struct ib_qp_attr *attr) + u32 path_flags, const struct ib_qp_attr *attr, + bool alt) { enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); int err; if (attr_mask & IB_QP_PKEY_INDEX) - path->pkey_index = attr->pkey_index; + path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : + attr->pkey_index); if (ah->ah_flags & IB_AH_GRH) { if (ah->grh.sgid_index >= @@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, ah->grh.sgid_index); path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; } else { - path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; - path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : - 0; + path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; + path->fl_free_ar |= + (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; path->rlid = cpu_to_be16(ah->dlid); path->grh_mlid = ah->src_path_bits & 0x7f; if (ah->ah_flags & IB_AH_GRH) @@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, path->port = port; if (attr_mask & IB_QP_TIMEOUT) - path->ackto_lt = attr->timeout << 3; + path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) return modify_raw_packet_eth_prio(dev->mdev, @@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); if (attr_mask & IB_QP_PKEY_INDEX) - context->pri_path.pkey_index = attr->pkey_index; + context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); /* todo implement counter_index functionality */ @@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (attr_mask & IB_QP_AV) { err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port, - attr_mask, 0, attr); + attr_mask, 0, attr, false); if (err) goto out; } @@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (attr_mask & IB_QP_ALT_PATH) { err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, &context->alt_path, - attr->alt_port_num, attr_mask, 0, attr); + attr->alt_port_num, + attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, + 0, attr, true); if (err) goto out; } @@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) return MLX5_FENCE_MODE_SMALL_AND_FENCE; else return fence; - - } else { - return 0; + } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { + return MLX5_FENCE_MODE_FENCE; } + + return 0; } static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, @@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); - qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; + qp_attr->alt_pkey_index = + be16_to_cpu(context->alt_path.pkey_index); qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } - qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; + qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); qp_attr->port_num = context->pri_path.port; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ @@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->cap.max_recv_sge = qp->rq.max_gs; if (!ibqp->uobject) { - qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; + qp_attr->cap.max_send_wr = qp->sq.max_post; qp_attr->cap.max_send_sge = qp->sq.max_gs; + qp_init_attr->qp_context = ibqp->qp_context; } else { qp_attr->cap.max_send_wr = 0; qp_attr->cap.max_send_sge = 0; } - /* We don't support inline sends for kernel QPs (yet), and we - * don't know what userspace's value should be. 
- */ - qp_attr->cap.max_inline_data = 0; + qp_init_attr->qp_type = ibqp->qp_type; + qp_init_attr->recv_cq = ibqp->recv_cq; + qp_init_attr->send_cq = ibqp->send_cq; + qp_init_attr->srq = ibqp->srq; + qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->cap = qp_attr->cap; @@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data, switch (cmd.type) { case QIB_CMD_ASSIGN_CTXT: + if (rcd) { + ret = -EINVAL; + goto bail; + } + ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); if (ret) goto bail; @@ -36,7 +36,6 @@ #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/hugetlb.h> -#include <linux/dma-attrs.h> #include <linux/iommu.h> #include <linux/workqueue.h> #include <linux/list.h> @@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, int i; int flags; dma_addr_t pa; - DEFINE_DMA_ATTRS(attrs); - - if (dmasync) - dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); if (!can_do_mlock()) return -EPERM; @@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, /* wrap to first map page, invert bit 0 */ offset = qpt->incr | ((offset & 1) ^ 1); } - /* there can be no bits at shift and below */ - WARN_ON(offset & (rdi->dparms.qos_shift - 1)); + /* there can be no set bits in low-order QoS bits */ + WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); qpn = mk_qpn(qpt, map, offset); } @@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) */ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, enum ib_qp_type type) + __releases(&qp->s_lock) + __releases(&qp->s_hlock) + __releases(&qp->r_lock) + __acquires(&qp->r_lock) + __acquires(&qp->s_hlock) + __acquires(&qp->s_lock) { if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; @@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_ssn = 1; qp->s_lsn = 0; qp->s_mig_state = IB_MIG_MIGRATED; - if (qp->s_ack_queue) - memset( - qp->s_ack_queue, - 0, - rvt_max_atomic(rdi) * - sizeof(*qp->s_ack_queue)); qp->r_head_ack_queue = 0; qp->s_tail_ack_queue = 0; qp->s_num_rd_atomic = 0; @@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, * initialization that is needed. 
*/ priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); - if (!priv) + if (IS_ERR(priv)) { + ret = priv; goto bail_qp; + } qp->priv = priv; qp->timeout_jiffies = usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / @@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) !rdi->driver_f.quiesce_qp || !rdi->driver_f.notify_error_qp || !rdi->driver_f.mtu_from_qp || - !rdi->driver_f.mtu_to_path_mtu || - !rdi->driver_f.shut_down_port || - !rdi->driver_f.cap_mask_chg) + !rdi->driver_f.mtu_to_path_mtu) return -EINVAL; break; @@ -94,6 +94,7 @@ enum { IPOIB_NEIGH_TBL_FLUSH = 12, IPOIB_FLAG_DEV_ADDR_SET = 13, IPOIB_FLAG_DEV_ADDR_CTRL = 14, + IPOIB_FLAG_GOING_DOWN = 15, IPOIB_MAX_BACKOFF_SECONDS = 16, @@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, { struct net_device *dev = to_net_dev(d); int ret; + struct ipoib_dev_priv *priv = netdev_priv(dev); + + if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) + return -EPERM; if (!rtnl_trylock()) return restart_syscall(); @@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) return false; - netif_addr_lock(priv->dev); + netif_addr_lock_bh(priv->dev); /* The subnet prefix may have changed, update it now so we won't have * to do it later @@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) search_gid.global.interface_id = priv->local_gid.global.interface_id; - netif_addr_unlock(priv->dev); + netif_addr_unlock_bh(priv->dev); err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, priv->dev, &port, &index); - netif_addr_lock(priv->dev); + netif_addr_lock_bh(priv->dev); if (search_gid.global.interface_id != priv->local_gid.global.interface_id) @@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) } out: - netif_addr_unlock(priv->dev); + netif_addr_unlock_bh(priv->dev); return ret; } @@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) neigh = NULL; goto out_unlock; } - neigh->alive = jiffies; + + if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) + neigh->alive = jiffies; goto out_unlock; } } @@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) struct ipoib_dev_priv *child_priv; struct net_device *netdev = priv->dev; - netif_addr_lock(netdev); + netif_addr_lock_bh(netdev); memcpy(&priv->local_gid.global.interface_id, &gid->global.interface_id, @@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); - netif_addr_unlock(netdev); + netif_addr_unlock_bh(netdev); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { down_read(&priv->vlan_rwsem); @@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev, union ib_gid *gid = (union ib_gid *)(ss->__data + 4); int ret = 0; - netif_addr_lock(dev); + netif_addr_lock_bh(dev); /* Make sure the QPN, reserved and subnet prefix match the current * lladdr, it also makes sure the lladdr is unicast. 
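The IPOIB_FLAG_GOING_DOWN bit introduced above is set at the start of device teardown (see the ipoib_remove_one hunk that follows) so that sysfs-driven entry points such as set_mode and the VLAN add/delete paths can refuse with -EPERM instead of racing the removal. A rough userspace analogue of that guard, with C11 atomics standing in for set_bit()/test_bit():

	#include <stdatomic.h>
	#include <errno.h>

	static atomic_bool going_down;	/* IPOIB_FLAG_GOING_DOWN analogue */

	static int vlan_add(void)
	{
		if (atomic_load(&going_down))
			return -EPERM;	/* refuse once teardown has begun */
		/* ... create and register the child interface ... */
		return 0;
	}

	static void remove_one(void)
	{
		/* mark the interface as dying before unwinding anything */
		atomic_store(&going_down, true);
		/* ... flush work, bring the device down, unregister ... */
	}

	int main(void)
	{
		if (vlan_add() != 0)
			return 1;
		remove_one();
		return vlan_add() == -EPERM ? 0 : 1;
	}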
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev, gid->global.interface_id == 0) ret = -EINVAL; - netif_addr_unlock(dev); + netif_addr_unlock_bh(dev); return ret; } @@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) ib_unregister_event_handler(&priv->event_handler); flush_workqueue(ipoib_workqueue); + /* mark interface in the middle of destruction */ + set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); + rtnl_lock(); dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); rtnl_unlock(); @@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) return; } priv->local_lid = port_attr.lid; - netif_addr_lock(dev); + netif_addr_lock_bh(dev); if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { - netif_addr_unlock(dev); + netif_addr_unlock_bh(dev); return; } - netif_addr_unlock(dev); + netif_addr_unlock_bh(dev); spin_lock_irq(&priv->lock); if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) @@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) ppriv = netdev_priv(pdev); + if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) + return -EPERM; + snprintf(intf_name, sizeof intf_name, "%s.%04x", ppriv->dev->name, pkey); priv = ipoib_intf_alloc(intf_name); @@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) ppriv = netdev_priv(pdev); + if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) + return -EPERM; + if (!rtnl_trylock()) return restart_syscall(); @@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, { unsigned int sg_offset = 0; - state->desc = req->indirect_desc; state->fr.next = req->fr_list; state->fr.end = req->fr_list + ch->target->mr_per_cmd; state->sg = scat; @@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, struct scatterlist *sg; int i; - state->desc = req->indirect_desc; for_each_sg(scat, sg, count, i) { srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), ib_sg_dma_len(dev->dev, sg), @@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, target->indirect_size, DMA_TO_DEVICE); memset(&state, 0, sizeof(state)); + state.desc = req->indirect_desc; if (dev->use_fast_reg) ret = srp_map_sg_fr(&state, ch, req, scat, count); else if (dev->use_fmr) @@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device) int mr_page_shift, p; u64 max_pages_per_mr; - srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); + srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); if (!srp_dev) return; @@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device) IB_ACCESS_REMOTE_WRITE); if (IS_ERR(srp_dev->global_mr)) goto err_pd; - } else { - srp_dev->global_mr = NULL; } for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { @@ -1638,8 +1638,7 @@ retry: */ qp_init->cap.max_send_wr = srp_sq_size / 2; qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; - qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd, - sdev->device->attrs.max_sge); + qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; qp_init->port_num = ch->sport->port; ch->qp = ib_create_qp(sdev->pd, qp_init); @@ -106,6 +106,7 @@ enum { SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, SRPT_DEF_SG_TABLESIZE = 128, + SRPT_DEF_SG_PER_WQE = 16, MIN_SRPT_SQ_SIZE = 16, DEF_SRPT_SQ_SIZE = 4096, @@ -218,8 +218,23 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count) } input_event(dev, EV_KEY, BTN_TOUCH, count > 0); - if (use_count) + + if 
(use_count) { + if (count == 0 && + !test_bit(ABS_MT_DISTANCE, dev->absbit) && + test_bit(ABS_DISTANCE, dev->absbit) && + input_abs_get_val(dev, ABS_DISTANCE) != 0) { + /* + * Force reporting BTN_TOOL_FINGER for devices that + * only report general hover (and not per-contact + * distance) when contact is in proximity but not + * on the surface. + */ + count = 1; + } + input_mt_report_finger_count(dev, count); + } if (oldest) { int x = input_mt_get_value(oldest, ABS_MT_POSITION_X); @@ -153,8 +153,6 @@ static void input_pass_values(struct input_dev *dev, rcu_read_unlock(); - add_input_randomness(vals->type, vals->code, vals->value); - /* trigger auto repeat for key events */ if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { for (v = vals; v != vals + count; v++) { @@ -371,9 +369,10 @@ static int input_get_disposition(struct input_dev *dev, static void input_handle_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { - int disposition; + int disposition = input_get_disposition(dev, type, code, &value); - disposition = input_get_disposition(dev, type, code, &value); + if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) + add_input_randomness(type, code, value); if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); @@ -1031,17 +1031,17 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect case XTYPE_XBOXONE: packet->data[0] = 0x09; /* activate rumble */ - packet->data[1] = 0x08; + packet->data[1] = 0x00; packet->data[2] = xpad->odata_serial++; - packet->data[3] = 0x08; /* continuous effect */ - packet->data[4] = 0x00; /* simple rumble mode */ - packet->data[5] = 0x03; /* L and R actuator only */ - packet->data[6] = 0x00; /* TODO: LT actuator */ - packet->data[7] = 0x00; /* TODO: RT actuator */ + packet->data[3] = 0x09; + packet->data[4] = 0x00; + packet->data[5] = 0x0F; + packet->data[6] = 0x00; + packet->data[7] = 0x00; packet->data[8] = strong / 512; /* left actuator */ packet->data[9] = weak / 512; /* right actuator */ - packet->data[10] = 0x80; /* length of pulse */ - packet->data[11] = 0x00; /* stop period of pulse */ + packet->data[10] = 0xFF; + packet->data[11] = 0x00; packet->data[12] = 0x00; packet->len = 13; packet->pending = true; @@ -1431,22 +1431,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id int ep_irq_in_idx; int i, error; + if (intf->cur_altsetting->desc.bNumEndpoints != 2) + return -ENODEV; + for (i = 0; xpad_device[i].idVendor; i++) { if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) && (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct)) break; } - if (xpad_device[i].xtype == XTYPE_XBOXONE && - intf->cur_altsetting->desc.bInterfaceNumber != 0) { - /* - * The Xbox One controller lists three interfaces all with the - * same interface class, subclass and protocol. Differentiate by - * interface number. 
- */ - return -ENODEV; - } - xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL); if (!xpad) return -ENOMEM; @@ -1478,6 +1471,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) { if (intf->cur_altsetting->desc.bInterfaceProtocol == 129) xpad->xtype = XTYPE_XBOX360W; + else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208) + xpad->xtype = XTYPE_XBOXONE; else xpad->xtype = XTYPE_XBOX360; } else { @@ -1492,6 +1487,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id xpad->mapping |= MAP_STICKS_TO_NULL; } + if (xpad->xtype == XTYPE_XBOXONE && + intf->cur_altsetting->desc.bInterfaceNumber != 0) { + /* + * The Xbox One controller lists three interfaces all with the + * same interface class, subclass and protocol. Differentiate by + * interface number. + */ + error = -ENODEV; + goto err_free_in_urb; + } + error = xpad_init_output(intf, xpad); if (error) goto err_free_in_urb; @@ -32,7 +32,7 @@ #define TC3589x_PULL_DOWN_MASK 0x1 #define TC3589x_PULL_UP_MASK 0x2 #define TC3589x_PULLUP_ALL_MASK 0xAA -#define TC3589x_IO_PULL_VAL(index, mask) ((mask)<<((index)%4)*2)) +#define TC3589x_IO_PULL_VAL(index, mask) ((mask)<<((index)%4)*2) /* Bit masks for IOCFG register */ #define IOCFG_BALLCFG 0x01 @@ -552,7 +552,7 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc) if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) { dev_err(kbc->dev, - "keypad rows/columns not porperly specified\n"); + "keypad rows/columns not properly specified\n"); return -EINVAL; } @@ -82,6 +82,20 @@ config INPUT_ARIZONA_HAPTICS To compile this driver as a module, choose M here: the module will be called arizona-haptics. +config INPUT_ATMEL_CAPTOUCH + tristate "Atmel Capacitive Touch Button Driver" + depends on OF || COMPILE_TEST + depends on I2C + help + Say Y here if an Atmel Capacitive Touch Button device which + implements "captouch" protocol is connected to I2C bus. Typically + this device consists of Atmel Touch sensor controlled by AtMegaXX + MCU running firmware based on Qtouch library. + One should find "atmel,captouch" node in the board specific DTS. + + To compile this driver as a module, choose M here: the + module will be called atmel_captouch. + config INPUT_BMA150 tristate "BMA150/SMB380 acceleration sensor support" depends on I2C @@ -796,4 +810,13 @@ config INPUT_DRV2667_HAPTICS To compile this driver as a module, choose M here: the module will be called drv2667-haptics. +config INPUT_HISI_POWERKEY + tristate "Hisilicon PMIC ONKEY support" + depends on ARCH_HISI || COMPILE_TEST + help + Say Y to enable support for PMIC ONKEY. + + To compile this driver as a module, choose M here: the + module will be called hisi_powerkey. 
+ endif @@ -17,6 +17,7 @@ obj-$(CONFIG_INPUT_APANEL) += apanel.o obj-$(CONFIG_INPUT_ARIZONA_HAPTICS) += arizona-haptics.o obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o +obj-$(CONFIG_INPUT_ATMEL_CAPTOUCH) += atmel_captouch.o obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o obj-$(CONFIG_INPUT_BMA150) += bma150.o obj-$(CONFIG_INPUT_CM109) += cm109.o @@ -34,6 +35,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o +obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o @@ -297,7 +297,7 @@ static int __init apanel_init(void) if (slave != i2c_addr) { pr_notice(APANEL ": only one SMBus slave " - "address supported, skiping device...\n"); + "address supported, skipping device...\n"); continue; } diff --git a/drivers/input/misc/atmel_captouch.c b/drivers/input/misc/atmel_captouch.c new file mode 100644 index 000000000000..941265415a89 --- /dev/null +++ b/ drivers/input/misc/atmel_captouch.c@@ -0,0 +1,290 @@ +/* + * Atmel Atmegaxx Capacitive Touch Button Driver + * + * Copyright (C) 2016 Google, inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * It's irrelevant that the HW used to develop captouch driver is based + * on Atmega88PA part and uses QtouchADC parts for sensing touch. + * Calling this driver "captouch" is an arbitrary way to distinguish + * the protocol this driver supported by other atmel/qtouch drivers. + * + * Captouch driver supports a newer/different version of the I2C + * registers/commands than the qt1070.c driver. + * Don't let the similarity of the general driver structure fool you. + * + * For raw i2c access from userspace, use i2cset/i2cget + * to poke at /dev/i2c-N devices. 
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +/* Maximum number of buttons supported */ +#define MAX_NUM_OF_BUTTONS 8 + +/* Registers */ +#define REG_KEY1_THRESHOLD 0x02 +#define REG_KEY2_THRESHOLD 0x03 +#define REG_KEY3_THRESHOLD 0x04 +#define REG_KEY4_THRESHOLD 0x05 + +#define REG_KEY1_REF_H 0x20 +#define REG_KEY1_REF_L 0x21 +#define REG_KEY2_REF_H 0x22 +#define REG_KEY2_REF_L 0x23 +#define REG_KEY3_REF_H 0x24 +#define REG_KEY3_REF_L 0x25 +#define REG_KEY4_REF_H 0x26 +#define REG_KEY4_REF_L 0x27 + +#define REG_KEY1_DLT_H 0x30 +#define REG_KEY1_DLT_L 0x31 +#define REG_KEY2_DLT_H 0x32 +#define REG_KEY2_DLT_L 0x33 +#define REG_KEY3_DLT_H 0x34 +#define REG_KEY3_DLT_L 0x35 +#define REG_KEY4_DLT_H 0x36 +#define REG_KEY4_DLT_L 0x37 + +#define REG_KEY_STATE 0x3C + +/* + * @i2c_client: I2C slave device client pointer + * @input: Input device pointer + * @num_btn: Number of buttons + * @keycodes: map of button# to KeyCode + * @prev_btn: Previous key state to detect button "press" or "release" + * @xfer_buf: I2C transfer buffer + */ +struct atmel_captouch_device { + struct i2c_client *client; + struct input_dev *input; + u32 num_btn; + u32 keycodes[MAX_NUM_OF_BUTTONS]; + u8 prev_btn; + u8 xfer_buf[8] ____cacheline_aligned; +}; + +/* + * Read from I2C slave device + * The protocol is that the client has to provide both the register address + * and the length, and while reading back the device would prepend the data + * with address and length for verification. + */ +static int atmel_read(struct atmel_captouch_device *capdev, + u8 reg, u8 *data, size_t len) +{ + struct i2c_client *client = capdev->client; + struct device *dev = &client->dev; + struct i2c_msg msg[2]; + int err; + + if (len > sizeof(capdev->xfer_buf) - 2) + return -EINVAL; + + capdev->xfer_buf[0] = reg; + capdev->xfer_buf[1] = len; + + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].buf = capdev->xfer_buf; + msg[0].len = 2; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].buf = capdev->xfer_buf; + msg[1].len = len + 2; + + err = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (err != ARRAY_SIZE(msg)) + return err < 0 ? err : -EIO; + + if (capdev->xfer_buf[0] != reg) { + dev_err(dev, + "I2C read error: register address does not match (%#02x vs %02x)\n", + capdev->xfer_buf[0], reg); + return -ECOMM; + } + + memcpy(data, &capdev->xfer_buf[2], len); + + return 0; +} + +/* + * Handle interrupt and report the key changes to the input system. + * Multi-touch can be supported; however, it really depends on whether + * the device can multi-touch. 
+ */ +static irqreturn_t atmel_captouch_isr(int irq, void *data) +{ + struct atmel_captouch_device *capdev = data; + struct device *dev = &capdev->client->dev; + int error; + int i; + u8 new_btn; + u8 changed_btn; + + error = atmel_read(capdev, REG_KEY_STATE, &new_btn, 1); + if (error) { + dev_err(dev, "failed to read button state: %d\n", error); + goto out; + } + + dev_dbg(dev, "%s: button state %#02x\n", __func__, new_btn); + + changed_btn = new_btn ^ capdev->prev_btn; + capdev->prev_btn = new_btn; + + for (i = 0; i < capdev->num_btn; i++) { + if (changed_btn & BIT(i)) + input_report_key(capdev->input, + capdev->keycodes[i], + new_btn & BIT(i)); + } + + input_sync(capdev->input); + +out: + return IRQ_HANDLED; +} + +/* + * Probe function to setup the device, input system and interrupt + */ +static int atmel_captouch_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct atmel_captouch_device *capdev; + struct device *dev = &client->dev; + struct device_node *node; + int i; + int err; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)) { + dev_err(dev, "needed i2c functionality is not supported\n"); + return -EINVAL; + } + + capdev = devm_kzalloc(dev, sizeof(*capdev), GFP_KERNEL); + if (!capdev) + return -ENOMEM; + + capdev->client = client; + i2c_set_clientdata(client, capdev); + + err = atmel_read(capdev, REG_KEY_STATE, + &capdev->prev_btn, sizeof(capdev->prev_btn)); + if (err) { + dev_err(dev, "failed to read initial button state: %d\n", err); + return err; + } + + capdev->input = devm_input_allocate_device(dev); + if (!capdev->input) { + dev_err(dev, "failed to allocate input device\n"); + return -ENOMEM; + } + + capdev->input->id.bustype = BUS_I2C; + capdev->input->id.product = 0x880A; + capdev->input->id.version = 0; + capdev->input->name = "ATMegaXX Capacitive Button Controller"; + __set_bit(EV_KEY, capdev->input->evbit); + + node = dev->of_node; + if (!node) { + dev_err(dev, "failed to find matching node in device tree\n"); + return -EINVAL; + } + + if (of_property_read_bool(node, "autorepeat")) + __set_bit(EV_REP, capdev->input->evbit); + + capdev->num_btn = of_property_count_u32_elems(node, "linux,keymap"); + if (capdev->num_btn > MAX_NUM_OF_BUTTONS) + capdev->num_btn = MAX_NUM_OF_BUTTONS; + + err = of_property_read_u32_array(node, "linux,keycodes", + capdev->keycodes, + capdev->num_btn); + if (err) { + dev_err(dev, + "failed to read linux,keycode property: %d\n", err); + return err; + } + + for (i = 0; i < capdev->num_btn; i++) + __set_bit(capdev->keycodes[i], capdev->input->keybit); + + capdev->input->keycode = capdev->keycodes; + capdev->input->keycodesize = sizeof(capdev->keycodes[0]); + capdev->input->keycodemax = capdev->num_btn; + + err = input_register_device(capdev->input); + if (err) + return err; + + err = devm_request_threaded_irq(dev, client->irq, + NULL, atmel_captouch_isr, + IRQF_ONESHOT, + "atmel_captouch", capdev); + if (err) { + dev_err(dev, "failed to request irq %d: %d\n", + client->irq, err); + return err; + } + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id atmel_captouch_of_id[] = { + { + .compatible = "atmel,captouch", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atmel_captouch_of_id); +#endif + +static const struct i2c_device_id atmel_captouch_id[] = { + { "atmel_captouch", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, atmel_captouch_id); + +static struct i2c_driver atmel_captouch_driver = { + .probe = 
atmel_captouch_probe, + .id_table = atmel_captouch_id, + .driver = { + .name = "atmel_captouch", + .of_match_table = of_match_ptr(atmel_captouch_of_id), + }, +}; +module_i2c_driver(atmel_captouch_driver); + +/* Module information */ +MODULE_AUTHOR("Hung-yu Wu <hywu@google.com>"); +MODULE_DESCRIPTION("Atmel ATmegaXX Capacitance Touch Sensor I2C Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c new file mode 100644 index 000000000000..675539c529ce --- /dev/null +++ b/ drivers/input/misc/hisi_powerkey.c@@ -0,0 +1,142 @@ +/* + * Hisilicon PMIC powerkey driver + * + * Copyright (C) 2013 Hisilicon Ltd. + * Copyright (C) 2015, 2016 Linaro Ltd. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/reboot.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_irq.h> +#include <linux/input.h> +#include <linux/slab.h> + +/* the held interrupt will trigger after 4 seconds */ +#define MAX_HELD_TIME (4 * MSEC_PER_SEC) + +static irqreturn_t hi65xx_power_press_isr(int irq, void *q) +{ + struct input_dev *input = q; + + pm_wakeup_event(input->dev.parent, MAX_HELD_TIME); + input_report_key(input, KEY_POWER, 1); + input_sync(input); + + return IRQ_HANDLED; +} + +static irqreturn_t hi65xx_power_release_isr(int irq, void *q) +{ + struct input_dev *input = q; + + pm_wakeup_event(input->dev.parent, MAX_HELD_TIME); + input_report_key(input, KEY_POWER, 0); + input_sync(input); + + return IRQ_HANDLED; +} + +static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q) +{ + struct input_dev *input = q; + int value = test_bit(KEY_RESTART, input->key); + + pm_wakeup_event(input->dev.parent, MAX_HELD_TIME); + input_report_key(input, KEY_RESTART, !value); + input_sync(input); + + return IRQ_HANDLED; +} + +static const struct { + const char *name; + irqreturn_t (*handler)(int irq, void *q); +} hi65xx_irq_info[] = { + { "down", hi65xx_power_press_isr }, + { "up", hi65xx_power_release_isr }, + { "hold 4s", hi65xx_restart_toggle_isr }, +}; + +static int hi65xx_powerkey_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct input_dev *input; + int irq, i, error; + + input = devm_input_allocate_device(&pdev->dev); + if (!input) { + dev_err(&pdev->dev, "failed to allocate input device\n"); + return -ENOMEM; + } + + input->phys = "hisi_on/input0"; + input->name = "HISI 65xx PowerOn Key"; + + input_set_capability(input, EV_KEY, KEY_POWER); + input_set_capability(input, EV_KEY, KEY_RESTART); + + for (i = 0; i < ARRAY_SIZE(hi65xx_irq_info); i++) { + + irq = platform_get_irq_byname(pdev, hi65xx_irq_info[i].name); + if (irq < 0) { + error = irq; + dev_err(dev, "couldn't get irq %s: %d\n", + hi65xx_irq_info[i].name, error); + return error; + } + + error = devm_request_any_context_irq(dev, irq, + hi65xx_irq_info[i].handler, + IRQF_ONESHOT, + hi65xx_irq_info[i].name, + input); + if (error < 0) { + dev_err(dev, "couldn't request irq %s: %d\n", + hi65xx_irq_info[i].name, error); + return error; + } + } + + error = input_register_device(input); + if (error) { 
+ dev_err(&pdev->dev, "failed to register input device: %d\n", + error); + return error; + } + + device_init_wakeup(&pdev->dev, 1); + + return 0; +} + +static int hi65xx_powerkey_remove(struct platform_device *pdev) +{ + device_init_wakeup(&pdev->dev, 0); + + return 0; +} + +static struct platform_driver hi65xx_powerkey_driver = { + .driver = { + .name = "hi65xx-powerkey", + }, + .probe = hi65xx_powerkey_probe, + .remove = hi65xx_powerkey_remove, +}; +module_platform_driver(hi65xx_powerkey_driver); + +MODULE_AUTHOR("Zhiliang Xue <xuezhiliang@huawei.com"); +MODULE_DESCRIPTION("Hisi PMIC Power key driver"); +MODULE_LICENSE("GPL v2"); @@ -124,7 +124,7 @@ regulator_haptic_parse_dt(struct device *dev, struct regulator_haptic *haptic) node = dev->of_node; if(!node) { - dev_err(dev, "Missing dveice tree data\n"); + dev_err(dev, "Missing device tree data\n"); return -EINVAL; } @@ -130,8 +130,8 @@ static int xenkbd_probe(struct xenbus_device *dev, if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) abs = 0; if (abs) { - ret = xenbus_printf(XBT_NIL, dev->nodename, - "request-abs-pointer", "1"); + ret = xenbus_write(XBT_NIL, dev->nodename, + "request-abs-pointer", "1"); if (ret) { pr_warning("xenkbd: can't request abs-pointer"); abs = 0; @@ -327,8 +327,8 @@ InitWait: if (ret < 0) val = 0; if (val) { - ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, - "request-abs-pointer", "1"); + ret = xenbus_write(XBT_NIL, info->xbdev->nodename, + "request-abs-pointer", "1"); if (ret) pr_warning("xenkbd: can't request abs-pointer"); } @@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 5: etd->hw_version = 3; break; - case 6: - case 7: - case 8: - case 9: - case 10: - case 13: - case 14: + case 6 ... 14: etd->hw_version = 4; break; default: @@ -1714,7 +1708,7 @@ int elantech_init(struct psmouse *psmouse) snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1", psmouse->ps2dev.serio->phys); tp_dev->phys = etd->tp_phys; - tp_dev->name = "Elantech PS/2 TrackPoint"; + tp_dev->name = "ETPS/2 Elantech TrackPoint"; tp_dev->id.bustype = BUS_I8042; tp_dev->id.vendor = 0x0002; tp_dev->id.product = PSMOUSE_ELANTECH; @@ -287,7 +287,7 @@ static int lifebook_create_relative_device(struct psmouse *psmouse) "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys; - dev2->name = "PS/2 Touchpad"; + dev2->name = "LBPS/2 Fujitsu Lifebook Touchpad"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_LIFEBOOK; @@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) return -ENXIO; } - if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) { - psmouse_dbg(psmouse, "VMMouse port in use.\n"); - return -EBUSY; - } - /* Check if the device is present */ response = ~VMMOUSE_PROTO_MAGIC; VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2); - if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) { - release_region(VMMOUSE_PROTO_PORT, 4); + if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) return -ENXIO; - } if (set_properties) { psmouse->vendor = VMMOUSE_VENDOR; @@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) psmouse->model = version; } - release_region(VMMOUSE_PROTO_PORT, 4); - return 0; } @@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse) psmouse_reset(psmouse); input_unregister_device(priv->abs_dev); kfree(priv); - release_region(VMMOUSE_PROTO_PORT, 4); } /** @@ -438,15 +428,10 @@ int vmmouse_init(struct 
psmouse *psmouse) struct input_dev *rel_dev = psmouse->dev, *abs_dev; int error; - if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) { - psmouse_dbg(psmouse, "VMMouse port in use.\n"); - return -EBUSY; - } - psmouse_reset(psmouse); error = vmmouse_enable(psmouse); if (error) - goto release_region; + return error; priv = kzalloc(sizeof(*priv), GFP_KERNEL); abs_dev = input_allocate_device(); @@ -502,8 +487,5 @@ init_fail: kfree(priv); psmouse->private = NULL; -release_region: - release_region(VMMOUSE_PROTO_PORT, 4); - return error; } @@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv) static void rmi_function_of_probe(struct rmi_function *fn) { char of_name[9]; + struct device_node *node = fn->rmi_dev->xport->dev->of_node; snprintf(of_name, sizeof(of_name), "rmi4-f%02x", fn->fd.function_number); - fn->dev.of_node = of_find_node_by_name( - fn->rmi_dev->xport->dev->of_node, of_name); + fn->dev.of_node = of_get_child_by_name(node, of_name); } #else static inline void rmi_function_of_probe(struct rmi_function *fn) @@ -81,26 +81,26 @@ struct f01_basic_properties { * This bit disables whatever sleep mode may be selected by the sleep_mode * field and forces the device to run at full power without sleeping. */ -#define RMI_F01_CRTL0_NOSLEEP_BIT BIT(2) +#define RMI_F01_CTRL0_NOSLEEP_BIT BIT(2) /* * When this bit is set, the touch controller employs a noise-filtering * algorithm designed for use with a connected battery charger. */ -#define RMI_F01_CRTL0_CHARGER_BIT BIT(5) +#define RMI_F01_CTRL0_CHARGER_BIT BIT(5) /* * Sets the report rate for the device. The effect of this setting is * highly product dependent. Check the spec sheet for your particular * touch sensor. */ -#define RMI_F01_CRTL0_REPORTRATE_BIT BIT(6) +#define RMI_F01_CTRL0_REPORTRATE_BIT BIT(6) /* * Written by the host as an indicator that the device has been * successfully configured. */ -#define RMI_F01_CRTL0_CONFIGURED_BIT BIT(7) +#define RMI_F01_CTRL0_CONFIGURED_BIT BIT(7) /** * @ctrl0 - see the bit definitions above. 
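The F01 hunks here are a mechanical CRTL0->CTRL0 spelling fix, but the suspend/resume code that follows reads more easily with the register manipulation spelled out. A standalone illustration of how ctrl0 is updated while preserving the other bits; the NOSLEEP and CONFIGURED positions are taken from the definitions above, while the two-bit sleep-mode field is an assumption based on the mask name:

	#include <stdint.h>
	#include <stdio.h>

	#define CTRL0_NOSLEEP		(1u << 2)  /* RMI_F01_CTRL0_NOSLEEP_BIT */
	#define CTRL0_CONFIGURED	(1u << 7)  /* RMI_F01_CTRL0_CONFIGURED_BIT */
	#define CTRL0_SLEEP_MODE_MASK	0x03u      /* assumed: low two bits */

	int main(void)
	{
		uint8_t ctrl0 = CTRL0_NOSLEEP | CTRL0_CONFIGURED;
		uint8_t old_nosleep = ctrl0 & CTRL0_NOSLEEP; /* saved on suspend */

		ctrl0 &= ~CTRL0_NOSLEEP;	   /* let the part sleep */
		ctrl0 &= ~CTRL0_SLEEP_MODE_MASK;   /* then select a sleep mode */

		if (old_nosleep)		   /* resume restores the old state */
			ctrl0 |= CTRL0_NOSLEEP;

		printf("ctrl0 = %#x\n", ctrl0);
		return 0;
	}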
@@ -330,10 +330,10 @@ static int rmi_f01_probe(struct rmi_function *fn) case RMI_F01_NOSLEEP_DEFAULT: break; case RMI_F01_NOSLEEP_OFF: - f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT; + f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT; break; case RMI_F01_NOSLEEP_ON: - f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT; + f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT; break; } @@ -349,7 +349,7 @@ static int rmi_f01_probe(struct rmi_function *fn) f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK; } - f01->device_control.ctrl0 |= RMI_F01_CRTL0_CONFIGURED_BIT; + f01->device_control.ctrl0 |= RMI_F01_CTRL0_CONFIGURED_BIT; error = rmi_write(rmi_dev, fn->fd.control_base_addr, f01->device_control.ctrl0); @@ -535,8 +535,8 @@ static int rmi_f01_suspend(struct rmi_function *fn) int error; f01->old_nosleep = - f01->device_control.ctrl0 & RMI_F01_CRTL0_NOSLEEP_BIT; - f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT; + f01->device_control.ctrl0 & RMI_F01_CTRL0_NOSLEEP_BIT; + f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT; f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK; if (device_may_wakeup(fn->rmi_dev->xport->dev)) @@ -549,7 +549,7 @@ static int rmi_f01_suspend(struct rmi_function *fn) if (error) { dev_err(&fn->dev, "Failed to write sleep mode: %d.\n", error); if (f01->old_nosleep) - f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT; + f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT; f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK; f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL; return error; @@ -564,7 +564,7 @@ static int rmi_f01_resume(struct rmi_function *fn) int error; if (f01->old_nosleep) - f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT; + f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT; f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK; f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL; @@ -530,8 +530,8 @@ static void rmi_f11_rel_pos_report(struct f11_data *f11, u8 n_finger) struct f11_2d_data *data = &f11->data; s8 x, y; - x = data->rel_pos[n_finger * 2]; - y = data->rel_pos[n_finger * 2 + 1]; + x = data->rel_pos[n_finger * RMI_F11_REL_BYTES]; + y = data->rel_pos[n_finger * RMI_F11_REL_BYTES + 1]; rmi_2d_sensor_rel_report(sensor, x, y); } @@ -1241,7 +1241,6 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f11_data *f11 = dev_get_drvdata(&fn->dev); u16 data_base_addr = fn->fd.data_base_addr; - u16 data_base_addr_offset = 0; int error; if (rmi_dev->xport->attn_data) { @@ -1251,8 +1250,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) rmi_dev->xport->attn_size -= f11->sensor.attn_size; } else { error = rmi_read_block(rmi_dev, - data_base_addr + data_base_addr_offset, - f11->sensor.data_pkt, + data_base_addr, f11->sensor.data_pkt, f11->sensor.pkt_size); if (error < 0) return error; @@ -1260,7 +1258,6 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits) rmi_f11_finger_handler(f11, &f11->sensor, irq_bits, drvdata->num_of_irq_regs); - data_base_addr_offset += f11->sensor.pkt_size; return 0; } @@ -27,7 +27,6 @@ enum rmi_f12_object_type { }; struct f12_data { - struct rmi_function *fn; struct rmi_2d_sensor sensor; struct rmi_2d_sensor_platform_data sensor_pdata; @@ -66,7 +65,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12) struct rmi_device *rmi_dev = fn->rmi_dev; int ret; int offset; - u8 buf[14]; + 
u8 buf[15]; int pitch_x = 0; int pitch_y = 0; int clip_x_low = 0; @@ -86,9 +85,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12) offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8); - if (item->reg_size > 14) { - dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n", - item->reg_size); + if (item->reg_size > sizeof(buf)) { + dev_err(&fn->dev, + "F12 control8 should be no bigger than %zd bytes, not: %ld\n", + sizeof(buf), item->reg_size); return -ENODEV; } @@ -11,6 +11,8 @@ #include <linux/rmi.h> #include <linux/irq.h> #include <linux/of.h> +#include <linux/delay.h> +#include <linux/regulator/consumer.h> #include "rmi_driver.h" #define BUFFER_SIZE_INCREMENT 32 @@ -37,6 +39,9 @@ struct rmi_i2c_xport { u8 *tx_buf; size_t tx_buf_size; + + struct regulator_bulk_data supplies[2]; + u32 startup_delay; }; #define RMI_PAGE_SELECT_REGISTER 0xff @@ -246,6 +251,24 @@ static int rmi_i2c_probe(struct i2c_client *client, return -ENODEV; } + rmi_i2c->supplies[0].supply = "vdd"; + rmi_i2c->supplies[1].supply = "vio"; + retval = devm_regulator_bulk_get(&client->dev, + ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + if (retval < 0) + return retval; + + retval = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + if (retval < 0) + return retval; + + of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms", + &rmi_i2c->startup_delay); + + msleep(rmi_i2c->startup_delay); + rmi_i2c->client = client; mutex_init(&rmi_i2c->page_mutex); @@ -286,6 +309,8 @@ static int rmi_i2c_remove(struct i2c_client *client) struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); rmi_unregister_transport_device(&rmi_i2c->xport); + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); return 0; } @@ -308,6 +333,10 @@ static int rmi_i2c_suspend(struct device *dev) dev_warn(dev, "Failed to enable irq for wake: %d\n", ret); } + + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + return ret; } @@ -317,6 +346,13 @@ static int rmi_i2c_resume(struct device *dev) struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); int ret; + ret = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + if (ret) + return ret; + + msleep(rmi_i2c->startup_delay); + enable_irq(rmi_i2c->irq); if (device_may_wakeup(&client->dev)) { ret = disable_irq_wake(rmi_i2c->irq); @@ -346,6 +382,9 @@ static int rmi_i2c_runtime_suspend(struct device *dev) disable_irq(rmi_i2c->irq); + regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + return 0; } @@ -355,6 +394,13 @@ static int rmi_i2c_runtime_resume(struct device *dev) struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client); int ret; + ret = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies), + rmi_i2c->supplies); + if (ret) + return ret; + + msleep(rmi_i2c->startup_delay); + enable_irq(rmi_i2c->irq); ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev); @@ -56,7 +56,7 @@ static int check_data(int data) /* it should be odd */ if (!(parity & 0x01)) { dev_warn(&ams_delta_serio->dev, - "paritiy check failed, data=0x%X parity=0x%X\n", + "parity check failed, data=0x%X parity=0x%X\n", data, parity); return SERIO_PARITY; } @@ -73,6 +73,21 @@ config TABLET_USB_KBTAB To compile this driver as a module, choose M here: the module will be called kbtab. 
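The rmi_i2c hunks above add a common power-sequencing pattern: claim both supplies as one bulk set, enable them before the first bus access, then honor an optional DT-specified power-on delay. A minimal sketch of that sequence, assuming the same "vdd"/"vio" supply names and the optional "syna,startup-delay-ms" property (the rmi_power_up() helper itself is hypothetical, not part of the patch):

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

/* Sketch only: mirrors the enable-then-wait sequence added to rmi_i2c_probe(). */
static int rmi_power_up(struct device *dev,
			struct regulator_bulk_data supplies[2],
			u32 *startup_delay)
{
	int ret;

	supplies[0].supply = "vdd";
	supplies[1].supply = "vio";

	ret = devm_regulator_bulk_get(dev, 2, supplies);
	if (ret < 0)
		return ret;

	ret = regulator_bulk_enable(2, supplies);
	if (ret < 0)
		return ret;

	/*
	 * Optional property: of_property_read_u32() leaves *startup_delay
	 * untouched when it is absent, so the caller should pass in a
	 * zero-initialized value (the driver's is devm_kzalloc'ed).
	 */
	of_property_read_u32(dev->of_node, "syna,startup-delay-ms",
			     startup_delay);
	msleep(*startup_delay);

	return 0;
}

The same enable-plus-delay pair is repeated in the resume paths above, and regulator_bulk_disable() undoes it in remove, suspend, and runtime-suspend.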
+config TABLET_USB_PEGASUS + tristate "Pegasus Mobile Notetaker Pen input tablet support" + depends on USB_ARCH_HAS_HCD + select USB + help + Say Y here if you want to use the Pegasus Mobile Notetaker, + also known as: + Genie e-note The Notetaker, + Staedtler Digital ballpoint pen 990 01, + IRISnotes Express or + NEWLink Digital Note Taker. + + To compile this driver as a module, choose M here: the + module will be called pegasus_notetaker. + config TABLET_SERIAL_WACOM4 tristate "Wacom protocol 4 serial tablet support" select SERIO @@ -8,4 +8,5 @@ obj-$(CONFIG_TABLET_USB_AIPTEK) += aiptek.o obj-$(CONFIG_TABLET_USB_GTCO) += gtco.o obj-$(CONFIG_TABLET_USB_HANWANG) += hanwang.o obj-$(CONFIG_TABLET_USB_KBTAB) += kbtab.o +obj-$(CONFIG_TABLET_USB_PEGASUS) += pegasus_notetaker.o obj-$(CONFIG_TABLET_SERIAL_WACOM4) += wacom_serial4.o diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c new file mode 100644 index 000000000000..949dacc78664 --- /dev/null +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -0,0 +1,450 @@ +/* + * Pegasus Mobile Notetaker Pen input tablet driver + * + * Copyright (c) 2016 Martin Kepplinger <martink@posteo.de> + */ + +/* + * request packet (control endpoint): + * |-------------------------------------| + * | Report ID | Nr of bytes | command | + * | (1 byte) | (1 byte) | (n bytes) | + * |-------------------------------------| + * | 0x02 | n | | + * |-------------------------------------| + * + * data packet after set xy mode command, 0x80 0xb5 0x02 0x01 + * and pen is in range: + * + * byte byte name value (bits) + * -------------------------------------------- + * 0 status 0 1 0 0 0 0 X X + * 1 color 0 0 0 0 H 0 S T + * 2 X low + * 3 X high + * 4 Y low + * 5 Y high + * + * X X battery state: + * no state reported 0x00 + * battery low 0x01 + * battery good 0x02 + * + * H Hovering + * S Switch 1 (pen button) + * T Tip + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/input.h> +#include <linux/usb/input.h> +#include <linux/slab.h> + +/* USB HID defines */ +#define USB_REQ_GET_REPORT 0x01 +#define USB_REQ_SET_REPORT 0x09 + +#define USB_VENDOR_ID_PEGASUSTECH 0x0e20 +#define USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100 0x0101 + +/* device specific defines */ +#define NOTETAKER_REPORT_ID 0x02 +#define NOTETAKER_SET_CMD 0x80 +#define NOTETAKER_SET_MODE 0xb5 + +#define NOTETAKER_LED_MOUSE 0x02 +#define PEN_MODE_XY 0x01 + +#define SPECIAL_COMMAND 0x80 +#define BUTTON_PRESSED 0xb5 +#define COMMAND_VERSION 0xa9 + +/* in xy data packet */ +#define BATTERY_NO_REPORT 0x40 +#define BATTERY_LOW 0x41 +#define BATTERY_GOOD 0x42 +#define PEN_BUTTON_PRESSED BIT(1) +#define PEN_TIP BIT(0) + +struct pegasus { + unsigned char *data; + u8 data_len; + dma_addr_t data_dma; + struct input_dev *dev; + struct usb_device *usbdev; + struct usb_interface *intf; + struct urb *irq; + char name[128]; + char phys[64]; + struct work_struct init; +}; + +static int pegasus_control_msg(struct pegasus *pegasus, u8 *data, int len) +{ + const int sizeof_buf = len + 2; + int result; + int error; + u8 *cmd_buf; + + cmd_buf = kmalloc(sizeof_buf, GFP_KERNEL); + if (!cmd_buf) + return -ENOMEM; + + cmd_buf[0] = NOTETAKER_REPORT_ID; + cmd_buf[1] = len; + memcpy(cmd_buf + 2, data, len); + + result = usb_control_msg(pegasus->usbdev, + usb_sndctrlpipe(pegasus->usbdev, 0), + USB_REQ_SET_REPORT, + USB_TYPE_VENDOR | USB_DIR_OUT, + 0, 0, cmd_buf, sizeof_buf, + USB_CTRL_SET_TIMEOUT); + + kfree(cmd_buf); + + if (unlikely(result != sizeof_buf)) { + error = result < 0 
? result : -EIO; + dev_err(&pegasus->usbdev->dev, "control msg error: %d\n", + error); + return error; + } + + return 0; +} + +static int pegasus_set_mode(struct pegasus *pegasus, u8 mode, u8 led) +{ + u8 cmd[] = { NOTETAKER_SET_CMD, NOTETAKER_SET_MODE, led, mode }; + + return pegasus_control_msg(pegasus, cmd, sizeof(cmd)); +} + +static void pegasus_parse_packet(struct pegasus *pegasus) +{ + unsigned char *data = pegasus->data; + struct input_dev *dev = pegasus->dev; + u16 x, y; + + switch (data[0]) { + case SPECIAL_COMMAND: + /* device button pressed */ + if (data[1] == BUTTON_PRESSED) + schedule_work(&pegasus->init); + + break; + + /* xy data */ + case BATTERY_LOW: + dev_warn_once(&dev->dev, "Pen battery low\n"); + /* fall through */ + + case BATTERY_NO_REPORT: + case BATTERY_GOOD: + x = le16_to_cpup((__le16 *)&data[2]); + y = le16_to_cpup((__le16 *)&data[4]); + + /* pen-up event */ + if (x == 0 && y == 0) + break; + + input_report_key(dev, BTN_TOUCH, data[1] & PEN_TIP); + input_report_key(dev, BTN_RIGHT, data[1] & PEN_BUTTON_PRESSED); + input_report_key(dev, BTN_TOOL_PEN, 1); + input_report_abs(dev, ABS_X, (s16)x); + input_report_abs(dev, ABS_Y, y); + + input_sync(dev); + break; + + default: + dev_warn_once(&pegasus->usbdev->dev, + "unknown answer from device\n"); + } +} + +static void pegasus_irq(struct urb *urb) +{ + struct pegasus *pegasus = urb->context; + struct usb_device *dev = pegasus->usbdev; + int retval; + + switch (urb->status) { + case 0: + pegasus_parse_packet(pegasus); + usb_mark_last_busy(pegasus->usbdev); + break; + + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + dev_err(&dev->dev, "%s - urb shutting down with status: %d", + __func__, urb->status); + return; + + default: + dev_err(&dev->dev, "%s - nonzero urb status received: %d", + __func__, urb->status); + break; + } + + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval) + dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d", + __func__, retval); +} + +static void pegasus_init(struct work_struct *work) +{ + struct pegasus *pegasus = container_of(work, struct pegasus, init); + int error; + + error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); + if (error) + dev_err(&pegasus->usbdev->dev, "pegasus_set_mode error: %d\n", + error); +} + +static int pegasus_open(struct input_dev *dev) +{ + struct pegasus *pegasus = input_get_drvdata(dev); + int error; + + error = usb_autopm_get_interface(pegasus->intf); + if (error) + return error; + + pegasus->irq->dev = pegasus->usbdev; + if (usb_submit_urb(pegasus->irq, GFP_KERNEL)) { + error = -EIO; + goto err_autopm_put; + } + + error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); + if (error) + goto err_kill_urb; + + return 0; + +err_kill_urb: + usb_kill_urb(pegasus->irq); + cancel_work_sync(&pegasus->init); +err_autopm_put: + usb_autopm_put_interface(pegasus->intf); + return error; +} + +static void pegasus_close(struct input_dev *dev) +{ + struct pegasus *pegasus = input_get_drvdata(dev); + + usb_kill_urb(pegasus->irq); + cancel_work_sync(&pegasus->init); + usb_autopm_put_interface(pegasus->intf); +} + +static int pegasus_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *endpoint; + struct pegasus *pegasus; + struct input_dev *input_dev; + int error; + int pipe; + + /* We control interface 0 */ + if (intf->cur_altsetting->desc.bInterfaceNumber >= 1) + return -ENODEV; + + /* Sanity check that the device has an endpoint 
*/ + if (intf->altsetting[0].desc.bNumEndpoints < 1) { + dev_err(&intf->dev, "Invalid number of endpoints\n"); + return -EINVAL; + } + + endpoint = &intf->cur_altsetting->endpoint[0].desc; + + pegasus = kzalloc(sizeof(*pegasus), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!pegasus || !input_dev) { + error = -ENOMEM; + goto err_free_mem; + } + + pegasus->usbdev = dev; + pegasus->dev = input_dev; + pegasus->intf = intf; + + pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); + pegasus->data_len = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); + + pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL, + &pegasus->data_dma); + if (!pegasus->data) { + error = -ENOMEM; + goto err_free_mem; + } + + pegasus->irq = usb_alloc_urb(0, GFP_KERNEL); + if (!pegasus->irq) { + error = -ENOMEM; + goto err_free_dma; + } + + usb_fill_int_urb(pegasus->irq, dev, pipe, + pegasus->data, pegasus->data_len, + pegasus_irq, pegasus, endpoint->bInterval); + + pegasus->irq->transfer_dma = pegasus->data_dma; + pegasus->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + if (dev->manufacturer) + strlcpy(pegasus->name, dev->manufacturer, + sizeof(pegasus->name)); + + if (dev->product) { + if (dev->manufacturer) + strlcat(pegasus->name, " ", sizeof(pegasus->name)); + strlcat(pegasus->name, dev->product, sizeof(pegasus->name)); + } + + if (!strlen(pegasus->name)) + snprintf(pegasus->name, sizeof(pegasus->name), + "USB Pegasus Device %04x:%04x", + le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + + usb_make_path(dev, pegasus->phys, sizeof(pegasus->phys)); + strlcat(pegasus->phys, "/input0", sizeof(pegasus->phys)); + + INIT_WORK(&pegasus->init, pegasus_init); + + usb_set_intfdata(intf, pegasus); + + input_dev->name = pegasus->name; + input_dev->phys = pegasus->phys; + usb_to_input_id(dev, &input_dev->id); + input_dev->dev.parent = &intf->dev; + + input_set_drvdata(input_dev, pegasus); + + input_dev->open = pegasus_open; + input_dev->close = pegasus_close; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(EV_KEY, input_dev->evbit); + + __set_bit(ABS_X, input_dev->absbit); + __set_bit(ABS_Y, input_dev->absbit); + + __set_bit(BTN_TOUCH, input_dev->keybit); + __set_bit(BTN_RIGHT, input_dev->keybit); + __set_bit(BTN_TOOL_PEN, input_dev->keybit); + + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + + input_set_abs_params(input_dev, ABS_X, -1500, 1500, 8, 0); + input_set_abs_params(input_dev, ABS_Y, 1600, 3000, 8, 0); + + error = input_register_device(pegasus->dev); + if (error) + goto err_free_urb; + + return 0; + +err_free_urb: + usb_free_urb(pegasus->irq); +err_free_dma: + usb_free_coherent(dev, pegasus->data_len, + pegasus->data, pegasus->data_dma); +err_free_mem: + input_free_device(input_dev); + kfree(pegasus); + usb_set_intfdata(intf, NULL); + + return error; +} + +static void pegasus_disconnect(struct usb_interface *intf) +{ + struct pegasus *pegasus = usb_get_intfdata(intf); + + input_unregister_device(pegasus->dev); + + usb_free_urb(pegasus->irq); + usb_free_coherent(interface_to_usbdev(intf), + pegasus->data_len, pegasus->data, + pegasus->data_dma); + + kfree(pegasus); + usb_set_intfdata(intf, NULL); +} + +static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct pegasus *pegasus = usb_get_intfdata(intf); + + mutex_lock(&pegasus->dev->mutex); + usb_kill_urb(pegasus->irq); + cancel_work_sync(&pegasus->init); + mutex_unlock(&pegasus->dev->mutex); + + return 0; +} + 
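The xy packet consumed by pegasus_parse_packet() above follows the 6-byte layout documented in the header comment. As a self-contained illustration, a user-space style sketch (not driver code; bit positions are taken from that comment and the defines above) of decoding one packet:

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode one 6-byte xy packet per the layout in the header comment. */
static void pegasus_decode_xy(const uint8_t p[6])
{
	int battery = p[0] & 0x03;            /* X X bits: 0 none, 1 low, 2 good */
	int hover = (p[1] >> 3) & 1;          /* H bit */
	int button = (p[1] >> 1) & 1;         /* S bit, reported as BTN_RIGHT */
	int tip = p[1] & 1;                   /* T bit, reported as BTN_TOUCH */
	int16_t x = (int16_t)(p[2] | (p[3] << 8));    /* little endian, signed */
	uint16_t y = (uint16_t)(p[4] | (p[5] << 8));  /* little endian */

	if (x == 0 && y == 0)                 /* the driver treats 0/0 as pen-up */
		return;

	printf("battery=%d hover=%d button=%d tip=%d x=%d y=%u\n",
	       battery, hover, button, tip, x, y);
}

The signed X interpretation matches the driver's (s16) cast and its -1500..1500 ABS_X range; Y stays unsigned within 1600..3000.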
+static int pegasus_resume(struct usb_interface *intf) +{ + struct pegasus *pegasus = usb_get_intfdata(intf); + int retval = 0; + + mutex_lock(&pegasus->dev->mutex); + if (pegasus->dev->users && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) + retval = -EIO; + mutex_unlock(&pegasus->dev->mutex); + + return retval; +} + +static int pegasus_reset_resume(struct usb_interface *intf) +{ + struct pegasus *pegasus = usb_get_intfdata(intf); + int retval = 0; + + mutex_lock(&pegasus->dev->mutex); + if (pegasus->dev->users) { + retval = pegasus_set_mode(pegasus, PEN_MODE_XY, + NOTETAKER_LED_MOUSE); + if (!retval && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) + retval = -EIO; + } + mutex_unlock(&pegasus->dev->mutex); + + return retval; +} + +static const struct usb_device_id pegasus_ids[] = { + { USB_DEVICE(USB_VENDOR_ID_PEGASUSTECH, + USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100) }, + { } +}; +MODULE_DEVICE_TABLE(usb, pegasus_ids); + +static struct usb_driver pegasus_driver = { + .name = "pegasus_notetaker", + .probe = pegasus_probe, + .disconnect = pegasus_disconnect, + .suspend = pegasus_suspend, + .resume = pegasus_resume, + .reset_resume = pegasus_reset_resume, + .id_table = pegasus_ids, + .supports_autosuspend = 1, +}; + +module_usb_driver(pegasus_driver); + +MODULE_AUTHOR("Martin Kepplinger <martink@posteo.de>"); +MODULE_DESCRIPTION("Pegasus Mobile Notetaker Pen tablet driver"); +MODULE_LICENSE("GPL"); @@ -632,7 +632,7 @@ config TOUCHSCREEN_EDT_FT5X06 config TOUCHSCREEN_MIGOR tristate "Renesas MIGO-R touchscreen" - depends on SH_MIGOR && I2C + depends on (SH_MIGOR || COMPILE_TEST) && I2C help Say Y here to enable MIGO-R touchscreen support. @@ -1046,6 +1046,19 @@ config TOUCHSCREEN_PCAP To compile this driver as a module, choose M here: the module will be called pcap_ts. +config TOUCHSCREEN_RM_TS + tristate "Raydium I2C Touchscreen" + depends on I2C + depends on GPIOLIB || COMPILE_TEST + help + Say Y here if you have a Raydium series I2C touchscreen, + such as RM32380, connected to your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called raydium_i2c_ts. + config TOUCHSCREEN_ST1232 tristate "Sitronix ST1232 touchscreen controllers" depends on I2C @@ -1094,6 +1107,19 @@ config TOUCHSCREEN_SUR40 To compile this driver as a module, choose M here: the module will be called sur40. +config TOUCHSCREEN_SURFACE3_SPI + tristate "Ntrig/Microsoft Surface 3 SPI touchscreen" + depends on SPI + depends on GPIOLIB || COMPILE_TEST + help + Say Y here if you have the Ntrig/Microsoft SPI touchscreen + controller chip as found on the Surface 3 in your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called surface3_spi. 
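The touchscreen hunks that follow (icn8318, edt_ft5x06, pixcir, and the of_touchscreen helpers) all funnel their coordinate fix-ups through one transform. Restated as a stand-alone sketch, using the same invert-before-swap order as the touchscreen_apply_prop_to_x_y() helper introduced further down (the ts_prop names here are illustrative, not the kernel's):

#include <stdbool.h>

/* Sketch: the invert-then-swap transform the new helpers centralize. */
struct ts_prop {
	unsigned int max_x, max_y;
	bool invert_x, invert_y, swap_x_y;
};

static void ts_apply_prop(const struct ts_prop *p,
			  unsigned int *x, unsigned int *y)
{
	unsigned int tmp;

	if (p->invert_x)
		*x = p->max_x - *x;	/* mirror across the axis maximum */

	if (p->invert_y)
		*y = p->max_y - *y;

	if (p->swap_x_y) {		/* swap only after inverting */
		tmp = *x;
		*x = *y;
		*y = tmp;
	}
}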
+ config TOUCHSCREEN_SX8654 tristate "Semtech SX8654 touchscreen" depends on I2C @@ -62,11 +62,13 @@ obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o +obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o +obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI) += surface3_spi.o obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o @@ -595,7 +595,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq, } else { input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0); - touchscreen_parse_properties(input_dev, false); + touchscreen_parse_properties(input_dev, false, NULL); if (!input_abs_get_max(input_dev, ABS_PRESSURE)) { dev_err(dev, "Touchscreen pressure is not specified\n"); return ERR_PTR(-EINVAL); @@ -17,6 +17,7 @@ #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> +#include <linux/input/touchscreen.h> #include <linux/module.h> #include <linux/of.h> @@ -52,11 +53,7 @@ struct icn8318_data { struct i2c_client *client; struct input_dev *input; struct gpio_desc *wake_gpio; - u32 max_x; - u32 max_y; - bool invert_x; - bool invert_y; - bool swap_x_y; + struct touchscreen_properties prop; }; static int icn8318_read_touch_data(struct i2c_client *client, @@ -91,7 +88,7 @@ static irqreturn_t icn8318_irq(int irq, void *dev_id) struct icn8318_data *data = dev_id; struct device *dev = &data->client->dev; struct icn8318_touch_data touch_data; - int i, ret, x, y; + int i, ret; ret = icn8318_read_touch_data(data->client, &touch_data); if (ret < 0) { @@ -124,22 +121,9 @@ static irqreturn_t icn8318_irq(int irq, void *dev_id) if (!act) continue; - x = be16_to_cpu(touch->x); - y = be16_to_cpu(touch->y); - - if (data->invert_x) - x = data->max_x - x; - - if (data->invert_y) - y = data->max_y - y; - - if (!data->swap_x_y) { - input_event(data->input, EV_ABS, ABS_MT_POSITION_X, x); - input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, y); - } else { - input_event(data->input, EV_ABS, ABS_MT_POSITION_X, y); - input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, x); - } + touchscreen_report_pos(data->input, &data->prop, + be16_to_cpu(touch->x), + be16_to_cpu(touch->y), true); } input_mt_sync_frame(data->input); @@ -200,10 +184,8 @@ static int icn8318_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; - struct device_node *np = dev->of_node; struct icn8318_data *data; struct input_dev *input; - u32 fuzz_x = 0, fuzz_y = 0; int error; if (!client->irq) { @@ -223,19 +205,6 @@ static int icn8318_probe(struct i2c_client *client, return error; } - if (of_property_read_u32(np, "touchscreen-size-x", &data->max_x) || - of_property_read_u32(np, "touchscreen-size-y", &data->max_y)) { - dev_err(dev, "Error touchscreen-size-x and/or -y missing\n"); - return -EINVAL; - } - - /* Optional */ - of_property_read_u32(np, "touchscreen-fuzz-x", &fuzz_x); - of_property_read_u32(np, "touchscreen-fuzz-y", &fuzz_y); - data->invert_x = of_property_read_bool(np, "touchscreen-inverted-x"); - data->invert_y = 
of_property_read_bool(np, "touchscreen-inverted-y"); - data->swap_x_y = of_property_read_bool(np, "touchscreen-swapped-x-y"); - input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; @@ -246,16 +215,14 @@ static int icn8318_probe(struct i2c_client *client, input->close = icn8318_stop; input->dev.parent = dev; - if (!data->swap_x_y) { - input_set_abs_params(input, ABS_MT_POSITION_X, 0, - data->max_x, fuzz_x, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, - data->max_y, fuzz_y, 0); - } else { - input_set_abs_params(input, ABS_MT_POSITION_X, 0, - data->max_y, fuzz_y, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, - data->max_x, fuzz_x, 0); + input_set_capability(input, EV_ABS, ABS_MT_POSITION_X); + input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y); + + touchscreen_parse_properties(input, true, &data->prop); + if (!input_abs_get_max(input, ABS_MT_POSITION_X) || + !input_abs_get_max(input, ABS_MT_POSITION_Y)) { + dev_err(dev, "Error touchscreen-size-x and/or -y missing\n"); + return -EINVAL; } error = input_mt_init_slots(input, ICN8318_MAX_TOUCHES, @@ -657,7 +657,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X); input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y); - touchscreen_parse_properties(input_dev, true); + touchscreen_parse_properties(input_dev, true, NULL); error = input_mt_init_slots(input_dev, CY_MAX_ID, 0); if (error) { @@ -86,6 +86,7 @@ struct edt_reg_addr { struct edt_ft5x06_ts_data { struct i2c_client *client; struct input_dev *input; + struct touchscreen_properties prop; u16 num_x; u16 num_y; @@ -246,8 +247,8 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id) if (!down) continue; - input_report_abs(tsdata->input, ABS_MT_POSITION_X, x); - input_report_abs(tsdata->input, ABS_MT_POSITION_Y, y); + touchscreen_report_pos(tsdata->input, &tsdata->prop, x, y, + true); } input_mt_report_pointer_emulation(tsdata->input, true); @@ -972,7 +973,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client, input_set_abs_params(input, ABS_MT_POSITION_Y, 0, tsdata->num_y * 64 - 1, 0, 0); - touchscreen_parse_properties(input, true); + touchscreen_parse_properties(input, true, &tsdata->prop); error = input_mt_init_slots(input, tsdata->max_support_points, INPUT_MT_DIRECT); @@ -202,7 +202,7 @@ static int migor_ts_remove(struct i2c_client *client) return 0; } -static int migor_ts_suspend(struct device *dev) +static int __maybe_unused migor_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = i2c_get_clientdata(client); @@ -213,7 +213,7 @@ static int migor_ts_suspend(struct device *dev) return 0; } -static int migor_ts_resume(struct device *dev) +static int __maybe_unused migor_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct migor_ts_priv *priv = i2c_get_clientdata(client); @@ -230,7 +230,7 @@ static const struct i2c_device_id migor_ts_id[] = { { "migor_ts", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, migor_ts); +MODULE_DEVICE_TABLE(i2c, migor_ts_id); static struct i2c_driver migor_ts_driver = { .driver = { @@ -55,12 +55,16 @@ static void touchscreen_set_params(struct input_dev *dev, * @input: input device that should be parsed * @multitouch: specifies whether parsed properties should be applied to * single-touch or multi-touch axes + * @prop: pointer to a struct touchscreen_properties into which to store + * axis swap and invert info for use with touchscreen_report_pos(); + or %NULL * * This function parses common DT properties for touchscreens and sets up the * input device accordingly. The function keeps previously set up default * values if no value is specified via DT. */ -void touchscreen_parse_properties(struct input_dev *input, bool multitouch) +void touchscreen_parse_properties(struct input_dev *input, bool multitouch, + struct touchscreen_properties *prop) { struct device *dev = input->dev.parent; unsigned int axis; @@ -104,5 +108,80 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch) &fuzz); if (data_present) touchscreen_set_params(input, axis, maximum, fuzz); + + if (!prop) + return; + + axis = multitouch ? ABS_MT_POSITION_X : ABS_X; + + prop->max_x = input_abs_get_max(input, axis); + prop->max_y = input_abs_get_max(input, axis + 1); + prop->invert_x = + device_property_read_bool(dev, "touchscreen-inverted-x"); + prop->invert_y = + device_property_read_bool(dev, "touchscreen-inverted-y"); + prop->swap_x_y = + device_property_read_bool(dev, "touchscreen-swapped-x-y"); + + if (prop->swap_x_y) + swap(input->absinfo[axis], input->absinfo[axis + 1]); } EXPORT_SYMBOL(touchscreen_parse_properties); + +static void +touchscreen_apply_prop_to_x_y(const struct touchscreen_properties *prop, + unsigned int *x, unsigned int *y) +{ + if (prop->invert_x) + *x = prop->max_x - *x; + + if (prop->invert_y) + *y = prop->max_y - *y; + + if (prop->swap_x_y) + swap(*x, *y); +} + +/** + * touchscreen_set_mt_pos - Set input_mt_pos coordinates + * @pos: input_mt_pos to set coordinates of + * @prop: pointer to a struct touchscreen_properties + * @x: X coordinate to store in pos + * @y: Y coordinate to store in pos + * + * Adjust the passed in x and y values applying any axis inversion and + * swapping requested in the passed in touchscreen_properties and store + * the result in a struct input_mt_pos. + */ +void touchscreen_set_mt_pos(struct input_mt_pos *pos, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y) +{ + touchscreen_apply_prop_to_x_y(prop, &x, &y); + pos->x = x; + pos->y = y; +} +EXPORT_SYMBOL(touchscreen_set_mt_pos); + +/** + * touchscreen_report_pos - Report touchscreen coordinates + * @input: input_device to report coordinates for + * @prop: pointer to a struct touchscreen_properties + * @x: X coordinate to report + * @y: Y coordinate to report + * @multitouch: Report coordinates on single-touch or multi-touch axes + * + * Adjust the passed in x and y values applying any axis inversion and + * swapping requested in the passed in touchscreen_properties and then + * report the resulting coordinates on the input_dev's x and y axis. + */ +void touchscreen_report_pos(struct input_dev *input, + const struct touchscreen_properties *prop, + unsigned int x, unsigned int y, + bool multitouch) +{ + touchscreen_apply_prop_to_x_y(prop, &x, &y); + input_report_abs(input, multitouch ? ABS_MT_POSITION_X : ABS_X, x); + input_report_abs(input, multitouch ? 
ABS_MT_POSITION_Y : ABS_Y, y); +} +EXPORT_SYMBOL(touchscreen_report_pos); @@ -27,9 +27,9 @@ #include <linux/input/touchscreen.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> -/*#include <linux/of.h>*/ #include <linux/of_device.h> #include <linux/platform_data/pixcir_i2c_ts.h> +#include <asm/unaligned.h> #define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */ @@ -41,19 +41,15 @@ struct pixcir_i2c_ts_data { struct gpio_desc *gpio_enable; struct gpio_desc *gpio_wake; const struct pixcir_i2c_chip_data *chip; + struct touchscreen_properties prop; int max_fingers; /* Max fingers supported in this instance */ bool running; }; -struct pixcir_touch { - int x; - int y; - int id; -}; - struct pixcir_report_data { int num_touches; - struct pixcir_touch touches[PIXCIR_MAX_SLOTS]; + struct input_mt_pos pos[PIXCIR_MAX_SLOTS]; + int ids[PIXCIR_MAX_SLOTS]; }; static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata, @@ -98,11 +94,11 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata, bufptr = &rdbuf[2]; for (i = 0; i < touch; i++) { - report->touches[i].x = (bufptr[1] << 8) | bufptr[0]; - report->touches[i].y = (bufptr[3] << 8) | bufptr[2]; - + touchscreen_set_mt_pos(&report->pos[i], &tsdata->prop, + get_unaligned_le16(bufptr), + get_unaligned_le16(bufptr + 2)); if (chip->has_hw_ids) { - report->touches[i].id = bufptr[4]; + report->ids[i] = bufptr[4]; bufptr = bufptr + 5; } else { bufptr = bufptr + 4; @@ -113,9 +109,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata, static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts, struct pixcir_report_data *report) { - struct input_mt_pos pos[PIXCIR_MAX_SLOTS]; int slots[PIXCIR_MAX_SLOTS]; - struct pixcir_touch *touch; int n, i, slot; struct device *dev = &ts->client->dev; const struct pixcir_i2c_chip_data *chip = ts->chip; @@ -124,24 +118,16 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts, if (n > PIXCIR_MAX_SLOTS) n = PIXCIR_MAX_SLOTS; - if (!ts->chip->has_hw_ids) { - for (i = 0; i < n; i++) { - touch = &report->touches[i]; - pos[i].x = touch->x; - pos[i].y = touch->y; - } - - input_mt_assign_slots(ts->input, slots, pos, n, 0); - } + if (!ts->chip->has_hw_ids) + input_mt_assign_slots(ts->input, slots, report->pos, n, 0); for (i = 0; i < n; i++) { - touch = &report->touches[i]; - if (chip->has_hw_ids) { - slot = input_mt_get_slot_by_key(ts->input, touch->id); + slot = input_mt_get_slot_by_key(ts->input, + report->ids[i]); if (slot < 0) { dev_dbg(dev, "no free slot for id 0x%x\n", - touch->id); + report->ids[i]); continue; } } else { @@ -149,14 +135,15 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts, } input_mt_slot(ts->input, slot); - input_mt_report_slot_state(ts->input, - MT_TOOL_FINGER, true); + input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true); - input_event(ts->input, EV_ABS, ABS_MT_POSITION_X, touch->x); - input_event(ts->input, EV_ABS, ABS_MT_POSITION_Y, touch->y); + input_report_abs(ts->input, ABS_MT_POSITION_X, + report->pos[i].x); + input_report_abs(ts->input, ABS_MT_POSITION_Y, + report->pos[i].y); dev_dbg(dev, "%d: slot %d, x %d, y %d\n", - i, slot, touch->x, touch->y); + i, slot, report->pos[i].x, report->pos[i].y); } input_mt_sync_frame(ts->input); @@ -515,7 +502,7 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client, } else { input_set_capability(input, EV_ABS, ABS_MT_POSITION_X); input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y); - touchscreen_parse_properties(input, true); + touchscreen_parse_properties(input, true, &tsdata->prop); if 
(!input_abs_get_max(input, ABS_MT_POSITION_X) || !input_abs_get_max(input, ABS_MT_POSITION_Y)) { dev_err(dev, "Touchscreen size is not specified\n"); diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c new file mode 100644 index 000000000000..a99fb5cac5a0 --- /dev/null +++ b/drivers/input/touchscreen/raydium_i2c_ts.c @@ -0,0 +1,1238 @@ +/* + * Raydium touchscreen I2C driver. + * + * Copyright (C) 2012-2014, Raydium Semiconductor Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Raydium reserves the right to make changes without further notice + * to the materials described herein. Raydium does not assume any + * liability arising out of the application described herein. + * + * Contact Raydium Semiconductor Corporation at www.rad-ic.com + */ + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/input/mt.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <asm/unaligned.h> + +/* Slave I2C mode */ +#define RM_BOOT_BLDR 0x02 +#define RM_BOOT_MAIN 0x03 + +/* I2C bootloader commands */ +#define RM_CMD_BOOT_PAGE_WRT 0x0B /* send bl page write */ +#define RM_CMD_BOOT_WRT 0x11 /* send bl write */ +#define RM_CMD_BOOT_ACK 0x22 /* send ack */ +#define RM_CMD_BOOT_CHK 0x33 /* send data check */ +#define RM_CMD_BOOT_READ 0x44 /* send wait bl data ready */ + +#define RM_BOOT_RDY 0xFF /* bl data ready */ + +/* I2C main commands */ +#define RM_CMD_QUERY_BANK 0x2B +#define RM_CMD_DATA_BANK 0x4D +#define RM_CMD_ENTER_SLEEP 0x4E +#define RM_CMD_BANK_SWITCH 0xAA + +#define RM_RESET_MSG_ADDR 0x40000004 + +#define RM_MAX_READ_SIZE 56 +#define RM_PACKET_CRC_SIZE 2 + +/* Touch related info */ +#define RM_MAX_RETRIES 3 +#define RM_MAX_TOUCH_NUM 10 +#define RM_BOOT_DELAY_MS 100 + +/* Offsets in contact data */ +#define RM_CONTACT_STATE_POS 0 +#define RM_CONTACT_X_POS 1 +#define RM_CONTACT_Y_POS 3 +#define RM_CONTACT_PRESSURE_POS 5 +#define RM_CONTACT_WIDTH_X_POS 6 +#define RM_CONTACT_WIDTH_Y_POS 7 + +/* Bootloader related info */ +#define RM_BL_WRT_CMD_SIZE 3 /* bl flash wrt cmd size */ +#define RM_BL_WRT_PKG_SIZE 32 /* bl wrt pkg size */ +#define RM_BL_WRT_LEN (RM_BL_WRT_PKG_SIZE + RM_BL_WRT_CMD_SIZE) +#define RM_FW_PAGE_SIZE 128 +#define RM_MAX_FW_RETRIES 30 +#define RM_MAX_FW_SIZE 0xD000 + +#define RM_POWERON_DELAY_USEC 500 +#define RM_RESET_DELAY_MSEC 50 + +enum raydium_bl_cmd { + BL_HEADER = 0, + BL_PAGE_STR, + BL_PKG_IDX, + BL_DATA_STR, +}; + +enum raydium_bl_ack { + RAYDIUM_ACK_NULL = 0, + RAYDIUM_WAIT_READY, + RAYDIUM_PATH_READY, +}; + +enum raydium_boot_mode { + RAYDIUM_TS_MAIN = 0, + RAYDIUM_TS_BLDR, +}; + +/* Response to RM_CMD_DATA_BANK request */ +struct raydium_data_info { + __le32 data_bank_addr; + u8 pkg_size; + u8 tp_info_size; +}; + +struct raydium_info { + __le32 hw_ver; /* device version */ + u8 main_ver; + u8 sub_ver; + __le16 ft_ver; /* test version */ + u8 x_num; + u8 y_num; + 
__le16 x_max; + __le16 y_max; + u8 x_res; /* units/mm */ + u8 y_res; /* units/mm */ +}; + +/* struct raydium_data - represents state of Raydium touchscreen device */ +struct raydium_data { + struct i2c_client *client; + struct input_dev *input; + + struct regulator *avdd; + struct regulator *vccio; + struct gpio_desc *reset_gpio; + + struct raydium_info info; + + struct mutex sysfs_mutex; + + u8 *report_data; + + u32 data_bank_addr; + u8 report_size; + u8 contact_size; + u8 pkg_size; + + enum raydium_boot_mode boot_mode; + + bool wake_irq_enabled; +}; + +static int raydium_i2c_send(struct i2c_client *client, + u8 addr, const void *data, size_t len) +{ + u8 *buf; + int tries = 0; + int ret; + + buf = kmalloc(len + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = addr; + memcpy(buf + 1, data, len); + + do { + ret = i2c_master_send(client, buf, len + 1); + if (likely(ret == len + 1)) + break; + + msleep(20); + } while (++tries < RM_MAX_RETRIES); + + kfree(buf); + + if (unlikely(ret != len + 1)) { + if (ret >= 0) + ret = -EIO; + dev_err(&client->dev, "%s failed: %d\n", __func__, ret); + return ret; + } + + return 0; +} + +static int raydium_i2c_read(struct i2c_client *client, + u8 addr, void *data, size_t len) +{ + struct i2c_msg xfer[] = { + { + .addr = client->addr, + .len = 1, + .buf = &addr, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = len, + .buf = data, + } + }; + int ret; + + ret = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer)); + if (unlikely(ret != ARRAY_SIZE(xfer))) + return ret < 0 ? ret : -EIO; + + return 0; +} + +static int raydium_i2c_read_message(struct i2c_client *client, + u32 addr, void *data, size_t len) +{ + __be32 be_addr; + size_t xfer_len; + int error; + + while (len) { + xfer_len = min_t(size_t, len, RM_MAX_READ_SIZE); + + be_addr = cpu_to_be32(addr); + + error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH, + &be_addr, sizeof(be_addr)); + if (!error) + error = raydium_i2c_read(client, addr & 0xff, + data, xfer_len); + if (error) + return error; + + len -= xfer_len; + data += xfer_len; + addr += xfer_len; + } + + return 0; +} + +static int raydium_i2c_send_message(struct i2c_client *client, + u32 addr, const void *data, size_t len) +{ + __be32 be_addr = cpu_to_be32(addr); + int error; + + error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH, + &be_addr, sizeof(be_addr)); + if (!error) + error = raydium_i2c_send(client, addr & 0xff, data, len); + + return error; +} + +static int raydium_i2c_sw_reset(struct i2c_client *client) +{ + const u8 soft_rst_cmd = 0x01; + int error; + + error = raydium_i2c_send_message(client, RM_RESET_MSG_ADDR, + &soft_rst_cmd, sizeof(soft_rst_cmd)); + if (error) { + dev_err(&client->dev, "software reset failed: %d\n", error); + return error; + } + + msleep(RM_RESET_DELAY_MSEC); + + return 0; +} + +static int raydium_i2c_query_ts_info(struct raydium_data *ts) +{ + struct i2c_client *client = ts->client; + struct raydium_data_info data_info; + __le32 query_bank_addr; + + int error, retry_cnt; + + for (retry_cnt = 0; retry_cnt < RM_MAX_RETRIES; retry_cnt++) { + error = raydium_i2c_read(client, RM_CMD_DATA_BANK, + &data_info, sizeof(data_info)); + if (error) + continue; + + /* + * Warn user if we already allocated memory for reports and + * then the size changed (due to firmware update?) and keep + * old size instead. 
+ */ + if (ts->report_data && ts->pkg_size != data_info.pkg_size) { + dev_warn(&client->dev, + "report size changes, was: %d, new: %d\n", + ts->pkg_size, data_info.pkg_size); + } else { + ts->pkg_size = data_info.pkg_size; + ts->report_size = ts->pkg_size - RM_PACKET_CRC_SIZE; + } + + ts->contact_size = data_info.tp_info_size; + ts->data_bank_addr = le32_to_cpu(data_info.data_bank_addr); + + dev_dbg(&client->dev, + "data_bank_addr: %#08x, report_size: %d, contact_size: %d\n", + ts->data_bank_addr, ts->report_size, ts->contact_size); + + error = raydium_i2c_read(client, RM_CMD_QUERY_BANK, + &query_bank_addr, + sizeof(query_bank_addr)); + if (error) + continue; + + error = raydium_i2c_read_message(client, + le32_to_cpu(query_bank_addr), + &ts->info, sizeof(ts->info)); + if (error) + continue; + + return 0; + } + + dev_err(&client->dev, "failed to query device parameters: %d\n", error); + return error; +} + +static int raydium_i2c_check_fw_status(struct raydium_data *ts) +{ + struct i2c_client *client = ts->client; + static const u8 bl_ack = 0x62; + static const u8 main_ack = 0x66; + u8 buf[4]; + int error; + + error = raydium_i2c_read(client, RM_CMD_BOOT_READ, buf, sizeof(buf)); + if (!error) { + if (buf[0] == bl_ack) + ts->boot_mode = RAYDIUM_TS_BLDR; + else if (buf[0] == main_ack) + ts->boot_mode = RAYDIUM_TS_MAIN; + return 0; + } + + return error; +} + +static int raydium_i2c_initialize(struct raydium_data *ts) +{ + struct i2c_client *client = ts->client; + int error, retry_cnt; + + for (retry_cnt = 0; retry_cnt < RM_MAX_RETRIES; retry_cnt++) { + /* Wait for Hello packet */ + msleep(RM_BOOT_DELAY_MS); + + error = raydium_i2c_check_fw_status(ts); + if (error) { + dev_err(&client->dev, + "failed to read 'hello' packet: %d\n", error); + continue; + } + + if (ts->boot_mode == RAYDIUM_TS_BLDR || + ts->boot_mode == RAYDIUM_TS_MAIN) { + break; + } + } + + if (error) + ts->boot_mode = RAYDIUM_TS_BLDR; + + if (ts->boot_mode == RAYDIUM_TS_BLDR) { + ts->info.hw_ver = cpu_to_le32(0xffffffffUL); + ts->info.main_ver = 0xff; + ts->info.sub_ver = 0xff; + } else { + raydium_i2c_query_ts_info(ts); + } + + return error; +} + +static int raydium_i2c_bl_chk_state(struct i2c_client *client, + enum raydium_bl_ack state) +{ + static const u8 ack_ok[] = { 0xFF, 0x39, 0x30, 0x30, 0x54 }; + u8 rbuf[sizeof(ack_ok)]; + u8 retry; + int error; + + for (retry = 0; retry < RM_MAX_FW_RETRIES; retry++) { + switch (state) { + case RAYDIUM_ACK_NULL: + return 0; + + case RAYDIUM_WAIT_READY: + error = raydium_i2c_read(client, RM_CMD_BOOT_CHK, + &rbuf[0], 1); + if (!error && rbuf[0] == RM_BOOT_RDY) + return 0; + + break; + + case RAYDIUM_PATH_READY: + error = raydium_i2c_read(client, RM_CMD_BOOT_CHK, + rbuf, sizeof(rbuf)); + if (!error && !memcmp(rbuf, ack_ok, sizeof(ack_ok))) + return 0; + + break; + + default: + dev_err(&client->dev, "%s: invalid target state %d\n", + __func__, state); + return -EINVAL; + } + + msleep(20); + } + + return -ETIMEDOUT; +} + +static int raydium_i2c_write_object(struct i2c_client *client, + const void *data, size_t len, + enum raydium_bl_ack state) +{ + int error; + + error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len); + if (error) { + dev_err(&client->dev, "WRT obj command failed: %d\n", + error); + return error; + } + + error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0); + if (error) { + dev_err(&client->dev, "Ack obj command failed: %d\n", error); + return error; + } + + error = raydium_i2c_bl_chk_state(client, state); + if (error) { + dev_err(&client->dev, "BL check state 
failed: %d\n", error); + return error; + } + return 0; +} + +static bool raydium_i2c_boot_trigger(struct i2c_client *client) +{ + static const u8 cmd[7][6] = { + { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 }, + { 0x08, 0x04, 0x09, 0x00, 0x50, 0xA5 }, + { 0x08, 0x04, 0x09, 0x00, 0x50, 0x00 }, + { 0x08, 0x04, 0x09, 0x00, 0x50, 0xA5 }, + { 0x08, 0x0C, 0x09, 0x00, 0x50, 0x00 }, + { 0x06, 0x01, 0x00, 0x00, 0x00, 0x00 }, + { 0x02, 0xA2, 0x00, 0x00, 0x00, 0x00 }, + }; + int i; + int error; + + for (i = 0; i < 7; i++) { + error = raydium_i2c_write_object(client, cmd[i], sizeof(cmd[i]), + RAYDIUM_WAIT_READY); + if (error) { + dev_err(&client->dev, + "boot trigger failed at step %d: %d\n", + i, error); + return error; + } + } + + return 0; +} + +static bool raydium_i2c_fw_trigger(struct i2c_client *client) +{ + static const u8 cmd[5][11] = { + { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 }, + { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0xA5, 0, 0, 0 }, + { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0x00, 0, 0, 0 }, + { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0xA5, 0, 0, 0 }, + { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0x00, 0, 0, 0 }, + }; + int i; + int error; + + for (i = 0; i < 5; i++) { + error = raydium_i2c_write_object(client, cmd[i], sizeof(cmd[i]), + RAYDIUM_ACK_NULL); + if (error) { + dev_err(&client->dev, + "fw trigger failed at step %d: %d\n", + i, error); + return error; + } + } + + return 0; +} + +static int raydium_i2c_check_path(struct i2c_client *client) +{ + static const u8 cmd[] = { 0x09, 0x00, 0x09, 0x00, 0x50, 0x10, 0x00 }; + int error; + + error = raydium_i2c_write_object(client, cmd, sizeof(cmd), + RAYDIUM_PATH_READY); + if (error) { + dev_err(&client->dev, "check path command failed: %d\n", error); + return error; + } + + return 0; +} + +static int raydium_i2c_enter_bl(struct i2c_client *client) +{ + static const u8 cal_cmd[] = { 0x00, 0x01, 0x52 }; + int error; + + error = raydium_i2c_write_object(client, cal_cmd, sizeof(cal_cmd), + RAYDIUM_ACK_NULL); + if (error) { + dev_err(&client->dev, "enter bl command failed: %d\n", error); + return error; + } + + msleep(RM_BOOT_DELAY_MS); + return 0; +} + +static int raydium_i2c_leave_bl(struct i2c_client *client) +{ + static const u8 leave_cmd[] = { 0x05, 0x00 }; + int error; + + error = raydium_i2c_write_object(client, leave_cmd, sizeof(leave_cmd), + RAYDIUM_ACK_NULL); + if (error) { + dev_err(&client->dev, "leave bl command failed: %d\n", error); + return error; + } + + msleep(RM_BOOT_DELAY_MS); + return 0; +} + +static int raydium_i2c_write_checksum(struct i2c_client *client, + size_t length, u16 checksum) +{ + u8 checksum_cmd[] = { 0x00, 0x05, 0x6D, 0x00, 0x00, 0x00, 0x00 }; + int error; + + put_unaligned_le16(length, &checksum_cmd[3]); + put_unaligned_le16(checksum, &checksum_cmd[5]); + + error = raydium_i2c_write_object(client, + checksum_cmd, sizeof(checksum_cmd), + RAYDIUM_ACK_NULL); + if (error) { + dev_err(&client->dev, "failed to write checksum: %d\n", + error); + return error; + } + + return 0; +} + +static int raydium_i2c_disable_watch_dog(struct i2c_client *client) +{ + static const u8 cmd[] = { 0x0A, 0xAA }; + int error; + + error = raydium_i2c_write_object(client, cmd, sizeof(cmd), + RAYDIUM_WAIT_READY); + if (error) { + dev_err(&client->dev, "disable watchdog command failed: %d\n", + error); + return error; + } + + return 0; +} + +static int raydium_i2c_fw_write_page(struct i2c_client *client, + u16 page_idx, const void *data, size_t len) +{ + u8 buf[RM_BL_WRT_LEN]; + size_t xfer_len; + int error; + int i; + + 
BUILD_BUG_ON((RM_FW_PAGE_SIZE % RM_BL_WRT_PKG_SIZE) != 0); + + for (i = 0; i < RM_FW_PAGE_SIZE / RM_BL_WRT_PKG_SIZE; i++) { + buf[BL_HEADER] = RM_CMD_BOOT_PAGE_WRT; + buf[BL_PAGE_STR] = page_idx ? 0xff : 0; + buf[BL_PKG_IDX] = i + 1; + + xfer_len = min_t(size_t, len, RM_BL_WRT_PKG_SIZE); + memcpy(&buf[BL_DATA_STR], data, xfer_len); + if (len < RM_BL_WRT_PKG_SIZE) + memset(&buf[BL_DATA_STR + xfer_len], 0xff, + RM_BL_WRT_PKG_SIZE - xfer_len); + + error = raydium_i2c_write_object(client, buf, RM_BL_WRT_LEN, + RAYDIUM_WAIT_READY); + if (error) { + dev_err(&client->dev, + "page write command failed for page %d, chunk %d: %d\n", + page_idx, i, error); + return error; + } + + data += xfer_len; + len -= xfer_len; + } + + return error; +} + +static u16 raydium_calc_chksum(const u8 *buf, u16 len) +{ + u16 checksum = 0; + u16 i; + + for (i = 0; i < len; i++) + checksum += buf[i]; + + return checksum; +} + +static int raydium_i2c_do_update_firmware(struct raydium_data *ts, + const struct firmware *fw) +{ + struct i2c_client *client = ts->client; + const void *data; + size_t data_len; + size_t len; + int page_nr; + int i; + int error; + u16 fw_checksum; + + if (fw->size == 0 || fw->size > RM_MAX_FW_SIZE) { + dev_err(&client->dev, "Invalid firmware length\n"); + return -EINVAL; + } + + error = raydium_i2c_check_fw_status(ts); + if (error) { + dev_err(&client->dev, "Unable to access IC %d\n", error); + return error; + } + + if (ts->boot_mode == RAYDIUM_TS_MAIN) { + for (i = 0; i < RM_MAX_RETRIES; i++) { + error = raydium_i2c_enter_bl(client); + if (!error) { + error = raydium_i2c_check_fw_status(ts); + if (error) { + dev_err(&client->dev, + "unable to access IC: %d\n", + error); + return error; + } + + if (ts->boot_mode == RAYDIUM_TS_BLDR) + break; + } + } + + if (ts->boot_mode == RAYDIUM_TS_MAIN) { + dev_err(&client->dev, + "failied to jump to boot loader: %d\n", + error); + return -EIO; + } + } + + error = raydium_i2c_disable_watch_dog(client); + if (error) + return error; + + error = raydium_i2c_check_path(client); + if (error) + return error; + + error = raydium_i2c_boot_trigger(client); + if (error) { + dev_err(&client->dev, "send boot trigger fail: %d\n", error); + return error; + } + + msleep(RM_BOOT_DELAY_MS); + + data = fw->data; + data_len = fw->size; + page_nr = 0; + + while (data_len) { + len = min_t(size_t, data_len, RM_FW_PAGE_SIZE); + + error = raydium_i2c_fw_write_page(client, page_nr++, data, len); + if (error) + return error; + + msleep(20); + + data += len; + data_len -= len; + } + + error = raydium_i2c_leave_bl(client); + if (error) { + dev_err(&client->dev, + "failed to leave boot loader: %d\n", error); + return error; + } + + dev_dbg(&client->dev, "left boot loader mode\n"); + msleep(RM_BOOT_DELAY_MS); + + error = raydium_i2c_check_fw_status(ts); + if (error) { + dev_err(&client->dev, + "failed to check fw status after write: %d\n", + error); + return error; + } + + if (ts->boot_mode != RAYDIUM_TS_MAIN) { + dev_err(&client->dev, + "failed to switch to main fw after writing firmware: %d\n", + error); + return -EINVAL; + } + + error = raydium_i2c_fw_trigger(client); + if (error) { + dev_err(&client->dev, "failed to trigger fw: %d\n", error); + return error; + } + + fw_checksum = raydium_calc_chksum(fw->data, fw->size); + + error = raydium_i2c_write_checksum(client, fw->size, fw_checksum); + if (error) + return error; + + return 0; +} + +static int raydium_i2c_fw_update(struct raydium_data *ts) +{ + struct i2c_client *client = ts->client; + const struct firmware *fw = NULL; + const 
char *fw_file = "raydium.fw"; + int error; + + error = request_firmware(&fw, fw_file, &client->dev); + if (error) { + dev_err(&client->dev, "Unable to open firmware %s\n", fw_file); + return error; + } + + disable_irq(client->irq); + + error = raydium_i2c_do_update_firmware(ts, fw); + if (error) { + dev_err(&client->dev, "firmware update failed: %d\n", error); + ts->boot_mode = RAYDIUM_TS_BLDR; + goto out_enable_irq; + } + + error = raydium_i2c_initialize(ts); + if (error) { + dev_err(&client->dev, + "failed to initialize device after firmware update: %d\n", + error); + ts->boot_mode = RAYDIUM_TS_BLDR; + goto out_enable_irq; + } + + ts->boot_mode = RAYDIUM_TS_MAIN; + +out_enable_irq: + enable_irq(client->irq); + msleep(100); + + release_firmware(fw); + + return error; +} + +static void raydium_mt_event(struct raydium_data *ts) +{ + int i; + + for (i = 0; i < ts->report_size / ts->contact_size; i++) { + u8 *contact = &ts->report_data[ts->contact_size * i]; + bool state = contact[RM_CONTACT_STATE_POS]; + u8 wx, wy; + + input_mt_slot(ts->input, i); + input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, state); + + if (!state) + continue; + + input_report_abs(ts->input, ABS_MT_POSITION_X, + get_unaligned_le16(&contact[RM_CONTACT_X_POS])); + input_report_abs(ts->input, ABS_MT_POSITION_Y, + get_unaligned_le16(&contact[RM_CONTACT_Y_POS])); + input_report_abs(ts->input, ABS_MT_PRESSURE, + contact[RM_CONTACT_PRESSURE_POS]); + + wx = contact[RM_CONTACT_WIDTH_X_POS]; + wy = contact[RM_CONTACT_WIDTH_Y_POS]; + + input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, max(wx, wy)); + input_report_abs(ts->input, ABS_MT_TOUCH_MINOR, min(wx, wy)); + } + + input_mt_sync_frame(ts->input); + input_sync(ts->input); +} + +static irqreturn_t raydium_i2c_irq(int irq, void *_dev) +{ + struct raydium_data *ts = _dev; + int error; + u16 fw_crc; + u16 calc_crc; + + if (ts->boot_mode != RAYDIUM_TS_MAIN) + goto out; + + error = raydium_i2c_read_message(ts->client, ts->data_bank_addr, + ts->report_data, ts->pkg_size); + if (error) + goto out; + + fw_crc = get_unaligned_le16(&ts->report_data[ts->report_size]); + calc_crc = raydium_calc_chksum(ts->report_data, ts->report_size); + if (unlikely(fw_crc != calc_crc)) { + dev_warn(&ts->client->dev, + "%s: invalid packet crc %#04x vs %#04x\n", + __func__, calc_crc, fw_crc); + goto out; + } + + raydium_mt_event(ts); + +out: + return IRQ_HANDLED; +} + +static ssize_t raydium_i2c_fw_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + + return sprintf(buf, "%d.%d\n", ts->info.main_ver, ts->info.sub_ver); +} + +static ssize_t raydium_i2c_hw_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + + return sprintf(buf, "%#04x\n", le32_to_cpu(ts->info.hw_ver)); +} + +static ssize_t raydium_i2c_boot_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + + return sprintf(buf, "%s\n", + ts->boot_mode == RAYDIUM_TS_MAIN ? 
+ "Normal" : "Recovery"); +} + +static ssize_t raydium_i2c_update_fw_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + int error; + + error = mutex_lock_interruptible(&ts->sysfs_mutex); + if (error) + return error; + + error = raydium_i2c_fw_update(ts); + + mutex_unlock(&ts->sysfs_mutex); + + return error ?: count; +} + +static ssize_t raydium_i2c_calibrate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + static const u8 cal_cmd[] = { 0x00, 0x01, 0x9E }; + int error; + + error = mutex_lock_interruptible(&ts->sysfs_mutex); + if (error) + return error; + + error = raydium_i2c_write_object(client, cal_cmd, sizeof(cal_cmd), + RAYDIUM_WAIT_READY); + if (error) + dev_err(&client->dev, "calibrate command failed: %d\n", error); + + mutex_unlock(&ts->sysfs_mutex); + return error ?: count; +} + +static DEVICE_ATTR(fw_version, S_IRUGO, raydium_i2c_fw_ver_show, NULL); +static DEVICE_ATTR(hw_version, S_IRUGO, raydium_i2c_hw_ver_show, NULL); +static DEVICE_ATTR(boot_mode, S_IRUGO, raydium_i2c_boot_mode_show, NULL); +static DEVICE_ATTR(update_fw, S_IWUSR, NULL, raydium_i2c_update_fw_store); +static DEVICE_ATTR(calibrate, S_IWUSR, NULL, raydium_i2c_calibrate_store); + +static struct attribute *raydium_i2c_attributes[] = { + &dev_attr_update_fw.attr, + &dev_attr_boot_mode.attr, + &dev_attr_fw_version.attr, + &dev_attr_hw_version.attr, + &dev_attr_calibrate.attr, + NULL +}; + +static struct attribute_group raydium_i2c_attribute_group = { + .attrs = raydium_i2c_attributes, +}; + +static void raydium_i2c_remove_sysfs_group(void *_data) +{ + struct raydium_data *ts = _data; + + sysfs_remove_group(&ts->client->dev.kobj, &raydium_i2c_attribute_group); +} + +static int raydium_i2c_power_on(struct raydium_data *ts) +{ + int error; + + if (!ts->reset_gpio) + return 0; + + gpiod_set_value_cansleep(ts->reset_gpio, 1); + + error = regulator_enable(ts->avdd); + if (error) { + dev_err(&ts->client->dev, + "failed to enable avdd regulator: %d\n", error); + goto release_reset_gpio; + } + + error = regulator_enable(ts->vccio); + if (error) { + regulator_disable(ts->avdd); + dev_err(&ts->client->dev, + "failed to enable vccio regulator: %d\n", error); + goto release_reset_gpio; + } + + udelay(RM_POWERON_DELAY_USEC); + +release_reset_gpio: + gpiod_set_value_cansleep(ts->reset_gpio, 0); + + if (error) + return error; + + msleep(RM_RESET_DELAY_MSEC); + + return 0; +} + +static void raydium_i2c_power_off(void *_data) +{ + struct raydium_data *ts = _data; + + if (ts->reset_gpio) { + gpiod_set_value_cansleep(ts->reset_gpio, 1); + regulator_disable(ts->vccio); + regulator_disable(ts->avdd); + } +} + +static int raydium_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + union i2c_smbus_data dummy; + struct raydium_data *ts; + int error; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, + "i2c check functionality error (need I2C_FUNC_I2C)\n"); + return -ENXIO; + } + + ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + mutex_init(&ts->sysfs_mutex); + + ts->client = client; + i2c_set_clientdata(client, ts); + + ts->avdd = devm_regulator_get(&client->dev, "avdd"); + if (IS_ERR(ts->avdd)) { + error = PTR_ERR(ts->avdd); + if (error 
!= -EPROBE_DEFER) + dev_err(&client->dev, + "Failed to get 'avdd' regulator: %d\n", error); + return error; + } + + ts->vccio = devm_regulator_get(&client->dev, "vccio"); + if (IS_ERR(ts->vccio)) { + error = PTR_ERR(ts->vccio); + if (error != -EPROBE_DEFER) + dev_err(&client->dev, + "Failed to get 'vccio' regulator: %d\n", error); + return error; + } + + ts->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(ts->reset_gpio)) { + error = PTR_ERR(ts->reset_gpio); + if (error != -EPROBE_DEFER) + dev_err(&client->dev, + "failed to get reset gpio: %d\n", error); + return error; + } + + error = raydium_i2c_power_on(ts); + if (error) + return error; + + error = devm_add_action(&client->dev, raydium_i2c_power_off, ts); + if (error) { + dev_err(&client->dev, + "failed to install power off action: %d\n", error); + raydium_i2c_power_off(ts); + return error; + } + + /* Make sure there is something at this address */ + if (i2c_smbus_xfer(client->adapter, client->addr, 0, + I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) { + dev_err(&client->dev, "nothing at this address\n"); + return -ENXIO; + } + + error = raydium_i2c_initialize(ts); + if (error) { + dev_err(&client->dev, "failed to initialize: %d\n", error); + return error; + } + + ts->report_data = devm_kmalloc(&client->dev, + ts->pkg_size, GFP_KERNEL); + if (!ts->report_data) + return -ENOMEM; + + ts->input = devm_input_allocate_device(&client->dev); + if (!ts->input) { + dev_err(&client->dev, "Failed to allocate input device\n"); + return -ENOMEM; + } + + ts->input->name = "Raydium Touchscreen"; + ts->input->id.bustype = BUS_I2C; + + input_set_drvdata(ts->input, ts); + + input_set_abs_params(ts->input, ABS_MT_POSITION_X, + 0, le16_to_cpu(ts->info.x_max), 0, 0); + input_set_abs_params(ts->input, ABS_MT_POSITION_Y, + 0, le16_to_cpu(ts->info.y_max), 0, 0); + input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->info.x_res); + input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->info.y_res); + + input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); + input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0); + + error = input_mt_init_slots(ts->input, RM_MAX_TOUCH_NUM, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); + if (error) { + dev_err(&client->dev, + "failed to initialize MT slots: %d\n", error); + return error; + } + + error = input_register_device(ts->input); + if (error) { + dev_err(&client->dev, + "unable to register input device: %d\n", error); + return error; + } + + error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, raydium_i2c_irq, + IRQF_ONESHOT, client->name, ts); + if (error) { + dev_err(&client->dev, "Failed to register interrupt\n"); + return error; + } + + error = sysfs_create_group(&client->dev.kobj, + &raydium_i2c_attribute_group); + if (error) { + dev_err(&client->dev, "failed to create sysfs attributes: %d\n", + error); + return error; + } + + error = devm_add_action(&client->dev, + raydium_i2c_remove_sysfs_group, ts); + if (error) { + raydium_i2c_remove_sysfs_group(ts); + dev_err(&client->dev, + "Failed to add sysfs cleanup action: %d\n", error); + return error; + } + + return 0; +} + +static void __maybe_unused raydium_enter_sleep(struct i2c_client *client) +{ + static const u8 sleep_cmd[] = { 0x5A, 0xff, 0x00, 0x0f }; + int error; + + error = raydium_i2c_send(client, RM_CMD_ENTER_SLEEP, + sleep_cmd, sizeof(sleep_cmd)); + if (error) + dev_err(&client->dev, + "sleep command failed: %d\n", error); +} + +static int __maybe_unused raydium_i2c_suspend(struct device 
*dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + + /* Sleep is not available in BLDR recovery mode */ + if (ts->boot_mode != RAYDIUM_TS_MAIN) + return -EBUSY; + + disable_irq(client->irq); + + if (device_may_wakeup(dev)) { + raydium_enter_sleep(client); + + ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0); + } else { + raydium_i2c_power_off(ts); + } + + return 0; +} + +static int __maybe_unused raydium_i2c_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct raydium_data *ts = i2c_get_clientdata(client); + + if (device_may_wakeup(dev)) { + if (ts->wake_irq_enabled) + disable_irq_wake(client->irq); + raydium_i2c_sw_reset(client); + } else { + raydium_i2c_power_on(ts); + raydium_i2c_initialize(ts); + } + + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(raydium_i2c_pm_ops, + raydium_i2c_suspend, raydium_i2c_resume); + +static const struct i2c_device_id raydium_i2c_id[] = { + { "raydium_i2c", 0 }, + { "rm32380", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, raydium_i2c_id); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id raydium_acpi_id[] = { + { "RAYD0001", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, raydium_acpi_id); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id raydium_of_match[] = { + { .compatible = "raydium,rm32380", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, raydium_of_match); +#endif + +static struct i2c_driver raydium_i2c_driver = { + .probe = raydium_i2c_probe, + .id_table = raydium_i2c_id, + .driver = { + .name = "raydium_ts", + .pm = &raydium_i2c_pm_ops, + .acpi_match_table = ACPI_PTR(raydium_acpi_id), + .of_match_table = of_match_ptr(raydium_of_match), + }, +}; +module_i2c_driver(raydium_i2c_driver); + +MODULE_AUTHOR("Raydium"); +MODULE_DESCRIPTION("Raydium I2C Touchscreen driver"); +MODULE_LICENSE("GPL v2"); @@ -126,7 +126,7 @@ struct sur40_image_header { #define VIDEO_PACKET_SIZE 16384 /* polling interval (ms) */ -#define POLL_INTERVAL 4 +#define POLL_INTERVAL 1 /* maximum number of contacts FIXME: this is a guess?
*/ #define MAX_CONTACTS 64 @@ -151,7 +151,6 @@ struct sur40_state { struct mutex lock; struct vb2_queue queue; - struct vb2_alloc_ctx *alloc_ctx; struct list_head buf_list; spinlock_t qlock; int sequence; @@ -448,7 +447,7 @@ static void sur40_process_video(struct sur40_state *sur40) /* return error if streaming was stopped in the meantime */ if (sur40->sequence == -1) - goto err_poll; + return; /* mark as finished */ new_buf->vb.vb2_buf.timestamp = ktime_get_ns(); @@ -580,19 +579,13 @@ static int sur40_probe(struct usb_interface *interface, sur40->queue = sur40_queue; sur40->queue.drv_priv = sur40; sur40->queue.lock = &sur40->lock; + sur40->queue.dev = sur40->dev; /* initialize the queue */ error = vb2_queue_init(&sur40->queue); if (error) goto err_unreg_v4l2; - sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev); - if (IS_ERR(sur40->alloc_ctx)) { - dev_err(sur40->dev, "Can't allocate buffer context"); - error = PTR_ERR(sur40->alloc_ctx); - goto err_unreg_v4l2; - } - sur40->vdev = sur40_video_device; sur40->vdev.v4l2_dev = &sur40->v4l2; sur40->vdev.lock = &sur40->lock; @@ -633,7 +626,6 @@ static void sur40_disconnect(struct usb_interface *interface) video_unregister_device(&sur40->vdev); v4l2_device_unregister(&sur40->v4l2); - vb2_dma_sg_cleanup_ctx(sur40->alloc_ctx); input_unregister_polled_device(sur40->input); input_free_polled_device(sur40->input); @@ -653,13 +645,10 @@ static void sur40_disconnect(struct usb_interface *interface) */ static int sur40_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, - unsigned int sizes[], void *alloc_ctxs[]) + unsigned int sizes[], struct device *alloc_devs[]) { - struct sur40_state *sur40 = vb2_get_drv_priv(q); - if (q->num_buffers + *nbuffers < 3) *nbuffers = 3 - q->num_buffers; - alloc_ctxs[0] = sur40->alloc_ctx; if (*nplanes) return sizes[0] < sur40_video_format.sizeimage ? -EINVAL : 0; @@ -736,6 +725,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) static void sur40_stop_streaming(struct vb2_queue *vq) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); + vb2_wait_for_all_buffers(vq); sur40->sequence = -1; /* Release all active buffers */ @@ -793,7 +783,6 @@ static int sur40_vidioc_enum_fmt(struct file *file, void *priv, { if (f->index != 0) return -EINVAL; - strlcpy(f->description, "8-bit greyscale", sizeof(f->description)); f->pixelformat = V4L2_PIX_FMT_GREY; f->flags = 0; return 0; diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c new file mode 100644 index 000000000000..e12fb9b63f31 --- /dev/null +++ b/drivers/input/touchscreen/surface3_spi.c @@ -0,0 +1,427 @@ +/* + * Driver for Ntrig/Microsoft Touchscreens over SPI + * + * Copyright (c) 2016 Red Hat Inc. + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; version 2 of the License.
+ */ + +#include <linux/kernel.h> + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/input.h> +#include <linux/input/mt.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/acpi.h> + +#include <asm/unaligned.h> + +#define SURFACE3_PACKET_SIZE 264 + +#define SURFACE3_REPORT_TOUCH 0xd2 +#define SURFACE3_REPORT_PEN 0x16 + +struct surface3_ts_data { + struct spi_device *spi; + struct gpio_desc *gpiod_rst[2]; + struct input_dev *input_dev; + struct input_dev *pen_input_dev; + int pen_tool; + + u8 rd_buf[SURFACE3_PACKET_SIZE] ____cacheline_aligned; +}; + +struct surface3_ts_data_finger { + u8 status; + __le16 tracking_id; + __le16 x; + __le16 cx; + __le16 y; + __le16 cy; + __le16 width; + __le16 height; + u32 padding; +} __packed; + +struct surface3_ts_data_pen { + u8 status; + __le16 x; + __le16 y; + __le16 pressure; + u8 padding; +} __packed; + +static int surface3_spi_read(struct surface3_ts_data *ts_data) +{ + struct spi_device *spi = ts_data->spi; + + memset(ts_data->rd_buf, 0, sizeof(ts_data->rd_buf)); + return spi_read(spi, ts_data->rd_buf, sizeof(ts_data->rd_buf)); +} + +static void surface3_spi_report_touch(struct surface3_ts_data *ts_data, + struct surface3_ts_data_finger *finger) +{ + int st = finger->status & 0x01; + int slot; + + slot = input_mt_get_slot_by_key(ts_data->input_dev, + get_unaligned_le16(&finger->tracking_id)); + if (slot < 0) + return; + + input_mt_slot(ts_data->input_dev, slot); + input_mt_report_slot_state(ts_data->input_dev, MT_TOOL_FINGER, st); + if (st) { + input_report_abs(ts_data->input_dev, + ABS_MT_POSITION_X, + get_unaligned_le16(&finger->x)); + input_report_abs(ts_data->input_dev, + ABS_MT_POSITION_Y, + get_unaligned_le16(&finger->y)); + input_report_abs(ts_data->input_dev, + ABS_MT_WIDTH_MAJOR, + get_unaligned_le16(&finger->width)); + input_report_abs(ts_data->input_dev, + ABS_MT_WIDTH_MINOR, + get_unaligned_le16(&finger->height)); + } +} + +static void surface3_spi_process_touch(struct surface3_ts_data *ts_data, u8 *data) +{ + u16 timestamp; + unsigned int i; + timestamp = get_unaligned_le16(&data[15]); + + for (i = 0; i < 13; i++) { + struct surface3_ts_data_finger *finger; + + finger = (struct surface3_ts_data_finger *)&data[17 + + i * sizeof(struct surface3_ts_data_finger)]; + + /* + * When bit 5 of status is 1, it marks the end of the report: + * - touch present: 0xe7 + * - touch released: 0xe4 + * - nothing valuable: 0xff + */ + if (finger->status & 0x10) + break; + + surface3_spi_report_touch(ts_data, finger); + } + + input_mt_sync_frame(ts_data->input_dev); + input_sync(ts_data->input_dev); +} + +static void surface3_spi_report_pen(struct surface3_ts_data *ts_data, + struct surface3_ts_data_pen *pen) +{ + struct input_dev *dev = ts_data->pen_input_dev; + int st = pen->status; + int prox = st & 0x01; + int rubber = st & 0x18; + int tool = (prox && rubber) ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN; + + /* fake proximity out to switch tools */ + if (ts_data->pen_tool != tool) { + input_report_key(dev, ts_data->pen_tool, 0); + input_sync(dev); + ts_data->pen_tool = tool; + } + + input_report_key(dev, BTN_TOUCH, st & 0x12); + + input_report_key(dev, ts_data->pen_tool, prox); + + if (st) { + input_report_key(dev, + BTN_STYLUS, + st & 0x04); + + input_report_abs(dev, + ABS_X, + get_unaligned_le16(&pen->x)); + input_report_abs(dev, + ABS_Y, + get_unaligned_le16(&pen->y)); + input_report_abs(dev, + ABS_PRESSURE, + get_unaligned_le16(&pen->pressure)); + } +} + +static void surface3_spi_process_pen(struct surface3_ts_data *ts_data, u8 *data) +{ + struct surface3_ts_data_pen *pen; + + pen = (struct surface3_ts_data_pen *)&data[15]; + + surface3_spi_report_pen(ts_data, pen); + input_sync(ts_data->pen_input_dev); +} + +static void surface3_spi_process(struct surface3_ts_data *ts_data) +{ + const char header[] = { + 0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01 + }; + u8 *data = ts_data->rd_buf; + + if (memcmp(header, data, sizeof(header))) + dev_err(&ts_data->spi->dev, + "%s header error: %*ph, ignoring...\n", + __func__, (int)sizeof(header), data); + + switch (data[9]) { + case SURFACE3_REPORT_TOUCH: + surface3_spi_process_touch(ts_data, data); + break; + case SURFACE3_REPORT_PEN: + surface3_spi_process_pen(ts_data, data); + break; + default: + dev_err(&ts_data->spi->dev, + "%s unknown packet type: %x, ignoring...\n", + __func__, data[9]); + break; + } +} + +static irqreturn_t surface3_spi_irq_handler(int irq, void *dev_id) +{ + struct surface3_ts_data *data = dev_id; + + if (surface3_spi_read(data)) + return IRQ_HANDLED; + + dev_dbg(&data->spi->dev, "%s received -> %*ph\n", + __func__, SURFACE3_PACKET_SIZE, data->rd_buf); + surface3_spi_process(data); + + return IRQ_HANDLED; +} + +static void surface3_spi_power(struct surface3_ts_data *data, bool on) +{ + gpiod_set_value(data->gpiod_rst[0], on); + gpiod_set_value(data->gpiod_rst[1], on); + /* let the device settle a little */ + msleep(20); +} + +/** + * surface3_spi_get_gpio_config - Get GPIO config from ACPI/DT + * + * @data: surface3_ts_data pointer + */ +static int surface3_spi_get_gpio_config(struct surface3_ts_data *data) +{ + int error; + struct device *dev; + struct gpio_desc *gpiod; + int i; + + dev = &data->spi->dev; + + /* Get the reset lines GPIO pin number */ + for (i = 0; i < 2; i++) { + gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + error = PTR_ERR(gpiod); + if (error != -EPROBE_DEFER) + dev_err(dev, + "Failed to get power GPIO %d: %d\n", + i, + error); + return error; + } + + data->gpiod_rst[i] = gpiod; + } + + return 0; +} + +static int surface3_spi_create_touch_input(struct surface3_ts_data *data) +{ + struct input_dev *input; + int error; + + input = devm_input_allocate_device(&data->spi->dev); + if (!input) + return -ENOMEM; + + data->input_dev = input; + + input_set_abs_params(input, ABS_MT_POSITION_X, 0, 9600, 0, 0); + input_abs_set_res(input, ABS_MT_POSITION_X, 40); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 7200, 0, 0); + input_abs_set_res(input, ABS_MT_POSITION_Y, 48); + input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 1024, 0, 0); + input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 1024, 0, 0); + input_mt_init_slots(input, 10, INPUT_MT_DIRECT); + + input->name = "Surface3 SPI Capacitive TouchScreen"; + input->phys = "input/ts"; + input->id.bustype = BUS_SPI; + input->id.vendor = 0x045e; /* Microsoft */ + input->id.product =
0x0001; + input->id.version = 0x0000; + + error = input_register_device(input); + if (error) { + dev_err(&data->spi->dev, + "Failed to register input device: %d", error); + return error; + } + + return 0; +} + +static int surface3_spi_create_pen_input(struct surface3_ts_data *data) +{ + struct input_dev *input; + int error; + + input = devm_input_allocate_device(&data->spi->dev); + if (!input) + return -ENOMEM; + + data->pen_input_dev = input; + data->pen_tool = BTN_TOOL_PEN; + + __set_bit(INPUT_PROP_DIRECT, input->propbit); + __set_bit(INPUT_PROP_POINTER, input->propbit); + input_set_abs_params(input, ABS_X, 0, 9600, 0, 0); + input_abs_set_res(input, ABS_X, 40); + input_set_abs_params(input, ABS_Y, 0, 7200, 0, 0); + input_abs_set_res(input, ABS_Y, 48); + input_set_abs_params(input, ABS_PRESSURE, 0, 1024, 0, 0); + input_set_capability(input, EV_KEY, BTN_TOUCH); + input_set_capability(input, EV_KEY, BTN_STYLUS); + input_set_capability(input, EV_KEY, BTN_TOOL_PEN); + input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER); + + input->name = "Surface3 SPI Pen Input"; + input->phys = "input/ts"; + input->id.bustype = BUS_SPI; + input->id.vendor = 0x045e; /* Microsoft */ + input->id.product = 0x0002; + input->id.version = 0x0000; + + error = input_register_device(input); + if (error) { + dev_err(&data->spi->dev, + "Failed to register input device: %d", error); + return error; + } + + return 0; +} + +static int surface3_spi_probe(struct spi_device *spi) +{ + struct surface3_ts_data *data; + int error; + + /* Set up SPI*/ + spi->bits_per_word = 8; + spi->mode = SPI_MODE_0; + error = spi_setup(spi); + if (error) + return error; + + data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->spi = spi; + spi_set_drvdata(spi, data); + + error = surface3_spi_get_gpio_config(data); + if (error) + return error; + + surface3_spi_power(data, true); + surface3_spi_power(data, false); + surface3_spi_power(data, true); + + error = surface3_spi_create_touch_input(data); + if (error) + return error; + + error = surface3_spi_create_pen_input(data); + if (error) + return error; + + error = devm_request_threaded_irq(&spi->dev, spi->irq, + NULL, surface3_spi_irq_handler, + IRQF_ONESHOT, + "Surface3-irq", data); + if (error) + return error; + + return 0; +} + +static int __maybe_unused surface3_spi_suspend(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct surface3_ts_data *data = spi_get_drvdata(spi); + + disable_irq(data->spi->irq); + + surface3_spi_power(data, false); + + return 0; +} + +static int __maybe_unused surface3_spi_resume(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct surface3_ts_data *data = spi_get_drvdata(spi); + + surface3_spi_power(data, true); + + enable_irq(data->spi->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(surface3_spi_pm_ops, + surface3_spi_suspend, + surface3_spi_resume); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id surface3_spi_acpi_match[] = { + { "MSHW0037", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, surface3_spi_acpi_match); +#endif + +static struct spi_driver surface3_spi_driver = { + .driver = { + .name = "Surface3-spi", + .acpi_match_table = ACPI_PTR(surface3_spi_acpi_match), + .pm = &surface3_spi_pm_ops, + }, + .probe = surface3_spi_probe, +}; + +module_spi_driver(surface3_spi_driver); + +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); +MODULE_DESCRIPTION("Surface 3 SPI touchscreen driver"); +MODULE_LICENSE("GPL v2"); @@ -406,7 +406,7 @@ static 
int titsc_probe(struct platform_device *pdev) int err; /* Allocate memory for device */ - ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL); + ts_dev = kzalloc(sizeof(*ts_dev), GFP_KERNEL); input_dev = input_allocate_device(); if (!ts_dev || !input_dev) { dev_err(&pdev->dev, "failed to allocate memory.\n"); @@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev, return -ENODEV; } + ts->regmap = syscon_node_to_regmap(syscon_np); + of_node_put(syscon_np); + if (IS_ERR(ts->regmap)) { + dev_err(dev, "cannot get parent's regmap\n"); + return PTR_ERR(ts->regmap); + } + error = of_property_read_u32_index(np, "syscon", 1, ®); if (error < 0) { dev_err(dev, "no offset in syscon\n"); @@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev, ts->bit = BIT(bit); - ts->regmap = syscon_node_to_regmap(syscon_np); - if (IS_ERR(ts->regmap)) { - dev_err(dev, "cannot get parent's regmap\n"); - return PTR_ERR(ts->regmap); - } - return 0; } @@ -22,6 +22,11 @@ #include <linux/regmap.h> #include "tsc200x-core.h" +static const struct input_id tsc2004_input_id = { + .bustype = BUS_I2C, + .product = 2004, +}; + static int tsc2004_cmd(struct device *dev, u8 cmd) { u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd; @@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { - return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C, + return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id, devm_regmap_init_i2c(i2c, &tsc200x_regmap_config), tsc2004_cmd); } @@ -24,6 +24,11 @@ #include <linux/regmap.h> #include "tsc200x-core.h" +static const struct input_id tsc2005_input_id = { + .bustype = BUS_SPI, + .product = 2005, +}; + static int tsc2005_cmd(struct device *dev, u8 cmd) { u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd; @@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi) if (error) return error; - return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI, + return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id, devm_regmap_init_spi(spi, &tsc200x_regmap_config), tsc2005_cmd); } @@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input) mutex_unlock(&ts->mutex); } -int tsc200x_probe(struct device *dev, int irq, __u16 bustype, +int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, struct regmap *regmap, int (*tsc200x_cmd)(struct device *dev, u8 cmd)) { @@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype, snprintf(ts->phys, sizeof(ts->phys), "%s/input-ts", dev_name(dev)); - input_dev->name = "TSC200X touchscreen"; + if (tsc_id->product == 2004) { + input_dev->name = "TSC200X touchscreen"; + } else { + input_dev->name = devm_kasprintf(dev, GFP_KERNEL, + "TSC%04d touchscreen", + tsc_id->product); + if (!input_dev->name) + return -ENOMEM; + } + input_dev->phys = ts->phys; - input_dev->id.bustype = bustype; + input_dev->id = *tsc_id; input_dev->dev.parent = dev; input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); @@ -559,7 +568,7 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype, input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0); if (np) - touchscreen_parse_properties(input_dev, false); + touchscreen_parse_properties(input_dev, false, NULL); input_dev->open = tsc200x_open; input_dev->close = tsc200x_close; @@ -70,7 +70,7 @@ extern const struct regmap_config tsc200x_regmap_config; extern const struct dev_pm_ops tsc200x_pm_ops; -int tsc200x_probe(struct device *dev, int irq, __u16 
bustype, +int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id, struct regmap *regmap, int (*tsc200x_cmd)(struct device *dev, u8 cmd)); int tsc200x_remove(struct device *dev); @@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -#define W8001_MAX_LENGTH 11 +#define W8001_MAX_LENGTH 13 #define W8001_LEAD_MASK 0x80 #define W8001_LEAD_BYTE 0x80 #define W8001_TAB_MASK 0x40 @@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001) bool touch = data[0] & (1 << i); input_mt_slot(dev, i); + input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch); if (touch) { x = (data[6 * i + 1] << 7) | data[6 * i + 2]; y = (data[6 * i + 3] << 7) | data[6 * i + 4]; @@ -339,6 +340,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio, w8001->idx = 0; parse_multi_touch(w8001); break; + + default: + /* + * ThinkPad X60 Tablet PC (pen only device) sometimes + * sends invalid data packets that are larger than + * W8001_PKTLEN_TPCPEN. Let's start over again. + */ + if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1) + w8001->idx = 0; } return IRQ_HANDLED; @@ -508,11 +518,21 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename, w8001->pktlen = W8001_PKTLEN_TOUCH2FG; __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); - input_mt_init_slots(dev, 2, 0); + error = input_mt_init_slots(dev, 2, 0); + if (error) { + dev_err(&w8001->serio->dev, + "failed to initialize MT slots: %d\n", error); + return error; + } + input_set_abs_params(dev, ABS_MT_POSITION_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, touch.y, 0, 0); + input_set_abs_params(dev, ABS_MT_TOOL_TYPE, + 0, MT_TOOL_MAX, 0, 0); + input_abs_set_res(dev, ABS_MT_POSITION_X, touch.panel_res); + input_abs_set_res(dev, ABS_MT_POSITION_Y, touch.panel_res); strlcat(basename, " 2FG", basename_sz); if (w8001->max_pen_x && w8001->max_pen_y) @@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } + devid = e->devid; DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", hid, uid, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid)); - devid = e->devid; flags = e->flags; ret = add_acpi_hid_device(hid, uid, &devid, false); @@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void) break; } + /* + * Order is important here to make sure any unity map requirements are + * fulfilled. The unity mappings are created and written to the device + * table during the amd_iommu_init_api() call. + * + * After that we call init_device_table_dma() to make sure any + * uninitialized DTE will block DMA, and in the end we flush the caches + * of all IOMMUs to make sure the changes to the device table are + * active. 
+ */ + ret = amd_iommu_init_api(); + init_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); - ret = amd_iommu_init_api(); - if (!ret) print_iommu_info(); @@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work) if (access_error(vma, fault)) goto out; - ret = handle_mm_fault(mm, vma, address, flags); - + ret = handle_mm_fault(vma, address, flags); out: up_read(&mm->mmap_sem); @@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = { .attach_dev = arm_smmu_attach_dev, .map = arm_smmu_map, .unmap = arm_smmu_unmap, + .map_sg = default_iommu_map_sg, .iova_to_phys = arm_smmu_iova_to_phys, .add_device = arm_smmu_add_device, .remove_device = arm_smmu_remove_device, @@ -1871,10 +1871,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) /* * All PCI devices managed by this unit should have been destroyed. */ - if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) + if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { for_each_active_dev_scope(dmaru->devices, dmaru->devices_cnt, i, dev) return -EBUSY; + } ret = dmar_ir_hotplug(dmaru, false); if (ret == 0) @@ -3222,11 +3222,6 @@ static int __init init_dmars(void) } } - iommu_flush_write_buffer(iommu); - iommu_set_root_entry(iommu); - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; #ifdef CONFIG_INTEL_IOMMU_SVM @@ -3235,6 +3230,18 @@ static int __init init_dmars(void) #endif } + /* + * Now that qi is enabled on all iommus, set the root entry and flush + * caches. This is required on some Intel X58 chipsets, otherwise the + * flush_context function will loop forever and the boot hangs. + */ + for_each_active_iommu(iommu, drhd) { + iommu_flush_write_buffer(iommu); + iommu_set_root_entry(iommu); + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + } + if (iommu_pass_through) iommu_identity_mapping |= IDENTMAP_ALL; @@ -4265,10 +4272,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) if (!atsru) return 0; - if (!atsru->include_all && atsru->devices && atsru->devices_cnt) + if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, i, dev) return -EBUSY; + } return 0; } @@ -4595,13 +4603,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu) for (i = 0; i < g_num_of_iommus; i++) { struct intel_iommu *iommu = g_iommus[i]; struct dmar_domain *domain; - u16 did; + int did; if (!iommu) continue; - for (did = 0; did < 0xffff; did++) { - domain = get_iommu_domain(iommu, did); + for (did = 0; did < cap_ndoms(iommu->cap); did++) { + domain = get_iommu_domain(iommu, (u16)did); if (!domain) continue; @@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (access_error(vma, req)) goto invalid; - ret = handle_mm_fault(svm->mm, vma, address, + ret = handle_mm_fault(vma, address, req->wr_req ? FAULT_FLAG_WRITE : 0); if (ret & VM_FAULT_ERROR) goto invalid; @@ -420,8 +420,10 @@ retry: /* Try replenishing IOVAs by flushing rcache. 
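+ * (The per-CPU walk below now runs with preemption disabled, matching the + * this_cpu_ptr() -> get_cpu_ptr() conversions in the rcache paths further on.)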
*/ flushed_rcache = true; + preempt_disable(); for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); + preempt_enable(); goto retry; } @@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, bool can_insert = false; unsigned long flags; - cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_full(cpu_rcache->loaded)) { @@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, iova_magazine_push(cpu_rcache->loaded, iova_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); + put_cpu_ptr(rcache->cpu_rcaches); if (mag_to_free) { iova_magazine_free_pfns(mag_to_free, iovad); @@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, bool has_pfn = false; unsigned long flags; - cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_empty(cpu_rcache->loaded)) { @@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); + put_cpu_ptr(rcache->cpu_rcaches); return iova_pfn; } @@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, dte_addr = virt_to_phys(rk_domain->dt); for (i = 0; i < iommu->num_mmu; i++) { rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); - rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); + rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); } @@ -8,6 +8,12 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select MULTI_IRQ_HANDLER +config ARM_GIC_PM + bool + depends on PM + select ARM_GIC + select PM_CLK + config ARM_GIC_MAX_NR int default 2 if ARCH_REALVIEW @@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o @@ -69,3 +70,4 @@ obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o +obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o @@ -55,14 +55,14 @@ static void combiner_mask_irq(struct irq_data *data) { u32 mask = 1 << (data->hwirq % 32); - __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR); + writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR); } static void combiner_unmask_irq(struct irq_data *data) { u32 mask = 1 << (data->hwirq % 32); - __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); + writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET); } static void combiner_handle_cascade_irq(struct irq_desc *desc) @@ -75,7 +75,7 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc) chained_irq_enter(chip, desc); spin_lock(&irq_controller_lock); - status = __raw_readl(chip_data->base + COMBINER_INT_STATUS); + status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS); spin_unlock(&irq_controller_lock); status &= chip_data->irq_mask; @@ -135,7 +135,7 @@ static void __init combiner_init_one(struct combiner_chip_data *combiner_data, 
combiner_data->parent_irq = irq; /* Disable all interrupts */ - __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR); + writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR); } static int combiner_irq_domain_xlate(struct irq_domain *d, @@ -218,7 +218,7 @@ static int combiner_suspend(void) for (i = 0; i < max_nr; i++) combiner_data[i].pm_save = - __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET); + readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET); return 0; } @@ -235,9 +235,9 @@ static void combiner_resume(void) int i; for (i = 0; i < max_nr; i++) { - __raw_writel(combiner_data[i].irq_mask, + writel_relaxed(combiner_data[i].irq_mask, combiner_data[i].base + COMBINER_ENABLE_CLEAR); - __raw_writel(combiner_data[i].pm_save, + writel_relaxed(combiner_data[i].pm_save, combiner_data[i].base + COMBINER_ENABLE_SET); } } @@ -541,7 +541,7 @@ static void armada_370_xp_mpic_resume(void) writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); } -struct syscore_ops armada_370_xp_mpic_syscore_ops = { +static struct syscore_ops armada_370_xp_mpic_syscore_ops = { .suspend = armada_370_xp_mpic_suspend, .resume = armada_370_xp_mpic_resume, }; diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c new file mode 100644 index 000000000000..d24451d5bf8a --- /dev/null +++ b/drivers/irqchip/irq-aspeed-vic.c @@ -0,0 +1,230 @@ +/* + * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp. + * + * Driver for Aspeed "new" VIC as found in SoC generation 3 and later + * + * Based on irq-vic.c: + * + * Copyright (C) 1999 - 2003 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/export.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/syscore_ops.h> +#include <linux/device.h> +#include <linux/slab.h> + +#include <asm/exception.h> +#include <asm/irq.h> + +/* These definitions correspond to the "new mapping" of the + * register set that interleaves "high" and "low".
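+ * Each register therefore exists twice: the low word covers interrupt + * sources 0-31 and the high word sources 32-63.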
The offsets + * below are for the "low" register, add 4 to get to the high one + */ +#define AVIC_IRQ_STATUS 0x00 +#define AVIC_FIQ_STATUS 0x08 +#define AVIC_RAW_STATUS 0x10 +#define AVIC_INT_SELECT 0x18 +#define AVIC_INT_ENABLE 0x20 +#define AVIC_INT_ENABLE_CLR 0x28 +#define AVIC_INT_TRIGGER 0x30 +#define AVIC_INT_TRIGGER_CLR 0x38 +#define AVIC_INT_SENSE 0x40 +#define AVIC_INT_DUAL_EDGE 0x48 +#define AVIC_INT_EVENT 0x50 +#define AVIC_EDGE_CLR 0x58 +#define AVIC_EDGE_STATUS 0x60 + +#define NUM_IRQS 64 + +struct aspeed_vic { + void __iomem *base; + u32 edge_sources[2]; + struct irq_domain *dom; +}; +static struct aspeed_vic *system_avic; + +static void vic_init_hw(struct aspeed_vic *vic) +{ + u32 sense; + + /* Disable all interrupts */ + writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR); + writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4); + + /* Make sure no soft trigger is on */ + writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR); + writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4); + + /* Set everything to be IRQ */ + writel(0, vic->base + AVIC_INT_SELECT); + writel(0, vic->base + AVIC_INT_SELECT + 4); + + /* Some interrupts have a programmable high/low level trigger + * (4 GPIO direct inputs), for now we assume this was configured + * by firmware. We read which ones are edge now. + */ + sense = readl(vic->base + AVIC_INT_SENSE); + vic->edge_sources[0] = ~sense; + sense = readl(vic->base + AVIC_INT_SENSE + 4); + vic->edge_sources[1] = ~sense; + + /* Clear edge detection latches */ + writel(0xffffffff, vic->base + AVIC_EDGE_CLR); + writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4); +} + +static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) +{ + struct aspeed_vic *vic = system_avic; + u32 stat, irq; + + for (;;) { + irq = 0; + stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS); + if (!stat) { + stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4); + irq = 32; + } + if (stat == 0) + break; + irq += ffs(stat) - 1; + handle_domain_irq(vic->dom, irq, regs); + } +} + +static void avic_ack_irq(struct irq_data *d) +{ + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); + unsigned int sidx = d->hwirq >> 5; + unsigned int sbit = 1u << (d->hwirq & 0x1f); + + /* Clear edge latch for edge interrupts, nop for level */ + if (vic->edge_sources[sidx] & sbit) + writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4); +} + +static void avic_mask_irq(struct irq_data *d) +{ + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); + unsigned int sidx = d->hwirq >> 5; + unsigned int sbit = 1u << (d->hwirq & 0x1f); + + writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4); +} + +static void avic_unmask_irq(struct irq_data *d) +{ + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); + unsigned int sidx = d->hwirq >> 5; + unsigned int sbit = 1u << (d->hwirq & 0x1f); + + writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4); +} + +/* For level irq, faster than going through a nop "ack" and mask */ +static void avic_mask_ack_irq(struct irq_data *d) +{ + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); + unsigned int sidx = d->hwirq >> 5; + unsigned int sbit = 1u << (d->hwirq & 0x1f); + + /* First mask */ + writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4); + + /* Then clear edge latch for edge interrupts */ + if (vic->edge_sources[sidx] & sbit) + writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4); +} + +static struct irq_chip avic_chip = { + .name = "AVIC", + .irq_ack = avic_ack_irq, + .irq_mask = avic_mask_irq, + .irq_unmask = avic_unmask_irq, + .irq_mask_ack
= avic_mask_ack_irq, +}; + +static int avic_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct aspeed_vic *vic = d->host_data; + unsigned int sidx = hwirq >> 5; + unsigned int sbit = 1u << (hwirq & 0x1f); + + /* Check if interrupt exists */ + if (sidx > 1) + return -EPERM; + + if (vic->edge_sources[sidx] & sbit) + irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq); + else + irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq); + irq_set_chip_data(irq, vic); + irq_set_probe(irq); + return 0; +} + +static struct irq_domain_ops avic_dom_ops = { + .map = avic_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +static int __init avic_of_init(struct device_node *node, + struct device_node *parent) +{ + void __iomem *regs; + struct aspeed_vic *vic; + + if (WARN(parent, "non-root Aspeed VIC not supported")) + return -EINVAL; + if (WARN(system_avic, "duplicate Aspeed VIC not supported")) + return -EINVAL; + + regs = of_iomap(node, 0); + if (WARN_ON(!regs)) + return -EIO; + + vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL); + if (WARN_ON(!vic)) { + iounmap(regs); + return -ENOMEM; + } + vic->base = regs; + + /* Initialize sources, all masked */ + vic_init_hw(vic); + + /* Ready to receive interrupts */ + system_avic = vic; + set_handle_irq(avic_handle_irq); + + /* Register our domain */ + vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0, + &avic_dom_ops, vic); + + return 0; +} + +IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init); @@ -52,7 +52,6 @@ #include <linux/irqdomain.h> #include <asm/exception.h> -#include <asm/mach/irq.h> /* Put the bank and irq (32 bits) into the hwirq */ #define MAKE_HWIRQ(b, n) ((b << 5) | (n)) @@ -242,7 +241,7 @@ static void __exception_irq_entry bcm2835_handle_irq( u32 hwirq; while ((hwirq = get_next_armctrl_hwirq()) != ~0) - handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); + handle_domain_irq(intc.domain, hwirq, regs); } static void bcm2836_chained_handle_irq(struct irq_desc *desc) @@ -180,7 +180,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs) } else if (stat) { u32 hwirq = ffs(stat) - 1; - handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); + handle_domain_irq(intc.domain, hwirq, regs); } } @@ -224,8 +224,8 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { }; #ifdef CONFIG_ARM -int __init bcm2836_smp_boot_secondary(unsigned int cpu, - struct task_struct *idle) +static int __init bcm2836_smp_boot_secondary(unsigned int cpu, + struct task_struct *idle) { unsigned long secondary_startup_phys = (unsigned long)virt_to_phys((void *)secondary_startup); @@ -215,7 +215,7 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn, return 0; } -int __init bcm7120_l2_intc_probe(struct device_node *dn, +static int __init bcm7120_l2_intc_probe(struct device_node *dn, struct device_node *parent, int (*iomap_regs_fn)(struct device_node *, struct bcm7120_l2_intc_data *), @@ -339,15 +339,15 @@ out_unmap: return ret; } -int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, - struct device_node *parent) +static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, + struct device_node *parent) { return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120, "BCM7120 L2"); } -int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, - struct device_node *parent) +static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, + struct device_node *parent) { return bcm7120_l2_intc_probe(dn, parent,
bcm7120_l2_intc_iomap_3380, "BCM3380 L2"); } @@ -112,8 +112,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d) irq_gc_unlock(gc); } -int __init brcmstb_l2_intc_of_init(struct device_node *np, - struct device_node *parent) +static int __init brcmstb_l2_intc_of_init(struct device_node *np, + struct device_node *parent) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct brcmstb_l2_intc_data *data; @@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type, return ret; } -void __init gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)) +void gic_dist_config(void __iomem *base, int gic_irqs, + void (*sync_access)(void)) { unsigned int i; diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c new file mode 100644 index 000000000000..4cbffba3ff13 --- /dev/null +++ b/drivers/irqchip/irq-gic-pm.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/irqchip/arm-gic.h> +#include <linux/platform_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> + +struct gic_clk_data { + unsigned int num_clocks; + const char *const *clocks; +}; + +static int gic_runtime_resume(struct device *dev) +{ + struct gic_chip_data *gic = dev_get_drvdata(dev); + int ret; + + ret = pm_clk_resume(dev); + if (ret) + return ret; + + /* + * On the very first resume, the pointer to the driver data + * will be NULL and this is intentional, because we do not + * want to restore the GIC on the very first resume. So if + * the pointer is not valid just return.
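+ * (dev_get_drvdata() only returns a valid pointer once gic_probe() has + * called platform_set_drvdata(), and that happens after the first + * pm_runtime_get_sync() has already run this callback.)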
+ */ + if (!gic) + return 0; + + gic_dist_restore(gic); + gic_cpu_restore(gic); + + return 0; +} + +static int gic_runtime_suspend(struct device *dev) +{ + struct gic_chip_data *gic = dev_get_drvdata(dev); + + gic_dist_save(gic); + gic_cpu_save(gic); + + return pm_clk_suspend(dev); +} + +static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data) +{ + struct clk *clk; + unsigned int i; + int ret; + + if (!dev || !data) + return -EINVAL; + + ret = pm_clk_create(dev); + if (ret) + return ret; + + for (i = 0; i < data->num_clocks; i++) { + clk = of_clk_get_by_name(dev->of_node, data->clocks[i]); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get clock %s\n", + data->clocks[i]); + ret = PTR_ERR(clk); + goto error; + } + + ret = pm_clk_add_clk(dev, clk); + if (ret) { + dev_err(dev, "failed to add clock at index %d\n", i); + clk_put(clk); + goto error; + } + } + + return 0; + +error: + pm_clk_destroy(dev); + + return ret; +} + +static int gic_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct gic_clk_data *data; + struct gic_chip_data *gic; + int ret, irq; + + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no device match found\n"); + return -ENODEV; + } + + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + dev_err(dev, "no parent interrupt found!\n"); + return -EINVAL; + } + + ret = gic_get_clocks(dev, data); + if (ret) + goto irq_dispose; + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto rpm_disable; + + ret = gic_of_init_child(dev, &gic, irq); + if (ret) + goto rpm_put; + + platform_set_drvdata(pdev, gic); + + pm_runtime_put(dev); + + dev_info(dev, "GIC IRQ controller registered\n"); + + return 0; + +rpm_put: + pm_runtime_put_sync(dev); +rpm_disable: + pm_runtime_disable(dev); + pm_clk_destroy(dev); +irq_dispose: + irq_dispose_mapping(irq); + + return ret; +} + +static const struct dev_pm_ops gic_pm_ops = { + SET_RUNTIME_PM_OPS(gic_runtime_suspend, + gic_runtime_resume, NULL) +}; + +static const char * const gic400_clocks[] = { + "clk", +}; + +static const struct gic_clk_data gic400_data = { + .num_clocks = ARRAY_SIZE(gic400_clocks), + .clocks = gic400_clocks, +}; + +static const struct of_device_id gic_match[] = { + { .compatible = "nvidia,tegra210-agic", .data = &gic400_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, gic_match); + +static struct platform_driver gic_driver = { + .probe = gic_probe, + .driver = { + .name = "gic", + .of_match_table = gic_match, + .pm = &gic_pm_ops, + } +}; + +builtin_platform_driver(gic_driver); @@ -24,6 +24,7 @@ #include <linux/of_pci.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/irqchip/arm-gic.h> /* * MSI_TYPER: @@ -41,6 +41,7 @@ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) @@ -55,13 +56,14 @@ struct its_collection { }; /* - * The ITS_BASER structure - contains memory information and cached - * value of BASER register configuration. + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. 
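+ * The cached page size is reused when the remaining BASERn registers are + * configured and when second-level device-table pages are sized.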
*/ struct its_baser { void *base; u64 val; u32 order; + u32 psz; }; /* @@ -82,6 +84,7 @@ struct its_node { u64 flags; u32 ite_size; u32 device_ids; + int numa_node; }; #define ITS_ITT_ALIGN SZ_256 @@ -613,11 +616,23 @@ static void its_unmask_irq(struct irq_data *d) static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + /* lpi cannot be routed to a redistributor that is on a foreign node */ + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + if (its_dev->its->numa_node >= 0) { + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + if (!cpumask_intersects(mask_val, cpu_mask)) + return -EINVAL; + } + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + if (cpu >= nr_cpu_ids) return -EINVAL; @@ -810,180 +825,241 @@ static const char *its_base_type_string[] = { [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", }; -static void its_free_tables(struct its_node *its) +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) { - int i; + u32 idx = baser - its->tables; - for (i = 0; i < GITS_BASER_NR_REGS; i++) { - if (its->tables[i].base) { - free_pages((unsigned long)its->tables[i].base, - its->tables[i].order); - its->tables[i].base = NULL; - } - } + return readq_relaxed(its->base + GITS_BASER + (idx << 3)); } -static int its_alloc_tables(const char *node_name, struct its_node *its) +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) { - int err; - int i; - int psz = SZ_64K; - u64 shr = GITS_BASER_InnerShareable; - u64 cache; - u64 typer; - u32 ids; + u32 idx = baser - its->tables; - if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { - /* - * erratum 22375: only alloc 8MB table size - * erratum 24313: ignore memory access type - */ - cache = 0; - ids = 0x14; /* 20 bits, 8MB */ - } else { - cache = GITS_BASER_WaWb; - typer = readq_relaxed(its->base + GITS_TYPER); - ids = GITS_TYPER_DEVBITS(typer); + writeq_relaxed(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 psz, u32 order, + bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u32 alloc_pages; + void *base; + u64 tmp; + +retry_alloc_baser: + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); } - its->device_ids = ids; + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!base) + return -ENOMEM; - for (i = 0; i < GITS_BASER_NR_REGS; i++) { - u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); - u64 type = GITS_BASER_TYPE(val); - u64 entry_size = GITS_BASER_ENTRY_SIZE(val); - int order = get_order(psz); - int alloc_pages; - u64 tmp; - void *base; +retry_baser: + val = (virt_to_phys(base) | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | + cache | + shr | + GITS_BASER_VALID); + + val |= 
indirect ? GITS_BASER_INDIRECT : 0x0; + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } - if (type == GITS_BASER_TYPE_NONE) - continue; + its_write_baser(its, baser, val); + tmp = baser->val; + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { /* - * Allocate as many entries as required to fit the - * range of device IDs that the ITS can grok... The ID - * space being incredibly sparse, this results in a - * massive waste of memory. - * - * For other tables, only allocate a single page. + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. If that's zero, make it + * non-cacheable as well. */ - if (type == GITS_BASER_TYPE_DEVICE) { - /* - * 'order' was initialized earlier to the default page - * granule of the the ITS. We can't have an allocation - * smaller than that. If the requested allocation - * is smaller, round up to the default page granule. - */ - order = max(get_order((1UL << ids) * entry_size), - order); - if (order >= MAX_ORDER) { - order = MAX_ORDER - 1; - pr_warn("%s: Device Table too large, reduce its page order to %u\n", - node_name, order); - } + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + if (!shr) { + cache = GITS_BASER_nC; + __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order)); } + goto retry_baser; + } -retry_alloc_baser: - alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); - if (alloc_pages > GITS_BASER_PAGES_MAX) { - alloc_pages = GITS_BASER_PAGES_MAX; - order = get_order(GITS_BASER_PAGES_MAX * psz); - pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n", - node_name, order, alloc_pages); - } - - base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!base) { - err = -ENOMEM; - goto out_free; - } - - its->tables[i].base = base; - its->tables[i].order = order; - -retry_baser: - val = (virt_to_phys(base) | - (type << GITS_BASER_TYPE_SHIFT) | - ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | - cache | - shr | - GITS_BASER_VALID); + if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { + /* + * Page size didn't stick. Let's try a smaller + * size and retry. If we reach 4K, then + * something is horribly wrong... + */ + free_pages((unsigned long)base, order); + baser->base = NULL; switch (psz) { - case SZ_4K: - val |= GITS_BASER_PAGE_SIZE_4K; - break; case SZ_16K: - val |= GITS_BASER_PAGE_SIZE_16K; - break; + psz = SZ_4K; + goto retry_alloc_baser; case SZ_64K: - val |= GITS_BASER_PAGE_SIZE_64K; - break; + psz = SZ_16K; + goto retry_alloc_baser; } + } + + if (val != tmp) { + pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n", + &its->phys_base, its_base_type_string[type], + (unsigned long) val, (unsigned long) tmp); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + baser->order = order; + baser->base = base; + baser->psz = psz; + tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; + + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + indirect ? 
"indirect" : "flat", (int)esz, + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); - val |= alloc_pages - 1; - its->tables[i].val = val; + return 0; +} - writeq_relaxed(val, its->base + GITS_BASER + i * 8); - tmp = readq_relaxed(its->base + GITS_BASER + i * 8); +static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser, + u32 psz, u32 *order) +{ + u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser)); + u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb; + u32 ids = its->device_ids; + u32 new_order = *order; + bool indirect = false; + + /* No need to enable Indirection if memory requirement < (psz*2)bytes */ + if ((esz << ids) > (psz * 2)) { + /* + * Find out whether hw supports a single or two-level table by + * table by reading bit at offset '62' after writing '1' to it. + */ + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); + indirect = !!(baser->val & GITS_BASER_INDIRECT); - if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + if (indirect) { /* - * Shareability didn't stick. Just use - * whatever the read reported, which is likely - * to be the only thing this redistributor - * supports. If that's zero, make it - * non-cacheable as well. + * The size of the lvl2 table is equal to ITS page size + * which is 'psz'. For computing lvl1 table size, + * subtract ID bits that sparse lvl2 table from 'ids' + * which is reported by ITS hardware times lvl1 table + * entry size. */ - shr = tmp & GITS_BASER_SHAREABILITY_MASK; - if (!shr) { - cache = GITS_BASER_nC; - __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order)); - } - goto retry_baser; + ids -= ilog2(psz / esz); + esz = GITS_LVL1_ENTRY_SIZE; } + } - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { - /* - * Page size didn't stick. Let's try a smaller - * size and retry. If we reach 4K, then - * something is horribly wrong... - */ - free_pages((unsigned long)base, order); - its->tables[i].base = NULL; + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory if two-level device table + * feature is not supported by hardware. 
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order >= MAX_ORDER) { + new_order = MAX_ORDER - 1; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz); + pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n", + &its->phys_base, its->device_ids, ids); + } - switch (psz) { - case SZ_16K: - psz = SZ_4K; - goto retry_alloc_baser; - case SZ_64K: - psz = SZ_16K; - goto retry_alloc_baser; - } - } + *order = new_order; - if (val != tmp) { - pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", - node_name, i, - (unsigned long) val, (unsigned long) tmp); - err = -ENXIO; - goto out_free; + return indirect; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; } + } +} - pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", - (int)(PAGE_ORDER_TO_SIZE(order) / entry_size), - its_base_type_string[type], - (unsigned long)virt_to_phys(base), - psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); +static int its_alloc_tables(struct its_node *its) +{ + u64 typer = readq_relaxed(its->base + GITS_TYPER); + u32 ids = GITS_TYPER_DEVBITS(typer); + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_WaWb; + u32 psz = SZ_64K; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { + /* + * erratum 22375: only alloc 8MB table size + * erratum 24313: ignore memory access type + */ + cache = GITS_BASER_nCnB; + ids = 0x14; /* 20 bits, 8MB */ } - return 0; + its->device_ids = ids; -out_free: - its_free_tables(its); + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + u32 order = get_order(psz); + bool indirect = false; - return err; + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (type == GITS_BASER_TYPE_DEVICE) + indirect = its_parse_baser_device(its, baser, psz, &order); + + err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + psz = baser->psz; + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; } static int its_alloc_collections(struct its_node *its) @@ -1101,6 +1177,16 @@ static void its_cpu_init_collection(void) list_for_each_entry(its, &its_nodes, entry) { u64 target; + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + continue; + } + /* * We now have to bind each collection to its target * redistributor. 
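The hunk that follows adds on-demand allocation of second-level device-table pages. As a rough sketch of the indexing it implements — an editor's illustration only, with hypothetical names and a fixed level-1 entry size, where the real code reads the entry size and page size back from GITS_BASERn and uses ilog2()/shifts rather than division:

/*
 * Editor's sketch (not kernel code): how a device ID is split across a
 * two-level device table. One level-2 page holds psz/esz entries, so
 * dev_id / (psz/esz) selects the level-1 slot and dev_id % (psz/esz)
 * the entry within that page.
 */
#include <stdbool.h>
#include <stdint.h>

#define LVL1_ENTRY_SIZE 8	/* one 64-bit descriptor per level-1 slot */

struct devtab {
	unsigned long lvl1_size;	/* level-1 table size in bytes */
	unsigned long psz;		/* ITS page size = one level-2 page */
	unsigned long esz;		/* bytes per device-table entry */
};

static bool devtab_index(const struct devtab *t, uint32_t dev_id,
			 unsigned long *l1, unsigned long *l2)
{
	unsigned long entries_per_page = t->psz / t->esz;

	*l1 = dev_id / entries_per_page;
	*l2 = dev_id % entries_per_page;

	/* Mirrors the level-1 bounds check in its_alloc_device_table(). */
	return *l1 < t->lvl1_size / LVL1_ENTRY_SIZE;
}

A valid level-1 slot then carries the physical address of its level-2 page ORed with GITS_BASER_VALID, which is what the hunk writes via cpu_to_le64().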
@@ -1161,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type) return NULL; } +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + struct page *page; + u32 esz, idx; + __le64 *table; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < its->device_ids); + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = dev_id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + __flush_dcache_area(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + static struct its_device *its_create_device(struct its_node *its, u32 dev_id, int nvecs) { - struct its_baser *baser; struct its_device *dev; unsigned long *lpi_map; unsigned long flags; @@ -1175,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, int nr_ites; int sz; - baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); - - /* Don't allow 'dev_id' that exceeds single, flat table limit */ - if (baser) { - if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) / - GITS_BASER_ENTRY_SIZE(baser->val))) - return NULL; - } else if (ilog2(dev_id) >= its->device_ids) + if (!its_alloc_device_table(its, dev_id)) return NULL; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1351,9 +1477,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); @@ -1443,6 +1574,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; } +static void __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1452,6 +1590,14 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_cavium_22375, }, #endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 
0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif { } }; @@ -1514,6 +1660,7 @@ static int __init its_probe(struct device_node *node, its->base = its_base; its->phys_base = res.start; its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + its->numa_node = of_node_to_nid(node); its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); if (!its->cmd_base) { @@ -1524,7 +1671,7 @@ static int __init its_probe(struct device_node *node, its_enable_quirks(its); - err = its_alloc_tables(node->full_name, its); + err = its_alloc_tables(its); if (err) goto out_free_cmd; @@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) while (count--) { val = readl_relaxed(rbase + GICR_WAKER); - if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) break; cpu_relax(); udelay(1); @@ -75,7 +75,7 @@ struct gic_chip_data { void __iomem *raw_dist_base; void __iomem *raw_cpu_base; u32 percpu_offset; -#ifdef CONFIG_CPU_PM +#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM) u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; @@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic) } -static void __init gic_dist_init(struct gic_chip_data *gic) +static void gic_dist_init(struct gic_chip_data *gic) { unsigned int i; u32 cpumask; @@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr) return 0; } -#ifdef CONFIG_CPU_PM +#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM) /* * Saves the GIC distributor registers during suspend or idle. Must be called * with interrupts disabled but before powering down the GIC. After calling * this function, no interrupts will be delivered by the GIC, and another * platform-specific wakeup source must be enabled. */ -static void gic_dist_save(struct gic_chip_data *gic) +void gic_dist_save(struct gic_chip_data *gic) { unsigned int gic_irqs; void __iomem *dist_base; @@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic) * handled normally, but any edge interrupts that occured will not be seen by * the GIC and need to be handled by the platform-specific wakeup source. 
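The one-character GICR_WAKER fix at the top of this run of hunks deserves a note: GICR_WAKER_ChildrenAsleep is bit 2, so XOR-ing the raw masked value against a bool compares 1 with 4, and the poll loop breaks while the redistributor is still asleep. A standalone illustration of the truth-value mismatch (bit position matches the GICv3 register layout; the rest is a toy):

#include <stdbool.h>
#include <stdio.h>

#define ChildrenAsleep	(1u << 2)	/* bit 2 of GICR_WAKER */

int main(void)
{
	unsigned int val = ChildrenAsleep;	/* children still asleep */
	bool enable = true;			/* we are waking them up */

	/* Buggy: 1 ^ 4 == 5, truthy, so the wait loop exits too early. */
	printf("no cast:   %d\n", enable ^ (val & ChildrenAsleep));

	/* Fixed: 1 ^ 1 == 0, so we keep polling until the bit clears. */
	printf("with cast: %d\n", enable ^ (bool)(val & ChildrenAsleep));
	return 0;
}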
*/ -static void gic_dist_restore(struct gic_chip_data *gic) +void gic_dist_restore(struct gic_chip_data *gic) { unsigned int gic_irqs; unsigned int i; @@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic) writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); } -static void gic_cpu_save(struct gic_chip_data *gic) +void gic_cpu_save(struct gic_chip_data *gic) { int i; u32 *ptr; @@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic) } -static void gic_cpu_restore(struct gic_chip_data *gic) +void gic_cpu_restore(struct gic_chip_data *gic) { int i; u32 *ptr; @@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = { .notifier_call = gic_notifier, }; -static int __init gic_pm_init(struct gic_chip_data *gic) +static int gic_pm_init(struct gic_chip_data *gic) { gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, sizeof(u32)); @@ -757,7 +757,7 @@ free_ppi_enable: return -ENOMEM; } #else -static int __init gic_pm_init(struct gic_chip_data *gic) +static int gic_pm_init(struct gic_chip_data *gic) { return 0; } @@ -1032,32 +1032,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .unmap = gic_irq_domain_unmap, }; -static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, - struct fwnode_handle *handle) +static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, + const char *name, bool use_eoimode1) { - irq_hw_number_t hwirq_base; - int gic_irqs, irq_base, i, ret; - - if (WARN_ON(!gic || gic->domain)) - return -EINVAL; - /* Initialize irq_chip */ gic->chip = gic_chip; + gic->chip.name = name; + gic->chip.parent_device = dev; - if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) { + if (use_eoimode1) { gic->chip.irq_mask = gic_eoimode1_mask_irq; gic->chip.irq_eoi = gic_eoimode1_eoi_irq; gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity; - gic->chip.name = kasprintf(GFP_KERNEL, "GICv2"); - } else { - gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", - (int)(gic - &gic_data[0])); } #ifdef CONFIG_SMP if (gic == &gic_data[0]) gic->chip.irq_set_affinity = gic_set_affinity; #endif +} + +static int gic_init_bases(struct gic_chip_data *gic, int irq_start, + struct fwnode_handle *handle) +{ + irq_hw_number_t hwirq_base; + int gic_irqs, irq_base, ret; if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { /* Frankein-GIC without banked registers... */ @@ -1138,6 +1137,36 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, goto error; } + gic_dist_init(gic); + ret = gic_cpu_init(gic); + if (ret) + goto error; + + ret = gic_pm_init(gic); + if (ret) + goto error; + + return 0; + +error: + if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { + free_percpu(gic->dist_base.percpu_base); + free_percpu(gic->cpu_base.percpu_base); + } + + return ret; +} + +static int __init __gic_init_bases(struct gic_chip_data *gic, + int irq_start, + struct fwnode_handle *handle) +{ + char *name; + int i, ret; + + if (WARN_ON(!gic || gic->domain)) + return -EINVAL; + if (gic == &gic_data[0]) { /* * Initialize the CPU interface map to all CPUs. 
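The restructuring above splits the old monolithic __gic_init_bases() in two: gic_init_chip() does pure in-memory setup (irq_chip callbacks, name, parent device) and cannot fail, while gic_init_bases() claims resources and unwinds them on error. The payoff is that gic_of_init_child(), added in the same patch, can reuse both halves from a platform driver. A userspace analogue of the split (names hypothetical, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chip {
	char *name;
	void *bases;
};

/* Half 1: in-memory configuration only; never fails. */
static void chip_init(struct chip *c, char *name)
{
	c->name = name;
}

/* Half 2: resource acquisition; may fail, leaves nothing behind. */
static int bases_init(struct chip *c)
{
	c->bases = malloc(64);
	return c->bases ? 0 : -1;
}

int main(void)
{
	struct chip c = { 0 };
	char *name = strdup("GIC-0");

	chip_init(&c, name);
	if (bases_init(&c)) {
		free(name);	/* mirrors the kfree(name) on failure above */
		return 1;
	}
	printf("%s ready\n", c.name);
	free(c.bases);
	free(name);
	return 0;
}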
@@ -1155,24 +1184,17 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, pr_info("GIC: Using split EOI/Deactivate mode\n"); } - gic_dist_init(gic); - ret = gic_cpu_init(gic); - if (ret) - goto error; - - ret = gic_pm_init(gic); - if (ret) - goto error; - - return 0; - -error: - if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { - free_percpu(gic->dist_base.percpu_base); - free_percpu(gic->cpu_base.percpu_base); + if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) { + name = kasprintf(GFP_KERNEL, "GICv2"); + gic_init_chip(gic, NULL, name, true); + } else { + name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0])); + gic_init_chip(gic, NULL, name, false); } - kfree(gic->chip.name); + ret = gic_init_bases(gic, irq_start, handle); + if (ret) + kfree(name); return ret; } @@ -1250,7 +1272,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) return true; } -static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node) +static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node) { if (!gic || !node) return -EINVAL; @@ -1274,6 +1296,34 @@ error: return -ENOMEM; } +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) +{ + int ret; + + if (!dev || !dev->of_node || !gic || !irq) + return -EINVAL; + + *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL); + if (!*gic) + return -ENOMEM; + + gic_init_chip(*gic, dev, dev->of_node->name, false); + + ret = gic_of_setup(*gic, dev->of_node); + if (ret) + return ret; + + ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); + if (ret) { + gic_teardown(*gic); + return ret; + } + + irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic); + + return 0; +} + static void __init gic_of_setup_kvm_info(struct device_node *node) { int ret; @@ -1353,7 +1403,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); - +#else +int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) +{ + return -ENOTSUPP; +} #endif #ifdef CONFIG_ACPI @@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); gic_map_to_pin(intr, gic_cpu_pin); - gic_map_to_vpe(intr, vpe); + gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); for (i = 0; i < min(gic_vpes, NR_CPUS); i++) clear_bit(intr, pcpu_masks[i].pcpu_mask); set_bit(intr, pcpu_masks[vpe].pcpu_mask); @@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, /* verify that it doesn't conflict with an IPI irq */ if (test_bit(spec->hwirq, ipi_resrv)) return -EBUSY; + + hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); + + return irq_domain_set_hwirq_and_chip(d, virq, hwirq, + &gic_level_irq_controller, + NULL); } else { base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); if (base_hwirq == gic_shared_intrs) { @@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq, &gic_level_irq_controller, NULL); if (ret) - return ret; + goto error; } return 0; + +error: + irq_domain_free_irqs_parent(d, virq, nr_irqs); + return ret; } void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, @@ -949,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, switch (bus_token) { case DOMAIN_BUS_IPI: is_ipi = 
d->bus_token == bus_token; - return to_of_node(d->fwnode) == node && is_ipi; + return (!node || to_of_node(d->fwnode) == node) && is_ipi; break; default: return 0; @@ -1032,12 +1042,14 @@ static void __init __gic_init(unsigned long gic_base_addr, &gic_irq_domain_ops, NULL); if (!gic_irq_domain) panic("Failed to add GIC IRQ domain"); + gic_irq_domain->name = "mips-gic-irq"; gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0, GIC_NUM_LOCAL_INTRS + gic_shared_intrs, node, &gic_dev_domain_ops, NULL); if (!gic_dev_domain) panic("Failed to add GIC DEV domain"); + gic_dev_domain->name = "mips-gic-dev"; gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, @@ -1046,6 +1058,7 @@ static void __init __gic_init(unsigned long gic_base_addr, if (!gic_ipi_domain) panic("Failed to add GIC IPI domain"); + gic_ipi_domain->name = "mips-gic-ipi"; gic_ipi_domain->bus_token = DOMAIN_BUS_IPI; if (node && @@ -23,6 +23,8 @@ #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/irqchip/irq-omap-intc.h> + /* Define these here for now until we drop all board-files */ #define OMAP24XX_IC_BASE 0x480fe000 #define OMAP34XX_IC_BASE 0x48200000 @@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, /* set polarity for external interrupts only */ for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { if (priv->ext_irqs[i] == data->hwirq) { - ret = pic32_set_ext_polarity(i + 1, flow_type); + ret = pic32_set_ext_polarity(i, flow_type); if (ret) return ret; } @@ -92,9 +92,9 @@ static void s3c_irq_mask(struct irq_data *data) unsigned long mask; unsigned int irqno; - mask = __raw_readl(intc->reg_mask); + mask = readl_relaxed(intc->reg_mask); mask |= (1UL << irq_data->offset); - __raw_writel(mask, intc->reg_mask); + writel_relaxed(mask, intc->reg_mask); if (parent_intc) { parent_data = &parent_intc->irqs[irq_data->parent_irq]; @@ -119,9 +119,9 @@ static void s3c_irq_unmask(struct irq_data *data) unsigned long mask; unsigned int irqno; - mask = __raw_readl(intc->reg_mask); + mask = readl_relaxed(intc->reg_mask); mask &= ~(1UL << irq_data->offset); - __raw_writel(mask, intc->reg_mask); + writel_relaxed(mask, intc->reg_mask); if (parent_intc) { irqno = irq_find_mapping(parent_intc->domain, @@ -136,9 +136,9 @@ static inline void s3c_irq_ack(struct irq_data *data) struct s3c_irq_intc *intc = irq_data->intc; unsigned long bitval = 1UL << irq_data->offset; - __raw_writel(bitval, intc->reg_pending); + writel_relaxed(bitval, intc->reg_pending); if (intc->reg_intpnd) - __raw_writel(bitval, intc->reg_intpnd); + writel_relaxed(bitval, intc->reg_intpnd); } static int s3c_irq_type(struct irq_data *data, unsigned int type) @@ -172,9 +172,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg, unsigned long newvalue = 0, value; /* Set the GPIO to external interrupt mode */ - value = __raw_readl(gpcon_reg); + value = readl_relaxed(gpcon_reg); value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset); - __raw_writel(value, gpcon_reg); + writel_relaxed(value, gpcon_reg); /* Set the external interrupt to pointed trigger type */ switch (type) @@ -208,9 +208,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg, return -EINVAL; } - value = __raw_readl(extint_reg); + value = readl_relaxed(extint_reg); value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset); - __raw_writel(value, extint_reg); + writel_relaxed(value, extint_reg); return 0; } @@ -315,8 +315,8 @@ static void s3c_irq_demux(struct irq_desc *desc) chained_irq_enter(chip, desc); - src = 
__raw_readl(sub_intc->reg_pending); - msk = __raw_readl(sub_intc->reg_mask); + src = readl_relaxed(sub_intc->reg_pending); + msk = readl_relaxed(sub_intc->reg_mask); src &= ~msk; src &= irq_data->sub_bits; @@ -337,7 +337,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, int pnd; int offset; - pnd = __raw_readl(intc->reg_intpnd); + pnd = readl_relaxed(intc->reg_intpnd); if (!pnd) return false; @@ -352,7 +352,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, * * Thanks to Klaus, Shannon, et al for helping to debug this problem */ - offset = __raw_readl(intc->reg_intpnd + 4); + offset = readl_relaxed(intc->reg_intpnd + 4); /* Find the bit manually, when the offset is wrong. * The pending register only ever contains the one bit of the next @@ -406,7 +406,7 @@ int s3c24xx_set_fiq(unsigned int irq, bool on) intmod = 0; } - __raw_writel(intmod, S3C2410_INTMOD); + writel_relaxed(intmod, S3C2410_INTMOD); return 0; } @@ -508,14 +508,14 @@ static void s3c24xx_clear_intc(struct s3c_irq_intc *intc) last = 0; for (i = 0; i < 4; i++) { - pend = __raw_readl(reg_source); + pend = readl_relaxed(reg_source); if (pend == 0 || pend == last) break; - __raw_writel(pend, intc->reg_pending); + writel_relaxed(pend, intc->reg_pending); if (intc->reg_intpnd) - __raw_writel(pend, intc->reg_intpnd); + writel_relaxed(pend, intc->reg_intpnd); pr_info("irq: clearing pending status %08x\n", (int)pend); last = pend; @@ -29,6 +29,11 @@ static struct irq_domain *sirfsoc_irqdomain; +static void __iomem *sirfsoc_irq_get_regbase(void) +{ + return (void __iomem __force *)sirfsoc_irqdomain->host_data; +} + static __init void sirfsoc_alloc_gc(void __iomem *base) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; @@ -53,7 +58,7 @@ static __init void sirfsoc_alloc_gc(void __iomem *base) static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) { - void __iomem *base = sirfsoc_irqdomain->host_data; + void __iomem *base = sirfsoc_irq_get_regbase(); u32 irqstat; irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID); @@ -94,7 +99,7 @@ static struct sirfsoc_irq_status sirfsoc_irq_st; static int sirfsoc_irq_suspend(void) { - void __iomem *base = sirfsoc_irqdomain->host_data; + void __iomem *base = sirfsoc_irq_get_regbase(); sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0); sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1); @@ -106,7 +111,7 @@ static int sirfsoc_irq_suspend(void) static void sirfsoc_irq_resume(void) { - void __iomem *base = sirfsoc_irqdomain->host_data; + void __iomem *base = sirfsoc_irq_get_regbase(); writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0); writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1); @@ -90,7 +90,7 @@ static struct tegra_ictlr_info *lic; static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg) { - void __iomem *base = d->chip_data; + void __iomem *base = (void __iomem __force *)d->chip_data; u32 mask; mask = BIT(d->hwirq % 32); @@ -266,7 +266,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &tegra_ictlr_chip, - info->base[ictlr]); + (void __force *)info->base[ictlr]); } parent_fwspec = *fwspec; @@ -167,7 +167,7 @@ static int vic_suspend(void) return 0; } -struct syscore_ops vic_syscore_ops = { +static struct syscore_ops vic_syscore_ops = { .suspend = vic_suspend, .resume = vic_resume, }; @@ -517,7 +517,8 @@ int __init vic_init_cascaded(void __iomem *base, unsigned 
int parent_irq, EXPORT_SYMBOL_GPL(vic_init_cascaded); #ifdef CONFIG_OF -int __init vic_of_init(struct device_node *node, struct device_node *parent) +static int __init vic_of_init(struct device_node *node, + struct device_node *parent) { void __iomem *regs; u32 interrupt_mask = ~0; @@ -445,32 +445,32 @@ void divasa_unmap_pci_bar(void __iomem *bar) /********************************************************* ** I/O port access *********************************************************/ -byte __inline__ inpp(void __iomem *addr) +inline byte inpp(void __iomem *addr) { return (inb((unsigned long) addr)); } -word __inline__ inppw(void __iomem *addr) +inline word inppw(void __iomem *addr) { return (inw((unsigned long) addr)); } -void __inline__ inppw_buffer(void __iomem *addr, void *P, int length) +inline void inppw_buffer(void __iomem *addr, void *P, int length) { insw((unsigned long) addr, (word *) P, length >> 1); } -void __inline__ outppw_buffer(void __iomem *addr, void *P, int length) +inline void outppw_buffer(void __iomem *addr, void *P, int length) { outsw((unsigned long) addr, (word *) P, length >> 1); } -void __inline__ outppw(void __iomem *addr, word w) +inline void outppw(void __iomem *addr, word w) { outw(w, (unsigned long) addr); } -void __inline__ outpp(void __iomem *addr, word p) +inline void outpp(void __iomem *addr, word p) { outb(p, (unsigned long) addr); } @@ -203,7 +203,7 @@ void PCIread(byte bus, byte func, int offset, void *data, int length, void *pci_ /* ** I/O Port utilities */ -int diva_os_register_io_port(void *adapter, int register, unsigned long port, +int diva_os_register_io_port(void *adapter, int reg, unsigned long port, unsigned long length, const char *name, int id); /* ** I/O port access abstraction @@ -271,13 +271,13 @@ void diva_os_get_time(dword *sec, dword *usec); ** atomic operation, fake because we use threads */ typedef int diva_os_atomic_t; -static diva_os_atomic_t __inline__ +static inline diva_os_atomic_t diva_os_atomic_increment(diva_os_atomic_t *pv) { *pv += 1; return (*pv); } -static diva_os_atomic_t __inline__ +static inline diva_os_atomic_t diva_os_atomic_decrement(diva_os_atomic_t *pv) { *pv -= 1; @@ -228,6 +228,20 @@ config LEDS_LP3944 To compile this driver as a module, choose M here: the module will be called leds-lp3944. +config LEDS_LP3952 + tristate "LED Support for TI LP3952 2 channel LED driver" + depends on LEDS_CLASS + depends on I2C + depends on ACPI + depends on GPIOLIB + select REGMAP_I2C + help + This option enables support for LEDs connected to the Texas + Instruments LP3952 LED driver. + + To compile this driver as a module, choose M here: the + module will be called leds-lp3952. 
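Two of the eicon cleanups earlier in this chunk are small C hygiene fixes: `__inline__` placed after the return type is legal but unidiomatic, so it becomes a leading `inline`; and the parameter slot declared as `int register` never actually named anything, since `register` is a storage-class keyword, so it is renamed to the unambiguous `reg`. A compile-checkable before/after sketch (demo identifiers, not the driver's):

#include <stdio.h>

typedef unsigned char byte;

/* was: byte __inline__ inpp(void __iomem *addr); */
static inline byte inpp_demo(byte v)
{
	return v;
}

/* was: int diva_os_register_io_port(void *adapter, int register, ...);
 * `register` is a keyword, so it could not serve as a parameter name.
 */
static int register_io_port_demo(void *adapter, int reg)
{
	(void)adapter;
	return reg;
}

int main(void)
{
	printf("%d %d\n", inpp_demo(7), register_io_port_demo(NULL, 3));
	return 0;
}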
+ config LEDS_LP55XX_COMMON tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 @@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o +obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o @@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data) if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { led_set_brightness_nosleep(led_cdev, LED_OFF); + led_cdev->flags &= ~LED_BLINK_SW; return; } if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) { - led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; + led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW); return; } @@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev, return; } + led_cdev->flags |= LED_BLINK_SW; mod_timer(&led_cdev->blink_timer, jiffies + 1); } @@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev) del_timer_sync(&led_cdev->blink_timer); led_cdev->blink_delay_on = 0; led_cdev->blink_delay_off = 0; + led_cdev->flags &= ~LED_BLINK_SW; } EXPORT_SYMBOL_GPL(led_stop_software_blink); @@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { /* - * In case blinking is on delay brightness setting + * If software blink is active, delay brightness setting * until the next timer tick. */ - if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { + if (led_cdev->flags & LED_BLINK_SW) { /* * If we need to disable soft blinking delegate this to the * work queue task to avoid problems in case we are called @@ -60,6 +60,8 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, goto unlock; } } + /* we come here only if buf matches no trigger */ + ret = -EINVAL; up_read(&triggers_list_lock); unlock: @@ -165,6 +165,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) return ERR_PTR(-ENOMEM); device_for_each_child_node(dev, child) { + struct gpio_led_data *led_dat = &priv->leds[priv->num_leds]; struct gpio_led led = {}; const char *state = NULL; @@ -205,12 +206,12 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) if (fwnode_property_present(child, "panic-indicator")) led.panic_indicator = 1; - ret = create_gpio_led(&led, &priv->leds[priv->num_leds], - dev, NULL); + ret = create_gpio_led(&led, led_dat, dev, NULL); if (ret < 0) { fwnode_handle_put(child); goto err; } + led_dat->cdev.dev->of_node = np; priv->num_leds++; } @@ -50,7 +50,7 @@ static struct led_classdev hp6xx_red_led = { static struct led_classdev hp6xx_green_led = { .name = "hp6xx:green", - .default_trigger = "ide-disk", + .default_trigger = "disk-activity", .brightness_set = hp6xxled_green_set, .flags = LED_CORE_SUSPENDRESUME, }; @@ -422,7 +422,7 @@ err: return ret; } -static const struct of_device_id of_is31fl31xx_match[] = { +static const struct of_device_id of_is31fl32xx_match[] = { { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, }, { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, }, { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, }, @@ -432,7 +432,7 @@ static const struct of_device_id of_is31fl31xx_match[] = { {}, }; -MODULE_DEVICE_TABLE(of, of_is31fl31xx_match); 
+MODULE_DEVICE_TABLE(of, of_is31fl32xx_match); static int is31fl32xx_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -444,7 +444,7 @@ static int is31fl32xx_probe(struct i2c_client *client, int count; int ret = 0; - of_dev_id = of_match_device(of_is31fl31xx_match, dev); + of_dev_id = of_match_device(of_is31fl32xx_match, dev); if (!of_dev_id) return -EINVAL; @@ -482,23 +482,29 @@ static int is31fl32xx_remove(struct i2c_client *client) } /* - * i2c-core requires that id_table be non-NULL, even though - * it is not used for DeviceTree based instantiation. + * i2c-core (and modalias) requires that id_table be properly filled, + * even though it is not used for DeviceTree based instantiation. */ -static const struct i2c_device_id is31fl31xx_id[] = { +static const struct i2c_device_id is31fl32xx_id[] = { + { "is31fl3236" }, + { "is31fl3235" }, + { "is31fl3218" }, + { "sn3218" }, + { "is31fl3216" }, + { "sn3216" }, {}, }; -MODULE_DEVICE_TABLE(i2c, is31fl31xx_id); +MODULE_DEVICE_TABLE(i2c, is31fl32xx_id); static struct i2c_driver is31fl32xx_driver = { .driver = { .name = "is31fl32xx", - .of_match_table = of_is31fl31xx_match, + .of_match_table = of_is31fl32xx_match, }, .probe = is31fl32xx_probe, .remove = is31fl32xx_remove, - .id_table = is31fl31xx_id, + .id_table = is31fl32xx_id, }; module_i2c_driver(is31fl32xx_driver); diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c new file mode 100644 index 000000000000..a73c8ff08530 --- /dev/null +++ b/ drivers/leds/leds-lp3952.c@@ -0,0 +1,301 @@ +/* + * LED driver for TI lp3952 controller + * + * Copyright (C) 2016, DAQRI, LLC. + * Author: Tony Makkiel <tony.makkiel@daqri.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/leds-lp3952.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/reboot.h> +#include <linux/regmap.h> + +static int lp3952_register_write(struct i2c_client *client, u8 reg, u8 val) +{ + int ret; + struct lp3952_led_array *priv = i2c_get_clientdata(client); + + ret = regmap_write(priv->regmap, reg, val); + + if (ret) + dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n", + __func__, reg, val, ret); + return ret; +} + +static void lp3952_on_off(struct lp3952_led_array *priv, + enum lp3952_leds led_id, bool on) +{ + int ret, val; + + dev_dbg(&priv->client->dev, "%s LED %d to %d\n", __func__, led_id, on); + + val = 1 << led_id; + if (led_id == LP3952_LED_ALL) + val = LP3952_LED_MASK_ALL; + + ret = regmap_update_bits(priv->regmap, LP3952_REG_LED_CTRL, val, + on ? val : 0); + if (ret) + dev_err(&priv->client->dev, "%s, Error %d\n", __func__, ret); +} + +/* + * Using Imax to control brightness. There are 4 possible + * setting 25, 50, 75 and 100 % of Imax. Possible values are + * values 0-4. 0 meaning turn off. 
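The four intensity steps described here map onto two-bit fields, one per channel, inside a single max-current register; lp3952_set_brightness() below shifts (value - 1) into the channel's field via regmap_update_bits(). The field arithmetic on its own, as a runnable sketch (channel and value picked arbitrarily; layout per the driver's shift logic):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t ctrl = 0;
	unsigned int channel = 2;	/* third channel of the bank (example) */
	unsigned int value = 3;		/* 1..4 => 25/50/75/100 % of Imax */
	unsigned int shift = channel * 2;	/* two bits per channel */

	/* what regmap_update_bits(regmap, reg, 3 << shift,
	 * (value - 1) << shift) does to the register contents:
	 */
	ctrl = (ctrl & ~(3u << shift)) | ((value - 1) << shift);
	printf("max-current ctrl = 0x%02x\n", ctrl);	/* prints 0x20 */
	return 0;
}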
+ */ +static int lp3952_set_brightness(struct led_classdev *cdev, + enum led_brightness value) +{ + unsigned int reg, shift_val; + struct lp3952_ctrl_hdl *led = container_of(cdev, + struct lp3952_ctrl_hdl, + cdev); + struct lp3952_led_array *priv = (struct lp3952_led_array *)led->priv; + + dev_dbg(cdev->dev, "Brightness request: %d on %d\n", value, + led->channel); + + if (value == LED_OFF) { + lp3952_on_off(priv, led->channel, false); + return 0; + } + + if (led->channel > LP3952_RED_1) { + dev_err(cdev->dev, " %s Invalid LED requested", __func__); + return -EINVAL; + } + + if (led->channel >= LP3952_BLUE_1) { + reg = LP3952_REG_RGB1_MAX_I_CTRL; + shift_val = (led->channel - LP3952_BLUE_1) * 2; + } else { + reg = LP3952_REG_RGB2_MAX_I_CTRL; + shift_val = led->channel * 2; + } + + /* Enable the LED in case it is not enabled already */ + lp3952_on_off(priv, led->channel, true); + + return regmap_update_bits(priv->regmap, reg, 3 << shift_val, + --value << shift_val); +} + +static int lp3952_get_label(struct device *dev, const char *label, char *dest) +{ + int ret; + const char *str; + + ret = device_property_read_string(dev, label, &str); + if (!ret) + strncpy(dest, str, LP3952_LABEL_MAX_LEN); + + return ret; +} + +static int lp3952_register_led_classdev(struct lp3952_led_array *priv) +{ + int i, acpi_ret, ret = -ENODEV; + static const char *led_name_hdl[LP3952_LED_ALL] = { + "blue2", + "green2", + "red2", + "blue1", + "green1", + "red1" + }; + + for (i = 0; i < LP3952_LED_ALL; i++) { + acpi_ret = lp3952_get_label(&priv->client->dev, led_name_hdl[i], + priv->leds[i].name); + if (acpi_ret) + continue; + + priv->leds[i].cdev.name = priv->leds[i].name; + priv->leds[i].cdev.brightness = LED_OFF; + priv->leds[i].cdev.max_brightness = LP3952_BRIGHT_MAX; + priv->leds[i].cdev.brightness_set_blocking = + lp3952_set_brightness; + priv->leds[i].channel = i; + priv->leds[i].priv = priv; + + ret = devm_led_classdev_register(&priv->client->dev, + &priv->leds[i].cdev); + if (ret < 0) { + dev_err(&priv->client->dev, + "couldn't register LED %s\n", + priv->leds[i].cdev.name); + break; + } + } + return ret; +} + +static int lp3952_set_pattern_gen_cmd(struct lp3952_led_array *priv, + u8 cmd_index, u8 r, u8 g, u8 b, + enum lp3952_tt tt, enum lp3952_cet cet) +{ + int ret; + struct ptrn_gen_cmd line = { + { + { + .r = r, + .g = g, + .b = b, + .cet = cet, + .tt = tt + } + } + }; + + if (cmd_index >= LP3952_CMD_REG_COUNT) + return -EINVAL; + + ret = lp3952_register_write(priv->client, + LP3952_REG_CMD_0 + cmd_index * 2, + line.bytes.msb); + if (ret) + return ret; + + return lp3952_register_write(priv->client, + LP3952_REG_CMD_0 + cmd_index * 2 + 1, + line.bytes.lsb); +} + +static int lp3952_configure(struct lp3952_led_array *priv) +{ + int ret; + + /* Disable any LEDs on from any previous conf. 
*/ + ret = lp3952_register_write(priv->client, LP3952_REG_LED_CTRL, 0); + if (ret) + return ret; + + /* enable rgb patter, loop */ + ret = lp3952_register_write(priv->client, LP3952_REG_PAT_GEN_CTRL, + LP3952_PATRN_LOOP | LP3952_PATRN_GEN_EN); + if (ret) + return ret; + + /* Update Bit 6 (Active mode), Select both Led sets, Bit [1:0] */ + ret = lp3952_register_write(priv->client, LP3952_REG_ENABLES, + LP3952_ACTIVE_MODE | LP3952_INT_B00ST_LDR); + if (ret) + return ret; + + /* Set Cmd1 for RGB intensity,cmd and transition time */ + return lp3952_set_pattern_gen_cmd(priv, 0, I46, I71, I100, TT0, + CET197); +} + +static const struct regmap_config lp3952_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = REG_MAX, + .cache_type = REGCACHE_RBTREE, +}; + +static int lp3952_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int status; + struct lp3952_led_array *priv; + + priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->client = client; + + priv->enable_gpio = devm_gpiod_get(&client->dev, "nrst", + GPIOD_OUT_HIGH); + if (IS_ERR(priv->enable_gpio)) { + status = PTR_ERR(priv->enable_gpio); + dev_err(&client->dev, "Failed to enable gpio: %d\n", status); + return status; + } + + priv->regmap = devm_regmap_init_i2c(client, &lp3952_regmap); + if (IS_ERR(priv->regmap)) { + int err = PTR_ERR(priv->regmap); + + dev_err(&client->dev, "Failed to allocate register map: %d\n", + err); + return err; + } + + i2c_set_clientdata(client, priv); + + status = lp3952_configure(priv); + if (status) { + dev_err(&client->dev, "Probe failed. Device not found (%d)\n", + status); + return status; + } + + status = lp3952_register_led_classdev(priv); + if (status) { + dev_err(&client->dev, "Unable to register led_classdev: %d\n", + status); + return status; + } + + return 0; +} + +static int lp3952_remove(struct i2c_client *client) +{ + struct lp3952_led_array *priv; + + priv = i2c_get_clientdata(client); + lp3952_on_off(priv, LP3952_LED_ALL, false); + gpiod_set_value(priv->enable_gpio, 0); + + return 0; +} + +static const struct i2c_device_id lp3952_id[] = { + {LP3952_NAME, 0}, + {} +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id lp3952_acpi_match[] = { + {"TXNW3952", 0}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, lp3952_acpi_match); +#endif + +static struct i2c_driver lp3952_i2c_driver = { + .driver = { + .name = LP3952_NAME, + .acpi_match_table = ACPI_PTR(lp3952_acpi_match), + }, + .probe = lp3952_probe, + .remove = lp3952_remove, + .id_table = lp3952_id, +}; + +module_i2c_driver(lp3952_i2c_driver); + +MODULE_AUTHOR("Tony Makkiel <tony.makkiel@daqri.com>"); +MODULE_DESCRIPTION("lp3952 I2C LED controller driver"); +MODULE_LICENSE("GPL v2"); @@ -21,6 +21,8 @@ #include <linux/workqueue.h> #include <linux/leds-pca9532.h> #include <linux/gpio.h> +#include <linux/of.h> +#include <linux/of_device.h> /* m = num_leds*/ #define PCA9532_REG_INPUT(i) ((i) >> 3) @@ -86,9 +88,22 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = { }, }; +#ifdef CONFIG_OF +static const struct of_device_id of_pca9532_leds_match[] = { + { .compatible = "nxp,pca9530", .data = (void *)pca9530 }, + { .compatible = "nxp,pca9531", .data = (void *)pca9531 }, + { .compatible = "nxp,pca9532", .data = (void *)pca9532 }, + { .compatible = "nxp,pca9533", .data = (void *)pca9533 }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_pca9532_leds_match); +#endif + static struct i2c_driver pca9532_driver = { .driver = { .name = "leds-pca953x", + .of_match_table = 
of_match_ptr(of_pca9532_leds_match), }, .probe = pca9532_probe, .remove = pca9532_remove, @@ -354,6 +369,7 @@ static int pca9532_configure(struct i2c_client *client, led->state = pled->state; led->name = pled->name; led->ldev.name = led->name; + led->ldev.default_trigger = led->default_trigger; led->ldev.brightness = LED_OFF; led->ldev.brightness_set_blocking = pca9532_set_brightness; @@ -432,15 +448,66 @@ exit: return err; } +static struct pca9532_platform_data * +pca9532_of_populate_pdata(struct device *dev, struct device_node *np) +{ + struct pca9532_platform_data *pdata; + struct device_node *child; + const struct of_device_id *match; + int devid, maxleds; + int i = 0; + + match = of_match_device(of_pca9532_leds_match, dev); + if (!match) + return ERR_PTR(-ENODEV); + + devid = (int)(uintptr_t)match->data; + maxleds = pca9532_chip_info_tbl[devid].num_leds; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + for_each_child_of_node(np, child) { + if (of_property_read_string(child, "label", + &pdata->leds[i].name)) + pdata->leds[i].name = child->name; + of_property_read_u32(child, "type", &pdata->leds[i].type); + of_property_read_string(child, "linux,default-trigger", + &pdata->leds[i].default_trigger); + if (++i >= maxleds) { + of_node_put(child); + break; + } + } + + return pdata; +} + static int pca9532_probe(struct i2c_client *client, const struct i2c_device_id *id) { + int devid; struct pca9532_data *data = i2c_get_clientdata(client); struct pca9532_platform_data *pca9532_pdata = dev_get_platdata(&client->dev); - - if (!pca9532_pdata) - return -EIO; + struct device_node *np = client->dev.of_node; + + if (!pca9532_pdata) { + if (np) { + pca9532_pdata = + pca9532_of_populate_pdata(&client->dev, np); + if (IS_ERR(pca9532_pdata)) + return PTR_ERR(pca9532_pdata); + } else { + dev_err(&client->dev, "no platform data\n"); + return -EINVAL; + } + devid = (int)(uintptr_t)of_match_device( + of_pca9532_leds_match, &client->dev)->data; + } else { + devid = id->driver_data; + } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) @@ -450,7 +517,7 @@ static int pca9532_probe(struct i2c_client *client, if (!data) return -ENOMEM; - data->chip_info = &pca9532_chip_info_tbl[id->driver_data]; + data->chip_info = &pca9532_chip_info_tbl[devid]; dev_info(&client->dev, "setting platform data\n"); i2c_set_clientdata(client, data); @@ -33,12 +33,12 @@ config LEDS_TRIGGER_ONESHOT If unsure, say Y. -config LEDS_TRIGGER_IDE_DISK - bool "LED IDE Disk Trigger" - depends on IDE_GD_ATA +config LEDS_TRIGGER_DISK + bool "LED Disk Trigger" + depends on IDE_GD_ATA || ATA depends on LEDS_TRIGGERS help - This allows LEDs to be controlled by IDE disk activity. + This allows LEDs to be controlled by disk activity. If unsure, say Y. config LEDS_TRIGGER_MTD @@ -1,6 +1,6 @@ obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o -obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o +obj-$(CONFIG_LEDS_TRIGGER_DISK) += ledtrig-disk.o obj-$(CONFIG_LEDS_TRIGGER_MTD) += ledtrig-mtd.o obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o @@ -1,5 +1,5 @@ /* - * LED IDE-Disk Activity Trigger + * LED Disk Activity Trigger * * Copyright 2006 Openedhand Ltd. 
* @@ -17,20 +17,25 @@ #define BLINK_DELAY 30 +DEFINE_LED_TRIGGER(ledtrig_disk); DEFINE_LED_TRIGGER(ledtrig_ide); -void ledtrig_ide_activity(void) +void ledtrig_disk_activity(void) { - unsigned long ide_blink_delay = BLINK_DELAY; + unsigned long blink_delay = BLINK_DELAY; + led_trigger_blink_oneshot(ledtrig_disk, + &blink_delay, &blink_delay, 0); led_trigger_blink_oneshot(ledtrig_ide, - &ide_blink_delay, &ide_blink_delay, 0); + &blink_delay, &blink_delay, 0); } -EXPORT_SYMBOL(ledtrig_ide_activity); +EXPORT_SYMBOL(ledtrig_disk_activity); -static int __init ledtrig_ide_init(void) +static int __init ledtrig_disk_init(void) { + led_trigger_register_simple("disk-activity", &ledtrig_disk); led_trigger_register_simple("ide-disk", &ledtrig_ide); + return 0; } -device_initcall(ledtrig_ide_init); +device_initcall(ledtrig_disk_init); @@ -19,6 +19,7 @@ #include <linux/sched.h> #include <linux/leds.h> #include <linux/reboot.h> +#include <linux/suspend.h> #include "../leds.h" static int panic_heartbeats; @@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = { .deactivate = heartbeat_trig_deactivate, }; +static int heartbeat_pm_notifier(struct notifier_block *nb, + unsigned long pm_event, void *unused) +{ + int rc; + + switch (pm_event) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + led_trigger_unregister(&heartbeat_led_trigger); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + rc = led_trigger_register(&heartbeat_led_trigger); + if (rc) + pr_err("could not re-register heartbeat trigger\n"); + break; + default: + break; + } + return NOTIFY_DONE; +} + static int heartbeat_reboot_notifier(struct notifier_block *nb, unsigned long code, void *unused) { @@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, return NOTIFY_DONE; } +static struct notifier_block heartbeat_pm_nb = { + .notifier_call = heartbeat_pm_notifier, +}; + static struct notifier_block heartbeat_reboot_nb = { .notifier_call = heartbeat_reboot_notifier, }; @@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void) atomic_notifier_chain_register(&panic_notifier_list, &heartbeat_panic_nb); register_reboot_notifier(&heartbeat_reboot_nb); + register_pm_notifier(&heartbeat_pm_nb); } return rc; } static void __exit heartbeat_trig_exit(void) { + unregister_pm_notifier(&heartbeat_pm_nb); unregister_reboot_notifier(&heartbeat_reboot_nb); atomic_notifier_chain_unregister(&panic_notifier_list, &heartbeat_panic_nb); @@ -27,11 +27,13 @@ config NVM_DEBUG It is required to create/remove targets without IOCTLs. config NVM_GENNVM - tristate "Generic NVM manager for Open-Channel SSDs" + tristate "General Non-Volatile Memory Manager for Open-Channel SSDs" ---help--- - NVM media manager for Open-Channel SSDs that offload management - functionality to device, while keeping data placement and garbage - collection decisions on the host. + Non-volatile memory media manager for Open-Channel SSDs that implements + physical media metadata management and block provisioning API. + + This is the standard media manager for using Open-Channel SSDs, and + required for targets to be instantiated. 
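The heartbeat change earlier in this chunk hangs the trigger's lifetime off the PM notifier chain: unregister on the *_PREPARE events so the blink timer cannot fire during the freeze, re-register on the PM_POST_* events once the system is back. The notifier-chain flow, reduced to a userspace toy (event names mirror the kernel's; everything else is illustrative):

#include <stdio.h>

enum pm_event { PM_SUSPEND_PREPARE, PM_POST_SUSPEND };

typedef void (*notifier_fn)(enum pm_event);

static notifier_fn chain[8];
static int nr_callbacks;

static void register_pm_notifier_demo(notifier_fn fn)
{
	chain[nr_callbacks++] = fn;
}

static void pm_notify(enum pm_event event)
{
	for (int i = 0; i < nr_callbacks; i++)
		chain[i](event);
}

static void heartbeat_cb(enum pm_event event)
{
	if (event == PM_SUSPEND_PREPARE)
		puts("heartbeat: unregister trigger, blink timer stops");
	else if (event == PM_POST_SUSPEND)
		puts("heartbeat: re-register trigger, blinking resumes");
}

int main(void)
{
	register_pm_notifier_demo(heartbeat_cb);
	pm_notify(PM_SUSPEND_PREPARE);	/* suspend path */
	pm_notify(PM_POST_SUSPEND);	/* resume path */
	return 0;
}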
config NVM_RRPC tristate "Round-robin Hybrid Open-Channel SSD target" @@ -18,8 +18,6 @@ * */ -#include <linux/blkdev.h> -#include <linux/blk-mq.h> #include <linux/list.h> #include <linux/types.h> #include <linux/sem.h> @@ -28,46 +26,42 @@ #include <linux/miscdevice.h> #include <linux/lightnvm.h> #include <linux/sched/sysctl.h> -#include <uapi/linux/lightnvm.h> static LIST_HEAD(nvm_tgt_types); +static DECLARE_RWSEM(nvm_tgtt_lock); static LIST_HEAD(nvm_mgrs); static LIST_HEAD(nvm_devices); -static LIST_HEAD(nvm_targets); static DECLARE_RWSEM(nvm_lock); -static struct nvm_target *nvm_find_target(const char *name) +struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock) { - struct nvm_target *tgt; + struct nvm_tgt_type *tmp, *tt = NULL; - list_for_each_entry(tgt, &nvm_targets, list) - if (!strcmp(name, tgt->disk->disk_name)) - return tgt; + if (lock) + down_write(&nvm_tgtt_lock); - return NULL; -} - -static struct nvm_tgt_type *nvm_find_target_type(const char *name) -{ - struct nvm_tgt_type *tt; - - list_for_each_entry(tt, &nvm_tgt_types, list) - if (!strcmp(name, tt->name)) - return tt; + list_for_each_entry(tmp, &nvm_tgt_types, list) + if (!strcmp(name, tmp->name)) { + tt = tmp; + break; + } - return NULL; + if (lock) + up_write(&nvm_tgtt_lock); + return tt; } +EXPORT_SYMBOL(nvm_find_target_type); int nvm_register_tgt_type(struct nvm_tgt_type *tt) { int ret = 0; - down_write(&nvm_lock); - if (nvm_find_target_type(tt->name)) + down_write(&nvm_tgtt_lock); + if (nvm_find_target_type(tt->name, 0)) ret = -EEXIST; else list_add(&tt->list, &nvm_tgt_types); - up_write(&nvm_lock); + up_write(&nvm_tgtt_lock); return ret; } @@ -110,7 +104,7 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name) return NULL; } -struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev) +static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev) { struct nvmm_type *mt; int ret; @@ -182,20 +176,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name) return NULL; } -struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun, - unsigned long flags) -{ - return dev->mt->get_blk_unlocked(dev, lun, flags); -} -EXPORT_SYMBOL(nvm_get_blk_unlocked); - -/* Assumes that all valid pages have already been moved on release to bm */ -void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) -{ - return dev->mt->put_blk_unlocked(dev, blk); -} -EXPORT_SYMBOL(nvm_put_blk_unlocked); - struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, unsigned long flags) { @@ -210,6 +190,12 @@ void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) } EXPORT_SYMBOL(nvm_put_blk); +void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) +{ + return dev->mt->mark_blk(dev, ppa, type); +} +EXPORT_SYMBOL(nvm_mark_blk); + int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) { return dev->mt->submit_io(dev, rqd); @@ -251,9 +237,10 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) EXPORT_SYMBOL(nvm_generic_to_addr_mode); int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, - struct ppa_addr *ppas, int nr_ppas, int vblk) + const struct ppa_addr *ppas, int nr_ppas, int vblk) { int i, plane_cnt, pl_idx; + struct ppa_addr ppa; if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) { rqd->nr_ppas = nr_ppas; @@ -278,8 +265,9 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, for (i = 0; i < nr_ppas; i++) { for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) { - ppas[i].g.pl = pl_idx; - rqd->ppa_list[(pl_idx * 
nr_ppas) + i] = ppas[i]; + ppa = ppas[i]; + ppa.g.pl = pl_idx; + rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa; } } } @@ -337,7 +325,7 @@ static void nvm_end_io_sync(struct nvm_rq *rqd) complete(waiting); } -int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode, +static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode, int flags, void *buf, int len) { DECLARE_COMPLETION_ONSTACK(wait); @@ -367,7 +355,9 @@ int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode, /* Prevent hang_check timer from firing at us during very long I/O */ hang_check = sysctl_hung_task_timeout_secs; if (hang_check) - while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2))); + while (!wait_for_completion_io_timeout(&wait, + hang_check * (HZ/2))) + ; else wait_for_completion_io(&wait); @@ -510,7 +500,8 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) /* The lower page table encoding consists of a list of bytes, where each * has a lower and an upper half. The first half byte maintains the * increment value and every value after is an offset added to the - * previous incrementation value */ + * previous incrementation value + */ dev->lptbl[0] = mlc->pairs[0] & 0xF; for (i = 1; i < dev->lps_per_blk; i++) { p = mlc->pairs[i >> 1]; @@ -596,42 +587,11 @@ err_fmtype: return ret; } -static void nvm_remove_target(struct nvm_target *t) -{ - struct nvm_tgt_type *tt = t->type; - struct gendisk *tdisk = t->disk; - struct request_queue *q = tdisk->queue; - - lockdep_assert_held(&nvm_lock); - - del_gendisk(tdisk); - blk_cleanup_queue(q); - - if (tt->exit) - tt->exit(tdisk->private_data); - - put_disk(tdisk); - - list_del(&t->list); - kfree(t); -} - static void nvm_free_mgr(struct nvm_dev *dev) { - struct nvm_target *tgt, *tmp; - if (!dev->mt) return; - down_write(&nvm_lock); - list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) { - if (tgt->dev != dev) - continue; - - nvm_remove_target(tgt); - } - up_write(&nvm_lock); - dev->mt->unregister_mgr(dev); dev->mt = NULL; } @@ -778,91 +738,6 @@ void nvm_unregister(char *disk_name) } EXPORT_SYMBOL(nvm_unregister); -static const struct block_device_operations nvm_fops = { - .owner = THIS_MODULE, -}; - -static int nvm_create_target(struct nvm_dev *dev, - struct nvm_ioctl_create *create) -{ - struct nvm_ioctl_create_simple *s = &create->conf.s; - struct request_queue *tqueue; - struct gendisk *tdisk; - struct nvm_tgt_type *tt; - struct nvm_target *t; - void *targetdata; - - if (!dev->mt) { - pr_info("nvm: device has no media manager registered.\n"); - return -ENODEV; - } - - down_write(&nvm_lock); - tt = nvm_find_target_type(create->tgttype); - if (!tt) { - pr_err("nvm: target type %s not found\n", create->tgttype); - up_write(&nvm_lock); - return -EINVAL; - } - - t = nvm_find_target(create->tgtname); - if (t) { - pr_err("nvm: target name already exists.\n"); - up_write(&nvm_lock); - return -EINVAL; - } - up_write(&nvm_lock); - - t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL); - if (!t) - return -ENOMEM; - - tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); - if (!tqueue) - goto err_t; - blk_queue_make_request(tqueue, tt->make_rq); - - tdisk = alloc_disk(0); - if (!tdisk) - goto err_queue; - - sprintf(tdisk->disk_name, "%s", create->tgtname); - tdisk->flags = GENHD_FL_EXT_DEVT; - tdisk->major = 0; - tdisk->first_minor = 0; - tdisk->fops = &nvm_fops; - tdisk->queue = tqueue; - - targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end); - if (IS_ERR(targetdata)) - goto err_init; - - 
tdisk->private_data = targetdata; - tqueue->queuedata = targetdata; - - blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); - - set_capacity(tdisk, tt->capacity(targetdata)); - add_disk(tdisk); - - t->type = tt; - t->disk = tdisk; - t->dev = dev; - - down_write(&nvm_lock); - list_add_tail(&t->list, &nvm_targets); - up_write(&nvm_lock); - - return 0; -err_init: - put_disk(tdisk); -err_queue: - blk_cleanup_queue(tqueue); -err_t: - kfree(t); - return -ENOMEM; -} - static int __nvm_configure_create(struct nvm_ioctl_create *create) { struct nvm_dev *dev; @@ -871,11 +746,17 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) down_write(&nvm_lock); dev = nvm_find_nvm_dev(create->dev); up_write(&nvm_lock); + if (!dev) { pr_err("nvm: device not found\n"); return -EINVAL; } + if (!dev->mt) { + pr_info("nvm: device has no media manager registered.\n"); + return -ENODEV; + } + if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) { pr_err("nvm: config type not valid\n"); return -EINVAL; @@ -888,25 +769,7 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) return -EINVAL; } - return nvm_create_target(dev, create); -} - -static int __nvm_configure_remove(struct nvm_ioctl_remove *remove) -{ - struct nvm_target *t; - - down_write(&nvm_lock); - t = nvm_find_target(remove->tgtname); - if (!t) { - pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname); - up_write(&nvm_lock); - return -EINVAL; - } - - nvm_remove_target(t); - up_write(&nvm_lock); - - return 0; + return dev->mt->create_tgt(dev, create); } #ifdef CONFIG_NVM_DEBUG @@ -941,8 +804,9 @@ static int nvm_configure_show(const char *val) static int nvm_configure_remove(const char *val) { struct nvm_ioctl_remove remove; + struct nvm_dev *dev; char opcode; - int ret; + int ret = 0; ret = sscanf(val, "%c %256s", &opcode, remove.tgtname); if (ret != 2) { @@ -952,7 +816,13 @@ static int nvm_configure_remove(const char *val) remove.flags = 0; - return __nvm_configure_remove(&remove); + list_for_each_entry(dev, &nvm_devices, devices) { + ret = dev->mt->remove_tgt(dev, &remove); + if (!ret) + break; + } + + return ret; } static int nvm_configure_create(const char *val) @@ -1149,6 +1019,8 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg) static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) { struct nvm_ioctl_remove remove; + struct nvm_dev *dev; + int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1163,7 +1035,13 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) return -EINVAL; } - return __nvm_configure_remove(&remove); + list_for_each_entry(dev, &nvm_devices, devices) { + ret = dev->mt->remove_tgt(dev, &remove); + if (!ret) + break; + } + + return ret; } static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info) @@ -15,22 +15,160 @@ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * - * Implementation of a generic nvm manager for Open-Channel SSDs. + * Implementation of a general nvm manager for Open-Channel SSDs. 
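The ownership transfer in these lightnvm hunks moves target bookkeeping out of the core and behind the media manager's ops: instead of walking a global nvm_targets list, the core calls each device's mt->create_tgt()/mt->remove_tgt(). Removal uses a three-way return so the core can probe every registered device until one claims the name. That convention, reduced to a runnable sketch (structures and names illustrative):

#include <stdio.h>
#include <string.h>

struct nvm_dev_demo;

struct mgr_ops {
	/* 0: removed, 1: target not found here, <0: hard error */
	int (*remove_tgt)(struct nvm_dev_demo *dev, const char *name);
};

struct nvm_dev_demo {
	const char *owns;	/* the one target this device hosts */
	const struct mgr_ops *mt;
};

static int gen_remove_demo(struct nvm_dev_demo *dev, const char *name)
{
	return strcmp(dev->owns, name) ? 1 : 0;
}

static const struct mgr_ops gen_ops = { .remove_tgt = gen_remove_demo };

int main(void)
{
	struct nvm_dev_demo devs[] = {
		{ "rrpc0", &gen_ops },
		{ "rrpc1", &gen_ops },
	};
	int ret = 1;

	/* same loop shape as nvm_ioctl_dev_remove(): first owner wins */
	for (int i = 0; i < 2; i++) {
		ret = devs[i].mt->remove_tgt(&devs[i], "rrpc1");
		if (!ret)
			break;
	}
	printf("remove returned %d\n", ret);	/* 0: found on devs[1] */
	return 0;
}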
*/ #include "gennvm.h" -static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) +static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name) { - struct gen_nvm *gn = dev->mp; - struct gennvm_area *area, *prev, *next; + struct nvm_target *tgt; + + list_for_each_entry(tgt, &gn->targets, list) + if (!strcmp(name, tgt->disk->disk_name)) + return tgt; + + return NULL; +} + +static const struct block_device_operations gen_fops = { + .owner = THIS_MODULE, +}; + +static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) +{ + struct gen_dev *gn = dev->mp; + struct nvm_ioctl_create_simple *s = &create->conf.s; + struct request_queue *tqueue; + struct gendisk *tdisk; + struct nvm_tgt_type *tt; + struct nvm_target *t; + void *targetdata; + + tt = nvm_find_target_type(create->tgttype, 1); + if (!tt) { + pr_err("nvm: target type %s not found\n", create->tgttype); + return -EINVAL; + } + + mutex_lock(&gn->lock); + t = gen_find_target(gn, create->tgtname); + if (t) { + pr_err("nvm: target name already exists.\n"); + mutex_unlock(&gn->lock); + return -EINVAL; + } + mutex_unlock(&gn->lock); + + t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL); + if (!t) + return -ENOMEM; + + tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); + if (!tqueue) + goto err_t; + blk_queue_make_request(tqueue, tt->make_rq); + + tdisk = alloc_disk(0); + if (!tdisk) + goto err_queue; + + sprintf(tdisk->disk_name, "%s", create->tgtname); + tdisk->flags = GENHD_FL_EXT_DEVT; + tdisk->major = 0; + tdisk->first_minor = 0; + tdisk->fops = &gen_fops; + tdisk->queue = tqueue; + + targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end); + if (IS_ERR(targetdata)) + goto err_init; + + tdisk->private_data = targetdata; + tqueue->queuedata = targetdata; + + blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); + + set_capacity(tdisk, tt->capacity(targetdata)); + add_disk(tdisk); + + t->type = tt; + t->disk = tdisk; + t->dev = dev; + + mutex_lock(&gn->lock); + list_add_tail(&t->list, &gn->targets); + mutex_unlock(&gn->lock); + + return 0; +err_init: + put_disk(tdisk); +err_queue: + blk_cleanup_queue(tqueue); +err_t: + kfree(t); + return -ENOMEM; +} + +static void __gen_remove_target(struct nvm_target *t) +{ + struct nvm_tgt_type *tt = t->type; + struct gendisk *tdisk = t->disk; + struct request_queue *q = tdisk->queue; + + del_gendisk(tdisk); + blk_cleanup_queue(q); + + if (tt->exit) + tt->exit(tdisk->private_data); + + put_disk(tdisk); + + list_del(&t->list); + kfree(t); +} + +/** + * gen_remove_tgt - Removes a target from the media manager + * @dev: device + * @remove: ioctl structure with target name to remove. 
+ * + * Returns: + * 0: on success + * 1: on not found + * <0: on error + */ +static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove) +{ + struct gen_dev *gn = dev->mp; + struct nvm_target *t; + + if (!gn) + return 1; + + mutex_lock(&gn->lock); + t = gen_find_target(gn, remove->tgtname); + if (!t) { + mutex_unlock(&gn->lock); + return 1; + } + __gen_remove_target(t); + mutex_unlock(&gn->lock); + + return 0; +} + +static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) +{ + struct gen_dev *gn = dev->mp; + struct gen_area *area, *prev, *next; sector_t begin = 0; sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9; if (len > max_sectors) return -EINVAL; - area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL); + area = kmalloc(sizeof(struct gen_area), GFP_KERNEL); if (!area) return -ENOMEM; @@ -64,10 +202,10 @@ static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len) return 0; } -static void gennvm_put_area(struct nvm_dev *dev, sector_t begin) +static void gen_put_area(struct nvm_dev *dev, sector_t begin) { - struct gen_nvm *gn = dev->mp; - struct gennvm_area *area; + struct gen_dev *gn = dev->mp; + struct gen_area *area; spin_lock(&dev->lock); list_for_each_entry(area, &gn->area_list, list) { @@ -82,27 +220,27 @@ static void gennvm_put_area(struct nvm_dev *dev, sector_t begin) spin_unlock(&dev->lock); } -static void gennvm_blocks_free(struct nvm_dev *dev) +static void gen_blocks_free(struct nvm_dev *dev) { - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; struct gen_lun *lun; int i; - gennvm_for_each_lun(gn, lun, i) { + gen_for_each_lun(gn, lun, i) { if (!lun->vlun.blocks) break; vfree(lun->vlun.blocks); } } -static void gennvm_luns_free(struct nvm_dev *dev) +static void gen_luns_free(struct nvm_dev *dev) { - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; kfree(gn->luns); } -static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) +static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn) { struct gen_lun *lun; int i; @@ -111,7 +249,7 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) if (!gn->luns) return -ENOMEM; - gennvm_for_each_lun(gn, lun, i) { + gen_for_each_lun(gn, lun, i) { spin_lock_init(&lun->vlun.lock); INIT_LIST_HEAD(&lun->free_list); INIT_LIST_HEAD(&lun->used_list); @@ -122,14 +260,11 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; - lun->vlun.nr_open_blocks = 0; - lun->vlun.nr_closed_blocks = 0; - lun->vlun.nr_bad_blocks = 0; } return 0; } -static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa, +static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa, u8 *blks, int nr_blks) { struct nvm_dev *dev = gn->dev; @@ -149,17 +284,16 @@ static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa, blk = &lun->vlun.blocks[i]; list_move_tail(&blk->list, &lun->bb_list); - lun->vlun.nr_bad_blocks++; lun->vlun.nr_free_blocks--; } return 0; } -static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) +static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) { struct nvm_dev *dev = private; - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; u64 elba = slba + nlb; struct gen_lun *lun; struct nvm_block *blk; @@ -167,7 +301,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) int lun_id; if 
(unlikely(elba > dev->total_secs)) { - pr_err("gennvm: L2P data from device is out of bounds!\n"); + pr_err("gen: L2P data from device is out of bounds!\n"); return -EINVAL; } @@ -175,7 +309,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) u64 pba = le64_to_cpu(entries[i]); if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) { - pr_err("gennvm: L2P data entry is out of bounds!\n"); + pr_err("gen: L2P data entry is out of bounds!\n"); return -EINVAL; } @@ -200,16 +334,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) * block state. The block is assumed to be open. */ list_move_tail(&blk->list, &lun->used_list); - blk->state = NVM_BLK_ST_OPEN; + blk->state = NVM_BLK_ST_TGT; lun->vlun.nr_free_blocks--; - lun->vlun.nr_open_blocks++; } } return 0; } -static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) +static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn) { struct gen_lun *lun; struct nvm_block *block; @@ -222,7 +355,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) if (!blks) return -ENOMEM; - gennvm_for_each_lun(gn, lun, lun_iter) { + gen_for_each_lun(gn, lun, lun_iter) { lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) * dev->blks_per_lun); if (!lun->vlun.blocks) { @@ -256,20 +389,20 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) ret = nvm_get_bb_tbl(dev, ppa, blks); if (ret) - pr_err("gennvm: could not get BB table\n"); + pr_err("gen: could not get BB table\n"); - ret = gennvm_block_bb(gn, ppa, blks, nr_blks); + ret = gen_block_bb(gn, ppa, blks, nr_blks); if (ret) - pr_err("gennvm: BB table map failed\n"); + pr_err("gen: BB table map failed\n"); } } if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) { ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, - gennvm_block_map, dev); + gen_block_map, dev); if (ret) { - pr_err("gennvm: could not read L2P table.\n"); - pr_warn("gennvm: default block initialization"); + pr_err("gen: could not read L2P table.\n"); + pr_warn("gen: default block initialization"); } } @@ -277,67 +410,79 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) return 0; } -static void gennvm_free(struct nvm_dev *dev) +static void gen_free(struct nvm_dev *dev) { - gennvm_blocks_free(dev); - gennvm_luns_free(dev); + gen_blocks_free(dev); + gen_luns_free(dev); kfree(dev->mp); dev->mp = NULL; } -static int gennvm_register(struct nvm_dev *dev) +static int gen_register(struct nvm_dev *dev) { - struct gen_nvm *gn; + struct gen_dev *gn; int ret; if (!try_module_get(THIS_MODULE)) return -ENODEV; - gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL); + gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL); if (!gn) return -ENOMEM; gn->dev = dev; gn->nr_luns = dev->nr_luns; INIT_LIST_HEAD(&gn->area_list); + mutex_init(&gn->lock); + INIT_LIST_HEAD(&gn->targets); dev->mp = gn; - ret = gennvm_luns_init(dev, gn); + ret = gen_luns_init(dev, gn); if (ret) { - pr_err("gennvm: could not initialize luns\n"); + pr_err("gen: could not initialize luns\n"); goto err; } - ret = gennvm_blocks_init(dev, gn); + ret = gen_blocks_init(dev, gn); if (ret) { - pr_err("gennvm: could not initialize blocks\n"); + pr_err("gen: could not initialize blocks\n"); goto err; } return 1; err: - gennvm_free(dev); + gen_free(dev); module_put(THIS_MODULE); return ret; } -static void gennvm_unregister(struct nvm_dev *dev) +static void gen_unregister(struct nvm_dev *dev) { - gennvm_free(dev); + struct gen_dev *gn = dev->mp; + struct 
nvm_target *t, *tmp; + + mutex_lock(&gn->lock); + list_for_each_entry_safe(t, tmp, &gn->targets, list) { + if (t->dev != dev) + continue; + __gen_remove_target(t); + } + mutex_unlock(&gn->lock); + + gen_free(dev); module_put(THIS_MODULE); } -static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev, +static struct nvm_block *gen_get_blk(struct nvm_dev *dev, struct nvm_lun *vlun, unsigned long flags) { struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); struct nvm_block *blk = NULL; int is_gc = flags & NVM_IOTYPE_GC; - assert_spin_locked(&vlun->lock); - + spin_lock(&vlun->lock); if (list_empty(&lun->free_list)) { - pr_err_ratelimited("gennvm: lun %u have no free pages available", + pr_err_ratelimited("gen: lun %u have no free pages available", lun->vlun.id); goto out; } @@ -346,88 +491,58 @@ static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev, goto out; blk = list_first_entry(&lun->free_list, struct nvm_block, list); - list_move_tail(&blk->list, &lun->used_list); - blk->state = NVM_BLK_ST_OPEN; + list_move_tail(&blk->list, &lun->used_list); + blk->state = NVM_BLK_ST_TGT; lun->vlun.nr_free_blocks--; - lun->vlun.nr_open_blocks++; - out: - return blk; -} - -static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, - struct nvm_lun *vlun, unsigned long flags) -{ - struct nvm_block *blk; - - spin_lock(&vlun->lock); - blk = gennvm_get_blk_unlocked(dev, vlun, flags); spin_unlock(&vlun->lock); return blk; } -static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) +static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk) { struct nvm_lun *vlun = blk->lun; struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); - assert_spin_locked(&vlun->lock); - - if (blk->state & NVM_BLK_ST_OPEN) { - list_move_tail(&blk->list, &lun->free_list); - lun->vlun.nr_open_blocks--; - lun->vlun.nr_free_blocks++; - blk->state = NVM_BLK_ST_FREE; - } else if (blk->state & NVM_BLK_ST_CLOSED) { + spin_lock(&vlun->lock); + if (blk->state & NVM_BLK_ST_TGT) { list_move_tail(&blk->list, &lun->free_list); - lun->vlun.nr_closed_blocks--; lun->vlun.nr_free_blocks++; blk->state = NVM_BLK_ST_FREE; } else if (blk->state & NVM_BLK_ST_BAD) { list_move_tail(&blk->list, &lun->bb_list); - lun->vlun.nr_bad_blocks++; blk->state = NVM_BLK_ST_BAD; } else { WARN_ON_ONCE(1); - pr_err("gennvm: erroneous block type (%lu -> %u)\n", + pr_err("gen: erroneous block type (%lu -> %u)\n", blk->id, blk->state); list_move_tail(&blk->list, &lun->bb_list); - lun->vlun.nr_bad_blocks++; - blk->state = NVM_BLK_ST_BAD; } -} - -static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) -{ - struct nvm_lun *vlun = blk->lun; - - spin_lock(&vlun->lock); - gennvm_put_blk_unlocked(dev, blk); spin_unlock(&vlun->lock); } -static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) +static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) { - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; struct gen_lun *lun; struct nvm_block *blk; - pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n", + pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n", ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type); if (unlikely(ppa.g.ch > dev->nr_chnls || ppa.g.lun > dev->luns_per_chnl || ppa.g.blk > dev->blks_per_lun)) { WARN_ON_ONCE(1); - pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u", + pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u", ppa.g.ch, dev->nr_chnls, ppa.g.lun, dev->luns_per_chnl, 
ppa.g.blk, dev->blks_per_lun); return; } - lun = &gn->luns[ppa.g.lun * ppa.g.ch]; + lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun]; blk = &lun->vlun.blocks[ppa.g.blk]; /* will be moved to bb list on put_blk from target */ @@ -435,9 +550,9 @@ static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type) } /* - * mark block bad in gennvm. It is expected that the target recovers separately + * mark block bad in gen. It is expected that the target recovers separately */ -static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) +static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) { int bit = -1; int max_secs = dev->ops->max_phys_sect; @@ -447,25 +562,25 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) /* look up blocks and mark them as bad */ if (rqd->nr_ppas == 1) { - gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD); + gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD); return; } while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs) - gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD); + gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD); } -static void gennvm_end_io(struct nvm_rq *rqd) +static void gen_end_io(struct nvm_rq *rqd) { struct nvm_tgt_instance *ins = rqd->ins; if (rqd->error == NVM_RSP_ERR_FAILWRITE) - gennvm_mark_blk_bad(rqd->dev, rqd); + gen_mark_blk_bad(rqd->dev, rqd); ins->tt->end_io(rqd); } -static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) +static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) { if (!dev->ops->submit_io) return -ENODEV; @@ -474,11 +589,11 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) nvm_generic_to_addr_mode(dev, rqd); rqd->dev = dev; - rqd->end_io = gennvm_end_io; + rqd->end_io = gen_end_io; return dev->ops->submit_io(dev, rqd); } -static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, +static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, unsigned long flags) { struct ppa_addr addr = block_to_ppa(dev, blk); @@ -486,19 +601,19 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, return nvm_erase_ppa(dev, &addr, 1); } -static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid) +static int gen_reserve_lun(struct nvm_dev *dev, int lunid) { return test_and_set_bit(lunid, dev->lun_map); } -static void gennvm_release_lun(struct nvm_dev *dev, int lunid) +static void gen_release_lun(struct nvm_dev *dev, int lunid) { WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); } -static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) +static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid) { - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; if (unlikely(lunid >= dev->nr_luns)) return NULL; @@ -506,66 +621,62 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) return &gn->luns[lunid].vlun; } -static void gennvm_lun_info_print(struct nvm_dev *dev) +static void gen_lun_info_print(struct nvm_dev *dev) { - struct gen_nvm *gn = dev->mp; + struct gen_dev *gn = dev->mp; struct gen_lun *lun; unsigned int i; - gennvm_for_each_lun(gn, lun, i) { + gen_for_each_lun(gn, lun, i) { spin_lock(&lun->vlun.lock); - pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n", - dev->name, i, - lun->vlun.nr_free_blocks, - lun->vlun.nr_open_blocks, - lun->vlun.nr_closed_blocks, - lun->vlun.nr_bad_blocks); + pr_info("%s: lun%8u\t%u\n", dev->name, i, + lun->vlun.nr_free_blocks); spin_unlock(&lun->vlun.lock); } } -static struct 
nvmm_type gennvm = { +static struct nvmm_type gen = { .name = "gennvm", .version = {0, 1, 0}, - .register_mgr = gennvm_register, - .unregister_mgr = gennvm_unregister, + .register_mgr = gen_register, + .unregister_mgr = gen_unregister, - .get_blk_unlocked = gennvm_get_blk_unlocked, - .put_blk_unlocked = gennvm_put_blk_unlocked, + .create_tgt = gen_create_tgt, + .remove_tgt = gen_remove_tgt, - .get_blk = gennvm_get_blk, - .put_blk = gennvm_put_blk, + .get_blk = gen_get_blk, + .put_blk = gen_put_blk, - .submit_io = gennvm_submit_io, - .erase_blk = gennvm_erase_blk, + .submit_io = gen_submit_io, + .erase_blk = gen_erase_blk, - .mark_blk = gennvm_mark_blk, + .mark_blk = gen_mark_blk, - .get_lun = gennvm_get_lun, - .reserve_lun = gennvm_reserve_lun, - .release_lun = gennvm_release_lun, - .lun_info_print = gennvm_lun_info_print, + .get_lun = gen_get_lun, + .reserve_lun = gen_reserve_lun, + .release_lun = gen_release_lun, + .lun_info_print = gen_lun_info_print, - .get_area = gennvm_get_area, - .put_area = gennvm_put_area, + .get_area = gen_get_area, + .put_area = gen_put_area, }; -static int __init gennvm_module_init(void) +static int __init gen_module_init(void) { - return nvm_register_mgr(&gennvm); + return nvm_register_mgr(&gen); } -static void gennvm_module_exit(void) +static void gen_module_exit(void) { - nvm_unregister_mgr(&gennvm); + nvm_unregister_mgr(&gen); } -module_init(gennvm_module_init); -module_exit(gennvm_module_exit); +module_init(gen_module_init); +module_exit(gen_module_exit); MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs"); +MODULE_DESCRIPTION("General media manager for Open-Channel SSDs"); @@ -34,20 +34,24 @@ struct gen_lun { */ }; -struct gen_nvm { +struct gen_dev { struct nvm_dev *dev; int nr_luns; struct gen_lun *luns; struct list_head area_list; + + struct mutex lock; + struct list_head targets; }; -struct gennvm_area { +struct gen_area { struct list_head list; sector_t begin; sector_t end; /* end is excluded */ }; -#define gennvm_for_each_lun(bm, lun, i) \ + +#define gen_for_each_lun(bm, lun, i) \ for ((i) = 0, lun = &(bm)->luns[0]; \ (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)]) @@ -48,7 +48,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a) } static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba, - unsigned len) + unsigned int len) { sector_t i; @@ -96,10 +96,13 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio) sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE; struct nvm_rq *rqd; - do { + while (1) { rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len); + if (rqd) + break; + schedule(); - } while (!rqd); + } if (IS_ERR(rqd)) { pr_err("rrpc: unable to acquire inflight IO\n"); @@ -172,39 +175,32 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) } /* requires lun->lock taken */ -static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk) +static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk, + struct rrpc_block **cur_rblk) { struct rrpc *rrpc = rlun->rrpc; - BUG_ON(!rblk); - - if (rlun->cur) { - spin_lock(&rlun->cur->lock); - WARN_ON(!block_is_full(rrpc, rlun->cur)); - spin_unlock(&rlun->cur->lock); + if (*cur_rblk) { + spin_lock(&(*cur_rblk)->lock); + WARN_ON(!block_is_full(rrpc, *cur_rblk)); + spin_unlock(&(*cur_rblk)->lock); } - rlun->cur = rblk; + *cur_rblk = new_rblk; } static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, unsigned long flags) { - struct 
nvm_lun *lun = rlun->parent; struct nvm_block *blk; struct rrpc_block *rblk; - spin_lock(&lun->lock); - blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags); + blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); if (!blk) { pr_err("nvm: rrpc: cannot get new block from media manager\n"); - spin_unlock(&lun->lock); return NULL; } rblk = rrpc_get_rblk(rlun, blk->id); - list_add_tail(&rblk->list, &rlun->open_list); - spin_unlock(&lun->lock); - blk->priv = rblk; bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk); rblk->next_page = 0; @@ -216,13 +212,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) { - struct rrpc_lun *rlun = rblk->rlun; - struct nvm_lun *lun = rlun->parent; - - spin_lock(&lun->lock); - nvm_put_blk_unlocked(rrpc->dev, rblk->parent); - list_del(&rblk->list); - spin_unlock(&lun->lock); + nvm_put_blk(rrpc->dev, rblk->parent); } static void rrpc_put_blks(struct rrpc *rrpc) @@ -342,7 +332,7 @@ try: /* Perform read to do GC */ bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_private = &wait; bio->bi_end_io = rrpc_end_sync_bio; @@ -364,7 +354,7 @@ try: reinit_completion(&wait); bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); - bio->bi_rw = WRITE; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio->bi_private = &wait; bio->bi_end_io = rrpc_end_sync_bio; @@ -508,21 +498,11 @@ static void rrpc_gc_queue(struct work_struct *work) struct rrpc *rrpc = gcb->rrpc; struct rrpc_block *rblk = gcb->rblk; struct rrpc_lun *rlun = rblk->rlun; - struct nvm_lun *lun = rblk->parent->lun; - struct nvm_block *blk = rblk->parent; spin_lock(&rlun->lock); list_add_tail(&rblk->prio, &rlun->prio_list); spin_unlock(&rlun->lock); - spin_lock(&lun->lock); - lun->nr_open_blocks--; - lun->nr_closed_blocks++; - blk->state &= ~NVM_BLK_ST_OPEN; - blk->state |= NVM_BLK_ST_CLOSED; - list_move_tail(&rblk->list, &rlun->closed_list); - spin_unlock(&lun->lock); - mempool_free(gcb, rrpc->gcb_pool); pr_debug("nvm: block '%lu' is full, allow GC (sched)\n", rblk->parent->id); @@ -596,21 +576,20 @@ out: return addr; } -/* Simple round-robin Logical to physical address translation. - * - * Retrieve the mapping using the active append point. Then update the ap for - * the next write to the disk. +/* Map logical address to a physical page. The mapping implements a round robin + * approach and allocates a page from the next lun available. * - * Returns rrpc_addr with the physical address and block. Remember to return to - * rrpc->addr_cache when request is finished. + * Returns rrpc_addr with the physical address and block. Returns NULL if no + * blocks in the next rlun are available. */ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr, int is_gc) { struct rrpc_lun *rlun; - struct rrpc_block *rblk; + struct rrpc_block *rblk, **cur_rblk; struct nvm_lun *lun; u64 paddr; + int gc_force = 0; rlun = rrpc_get_lun_rr(rrpc, is_gc); lun = rlun->parent; @@ -618,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr, if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4) return NULL; - spin_lock(&rlun->lock); + /* + * page allocation steps: + * 1. Try to allocate new page from current rblk + * 2a. If succeed, proceed to map it in and return + * 2b. If fail, first try to allocate a new block from media manger, + * and then retry step 1. Retry until the normal block pool is + * exhausted. + * 3. 
If exhausted, and garbage collector is requesting the block, + * go to the reserved block and retry step 1. + * In the case that this fails as well, or it is not GC + * requesting, report not able to retrieve a block and let the + * caller handle further processing. + */ + spin_lock(&rlun->lock); + cur_rblk = &rlun->cur; rblk = rlun->cur; retry: paddr = rrpc_alloc_addr(rrpc, rblk); - if (paddr == ADDR_EMPTY) { - rblk = rrpc_get_blk(rrpc, rlun, 0); - if (rblk) { - rrpc_set_lun_cur(rlun, rblk); - goto retry; - } + if (paddr != ADDR_EMPTY) + goto done; - if (is_gc) { - /* retry from emergency gc block */ - paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur); - if (paddr == ADDR_EMPTY) { - rblk = rrpc_get_blk(rrpc, rlun, 1); - if (!rblk) { - pr_err("rrpc: no more blocks"); - goto err; - } - - rlun->gc_cur = rblk; - paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur); - } - rblk = rlun->gc_cur; - } + if (!list_empty(&rlun->wblk_list)) { +new_blk: + rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block, + prio); + rrpc_set_lun_cur(rlun, rblk, cur_rblk); + list_del(&rblk->prio); + goto retry; + } + spin_unlock(&rlun->lock); + + rblk = rrpc_get_blk(rrpc, rlun, gc_force); + if (rblk) { + spin_lock(&rlun->lock); + list_add_tail(&rblk->prio, &rlun->wblk_list); + /* + * another thread might already have added a new block, + * Therefore, make sure that one is used, instead of the + * one just added. + */ + goto new_blk; } + if (unlikely(is_gc) && !gc_force) { + /* retry from emergency gc block */ + cur_rblk = &rlun->gc_cur; + rblk = rlun->gc_cur; + gc_force = 1; + spin_lock(&rlun->lock); + goto retry; + } + + pr_err("rrpc: failed to allocate new block\n"); + return NULL; +done: spin_unlock(&rlun->lock); return rrpc_update_map(rrpc, laddr, rblk, paddr); -err: - spin_unlock(&rlun->lock); - return NULL; } static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk) @@ -850,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio, return NVM_IO_ERR; } - if (bio_rw(bio) == WRITE) + if (bio_op(bio) == REQ_OP_WRITE) return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags, npages); return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages); } - if (bio_rw(bio) == WRITE) + if (bio_op(bio) == REQ_OP_WRITE) return rrpc_write_rq(rrpc, bio, rqd, flags); return rrpc_read_rq(rrpc, bio, rqd, flags); @@ -908,7 +911,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio) struct nvm_rq *rqd; int err; - if (bio->bi_rw & REQ_DISCARD) { + if (bio_op(bio) == REQ_OP_DISCARD) { rrpc_discard(rrpc, bio); return BLK_QC_T_NONE; } @@ -1196,8 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) rlun->rrpc = rrpc; INIT_LIST_HEAD(&rlun->prio_list); - INIT_LIST_HEAD(&rlun->open_list); - INIT_LIST_HEAD(&rlun->closed_list); + INIT_LIST_HEAD(&rlun->wblk_list); INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); spin_lock_init(&rlun->lock); @@ -1338,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc) rblk = rrpc_get_blk(rrpc, rlun, 0); if (!rblk) goto err; - - rrpc_set_lun_cur(rlun, rblk); + rrpc_set_lun_cur(rlun, rblk, &rlun->cur); /* Emergency gc block */ rblk = rrpc_get_blk(rrpc, rlun, 1); if (!rblk) goto err; - rlun->gc_cur = rblk; + rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur); } return 0; @@ -56,7 +56,6 @@ struct rrpc_block { struct nvm_block *parent; struct rrpc_lun *rlun; struct list_head prio; - struct list_head list; #define MAX_INVALID_PAGES_STORAGE 8 /* Bitmap for invalid page intries */ @@ -77,13 +76,7 @@ struct rrpc_lun { struct rrpc_block *blocks; /* Reference to 
block allocation */ struct list_head prio_list; /* Blocks that may be GC'ed */ - struct list_head open_list; /* In-use open blocks. These are blocks - * that can be both written to and read - * from - */ - struct list_head closed_list; /* In-use closed blocks. These are - * blocks that can _only_ be read from - */ + struct list_head wblk_list; /* Queued blocks to be written to */ struct work_struct ws_gc; @@ -188,7 +181,7 @@ static inline int request_intersects(struct rrpc_inflight_rq *r, } static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, - unsigned pages, struct rrpc_inflight_rq *r) + unsigned int pages, struct rrpc_inflight_rq *r) { sector_t laddr_end = laddr + pages - 1; struct rrpc_inflight_rq *rtmp; @@ -213,7 +206,7 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, } static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, - unsigned pages, + unsigned int pages, struct rrpc_inflight_rq *r) { BUG_ON((laddr + pages) > rrpc->nr_sects); @@ -39,7 +39,8 @@ static inline int scan_ppa_idx(int row, int blkid) return (row * MAX_BLKS_PR_SYSBLK) + blkid; } -void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb) +static void nvm_sysblk_to_cpu(struct nvm_sb_info *info, + struct nvm_system_block *sb) { info->seqnr = be32_to_cpu(sb->seqnr); info->erase_cnt = be32_to_cpu(sb->erase_cnt); @@ -48,7 +49,8 @@ void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb) info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa); } -void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info) +static void nvm_cpu_to_sysblk(struct nvm_system_block *sb, + struct nvm_sb_info *info) { sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC); sb->seqnr = cpu_to_be32(info->seqnr); @@ -86,7 +88,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas) return nr_rows; } -void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, +static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, struct ppa_addr *sysblk_ppas) { memset(s, 0, sizeof(struct sysblk_scan)); @@ -96,19 +96,18 @@ config ADB_PMU_LED Support the front LED on Power/iBooks as a generic LED that can be triggered by any of the supported triggers. To get the behaviour of the old CONFIG_BLK_DEV_IDE_PMAC_BLINK, select this - and the ide-disk LED trigger and configure appropriately through - sysfs. + and the disk LED trigger and configure appropriately through sysfs. -config ADB_PMU_LED_IDE - bool "Use front LED as IDE LED by default" +config ADB_PMU_LED_DISK + bool "Use front LED as DISK LED by default" depends on ADB_PMU_LED depends on LEDS_CLASS depends on IDE_GD_ATA select LEDS_TRIGGERS - select LEDS_TRIGGER_IDE_DISK + select LEDS_TRIGGER_DISK help - This option makes the front LED default to the IDE trigger - so that it blinks on IDE activity. + This option makes the front LED default to the disk trigger + so that it blinks on disk activity. 
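The help text above leaves "configure appropriately through sysfs" implicit. A minimal illustration, assuming the standard LED-class sysfs layout (the device name "pmu-led::front" comes from the pmu_led definition in the next hunk):

	/* Illustrative userspace snippet only: select the renamed
	 * "disk-activity" trigger for the PMU front LED. Assumes the
	 * usual /sys/class/leds layout; error handling kept minimal. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/leds/pmu-led::front/trigger", "w");

		if (!f) {
			perror("trigger");
			return 1;
		}
		fputs("disk-activity", f);
		return fclose(f) ? 1 : 0;
	}
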
config PMAC_SMU bool "Support for SMU based PowerMacs" @@ -73,8 +73,8 @@ static void pmu_led_set(struct led_classdev *led_cdev, static struct led_classdev pmu_led = { .name = "pmu-led::front", -#ifdef CONFIG_ADB_PMU_LED_IDE - .default_trigger = "ide-disk", +#ifdef CONFIG_ADB_PMU_LED_DISK + .default_trigger = "disk-activity", #endif .brightness_set = pmu_led_set, }; @@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev) struct mcb_driver *mdrv = to_mcb_driver(dev->driver); struct mcb_device *mdev = to_mcb_device(dev); const struct mcb_device_id *found_id; + struct module *carrier_mod; + int ret; found_id = mcb_match_id(mdrv->id_table, mdev); if (!found_id) return -ENODEV; - return mdrv->probe(mdev, found_id); + carrier_mod = mdev->dev.parent->driver->owner; + if (!try_module_get(carrier_mod)) + return -EINVAL; + + get_device(dev); + ret = mdrv->probe(mdev, found_id); + if (ret) + module_put(carrier_mod); + + return ret; } static int mcb_remove(struct device *dev) { struct mcb_driver *mdrv = to_mcb_driver(dev->driver); struct mcb_device *mdev = to_mcb_device(dev); + struct module *carrier_mod; mdrv->remove(mdev); + carrier_mod = mdev->dev.parent->driver->owner; + module_put(carrier_mod); + put_device(&mdev->dev); return 0; @@ -3,7 +3,8 @@ # dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ - dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o + dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \ + dm-rq.o dm-multipath-y += dm-path-selector.o dm-mpath.o dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \ dm-snap-persistent.o @@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b) closure_init_stack(&cl); bio = bch_bbio_alloc(b->c); - bio->bi_rw = REQ_META|READ_SYNC; bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; bio->bi_end_io = btree_node_read_endio; bio->bi_private = &cl; + bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); bch_bio_map(bio, b->keys.set[0].data); @@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b) b->bio->bi_end_io = btree_node_write_endio; b->bio->bi_private = cl; - b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); + bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); bch_bio_map(b->bio, i); /* @@ -112,7 +112,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl) EXPORT_SYMBOL(closure_wait); /** - * closure_sync - sleep until a closure a closure has nothing left to wait on + * closure_sync - sleep until a closure has nothing left to wait on * * Sleeps until the refcount hits 1 - the thread that's running the closure owns * the last refcount. @@ -31,7 +31,8 @@ * passing it, as you might expect, the function to run when nothing is pending * and the workqueue to run that function out of. * - * continue_at() also, critically, is a macro that returns the calling function. + * continue_at() also, critically, requires a 'return' immediately following the + * location where this macro is referenced, to return to the calling function. * There's good reason for this. 
* * To use safely closures asynchronously, they must always have a refcount while @@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b) bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; + bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); bch_bio_map(bio, sorted); - submit_bio_wait(REQ_META|READ_SYNC, bio); + submit_bio_wait(bio); bch_bbio_free(bio, b->c); memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9); @@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) check = bio_clone(bio, GFP_NOIO); if (!check) return; + bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC); if (bio_alloc_pages(check, GFP_NOIO)) goto out_put; - submit_bio_wait(READ_SYNC, check); + submit_bio_wait(check); bio_for_each_segment(bv, bio, iter) { void *p1 = kmap_atomic(bv.bv_page); @@ -25,7 +25,6 @@ struct bio *bch_bbio_alloc(struct cache_set *c) struct bio *bio = &b->bio; bio_init(bio); - bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; bio->bi_max_vecs = bucket_pages(c); bio->bi_io_vec = bio->bi_inline_vecs; @@ -111,7 +110,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, struct bbio *b = container_of(bio, struct bbio, bio); struct cache *ca = PTR_CACHE(c, &b->key, 0); - unsigned threshold = bio->bi_rw & REQ_WRITE + unsigned threshold = op_is_write(bio_op(bio)) ? c->congested_write_threshold_us : c->congested_read_threshold_us; @@ -54,11 +54,11 @@ reread: left = ca->sb.bucket_size - offset; bio_reset(bio); bio->bi_iter.bi_sector = bucket + offset; bio->bi_bdev = ca->bdev; - bio->bi_rw = READ; bio->bi_iter.bi_size = len << 9; bio->bi_end_io = journal_read_endio; bio->bi_private = &cl; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); closure_bio_submit(bio, &cl); @@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work) struct journal_device *ja = container_of(work, struct journal_device, discard_work); - submit_bio(0, &ja->discard_bio); + submit_bio(&ja->discard_bio); } static void do_journal_discard(struct cache *ca) @@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca) atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); bio_init(bio); + bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); bio->bi_iter.bi_sector = bucket_to_sector(ca->set, ca->sb.d[ja->discard_idx]); bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_WRITE|REQ_DISCARD; bio->bi_max_vecs = 1; bio->bi_io_vec = bio->bi_inline_vecs; bio->bi_iter.bi_size = bucket_bytes(ca); @@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl) bio_reset(bio); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; + bio_set_op_attrs(bio, REQ_OP_WRITE, + REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); bch_bio_map(bio, w->data); trace_bcache_journal_write(bio); @@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c) moving_init(io); bio = &io->bio.bio; - bio->bi_rw = READ; + bio_set_op_attrs(bio, REQ_OP_READ, 0); bio->bi_end_io = read_moving_endio; if (bio_alloc_pages(bio, GFP_KERNEL)) @@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl) return bch_data_invalidate(cl); /* - * Journal writes are marked REQ_FLUSH; if the original write was a + * Journal writes are marked REQ_PREFLUSH; if the original write was a * flush, it'll wait on the journal write. 
*/ - bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA); + bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA); do { unsigned i; @@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl) trace_bcache_cache_insert(k); bch_keylist_push(&op->insert_keys); - n->bi_rw |= REQ_WRITE; + bio_set_op_attrs(n, REQ_OP_WRITE, 0); bch_submit_bbio(n, op->c, k, 0); } while (n != bio); @@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || c->gc_stats.in_use > CUTOFF_CACHE_ADD || - (bio->bi_rw & REQ_DISCARD)) + (bio_op(bio) == REQ_OP_DISCARD)) goto skip; if (mode == CACHE_MODE_NONE || (mode == CACHE_MODE_WRITEAROUND && - (bio->bi_rw & REQ_WRITE))) + op_is_write(bio_op(bio)))) goto skip; if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || @@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) if (!congested && mode == CACHE_MODE_WRITEBACK && - (bio->bi_rw & REQ_WRITE) && + op_is_write(bio_op(bio)) && (bio->bi_rw & REQ_SYNC)) goto rescale; @@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio, s->cache_miss = NULL; s->d = d; s->recoverable = 1; - s->write = (bio->bi_rw & REQ_WRITE) != 0; + s->write = op_is_write(bio_op(bio)); s->read_dirty_data = 0; s->start_time = jiffies; @@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio, s->iop.write_prio = 0; s->iop.error = 0; s->iop.flags = 0; - s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; + s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0; s->iop.wq = bcache_wq; return s; @@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) * But check_overlapping drops dirty keys for which io hasn't started, * so we still want to call it. 
*/ - if (bio->bi_rw & REQ_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD) s->iop.bypass = true; if (should_writeback(dc, s->orig_bio, @@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) s->iop.bio = s->orig_bio; bio_get(s->iop.bio); - if (!(bio->bi_rw & REQ_DISCARD) || + if ((bio_op(bio) != REQ_OP_DISCARD) || blk_queue_discard(bdev_get_queue(dc->bdev))) closure_bio_submit(bio, cl); } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; - if (bio->bi_rw & REQ_FLUSH) { + if (bio->bi_rw & REQ_PREFLUSH) { /* Also need to send a flush to the backing device */ struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, dc->disk.bio_split); - flush->bi_rw = WRITE_FLUSH; flush->bi_bdev = bio->bi_bdev; flush->bi_end_io = request_endio; flush->bi_private = cl; + bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH); closure_bio_submit(flush, cl); } @@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, cached_dev_read(dc, s); } } else { - if ((bio->bi_rw & REQ_DISCARD) && + if ((bio_op(bio) == REQ_OP_DISCARD) && !blk_queue_discard(bdev_get_queue(dc->bdev))) bio_endio(bio); else @@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); - s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; + s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0; s->iop.writeback = true; s->iop.bio = bio; @@ -134,7 +134,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, case BCACHE_SB_VERSION_CDEV: case BCACHE_SB_VERSION_CDEV_WITH_UUID: sb->nbuckets = le64_to_cpu(s->nbuckets); - sb->block_size = le16_to_cpu(s->block_size); sb->bucket_size = le16_to_cpu(s->bucket_size); sb->nr_in_set = le16_to_cpu(s->nr_in_set); @@ -212,8 +211,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) unsigned i; bio->bi_iter.bi_sector = SB_SECTOR; - bio->bi_rw = REQ_SYNC|REQ_META; bio->bi_iter.bi_size = SB_SIZE; + bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); bch_bio_map(bio, NULL); out->offset = cpu_to_le64(sb->offset); @@ -238,7 +237,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) pr_debug("ver %llu, flags %llu, seq %llu", sb->version, sb->flags, sb->seq); - submit_bio(REQ_WRITE, bio); + submit_bio(bio); } static void bch_write_bdev_super_unlock(struct closure *cl) @@ -333,7 +332,7 @@ static void uuid_io_unlock(struct closure *cl) up(&c->uuid_write_mutex); } -static void uuid_io(struct cache_set *c, unsigned long rw, +static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, struct bkey *k, struct closure *parent) { struct closure *cl = &c->uuid_write; @@ -348,21 +347,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw, for (i = 0; i < KEY_PTRS(k); i++) { struct bio *bio = bch_bbio_alloc(c); - bio->bi_rw = REQ_SYNC|REQ_META|rw; + bio->bi_rw = REQ_SYNC|REQ_META|op_flags; bio->bi_iter.bi_size = KEY_SIZE(k) << 9; bio->bi_end_io = uuid_endio; bio->bi_private = cl; + bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, c->uuids); bch_submit_bbio(bio, c, k, i); - if (!(rw & WRITE)) + if (op != REQ_OP_WRITE) break; } bch_extent_to_text(buf, sizeof(buf), k); - pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf); + pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? 
"wrote" : "read", buf); for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) if (!bch_is_zero(u->uuid, 16)) @@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) return "bad uuid pointer"; bkey_copy(&c->uuid_bucket, k); - uuid_io(c, READ_SYNC, k, cl); + uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl); if (j->version < BCACHE_JSET_VERSION_UUIDv1) { struct uuid_entry_v0 *u0 = (void *) c->uuids; @@ -426,7 +426,7 @@ static int __uuid_write(struct cache_set *c) return 1; SET_KEY_SIZE(&k.key, c->sb.bucket_size); - uuid_io(c, REQ_WRITE, &k.key, &cl); + uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); closure_sync(&cl); bkey_copy(&c->uuid_bucket, &k.key); @@ -498,7 +498,8 @@ static void prio_endio(struct bio *bio) closure_put(&ca->prio); } -static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) +static void prio_io(struct cache *ca, uint64_t bucket, int op, + unsigned long op_flags) { struct closure *cl = &ca->prio; struct bio *bio = bch_bbio_alloc(ca->set); @@ -507,11 +508,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; bio->bi_bdev = ca->bdev; - bio->bi_rw = REQ_SYNC|REQ_META|rw; bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_end_io = prio_endio; bio->bi_private = ca; + bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, ca->disk_buckets); closure_bio_submit(bio, &ca->prio); @@ -557,7 +558,7 @@ void bch_prio_write(struct cache *ca) BUG_ON(bucket == -1); mutex_unlock(&ca->set->bucket_lock); - prio_io(ca, bucket, REQ_WRITE); + prio_io(ca, bucket, REQ_OP_WRITE, 0); mutex_lock(&ca->set->bucket_lock); ca->prio_buckets[i] = bucket; @@ -599,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) ca->prio_last_buckets[bucket_nr] = bucket; bucket_nr++; - prio_io(ca, bucket, READ_SYNC); + prio_io(ca, bucket, REQ_OP_READ, READ_SYNC); if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) pr_warn("bad csum reading priorities"); @@ -1518,7 +1519,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || - !(c->moving_gc_wq = create_workqueue("bcache_gc")) || + !(c->moving_gc_wq = alloc_workqueue("bcache_gc", + WQ_MEM_RECLAIM, 0)) || bch_journal_alloc(c) || bch_btree_cache_alloc(c) || bch_open_buckets_alloc(c) || @@ -1803,7 +1805,7 @@ void bch_cache_release(struct kobject *kobj) module_put(THIS_MODULE); } -static int cache_alloc(struct cache_sb *sb, struct cache *ca) +static int cache_alloc(struct cache *ca) { size_t free; struct bucket *b; @@ -1858,7 +1860,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, if (blk_queue_discard(bdev_get_queue(ca->bdev))) ca->discard = CACHE_DISCARD(&ca->sb); - ret = cache_alloc(sb, ca); + ret = cache_alloc(ca); if (ret != 0) goto err; @@ -2097,7 +2099,7 @@ static int __init bcache_init(void) return bcache_major; } - if (!(bcache_wq = create_workqueue("bcache")) || + if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || sysfs_create_files(bcache_kobj, files) || bch_request_init() || @@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl) struct keybuf_key *w = io->bio.bi_private; dirty_init(w); - io->bio.bi_rw = WRITE; + bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); io->bio.bi_iter.bi_sector = KEY_START(&w->key); 
io->bio.bi_bdev = io->dc->bdev; io->bio.bi_end_io = dirty_endio; @@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc) io->dc = dc; dirty_init(w); + bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_bdev = PTR_CACHE(dc->disk.c, &w->key, 0)->bdev; - io->bio.bi_rw = READ; io->bio.bi_end_io = read_dirty_endio; if (bio_alloc_pages(&io->bio, GFP_KERNEL)) @@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, if (sync_page_io(rdev, target, roundup(size, bdev_logical_block_size(rdev->bdev)), - page, READ, true)) { + page, REQ_OP_READ, 0, true)) { page->index = index; return 0; } @@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(WRITE | REQ_SYNC, bh); + submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); bh = bh->b_this_page; } @@ -392,7 +392,7 @@ static int read_page(struct file *file, unsigned long index, atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); - submit_bh(READ, bh); + submit_bh(REQ_OP_READ, 0, bh); } block++; bh = bh->b_this_page; @@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block, { int r; struct dm_io_request io_req = { - .bi_rw = rw, + .bi_op = rw, + .bi_op_flags = 0, .notify.fn = dmio_complete, .notify.context = b, .client = b->c->dm_io, @@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, * the dm_buffer's inline bio is local to bufio. */ b->bio.bi_private = end_io; + bio_set_op_attrs(&b->bio, rw, 0); /* * We assume that if len >= PAGE_SIZE ptr is page-aligned. @@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, ptr += PAGE_SIZE; } while (len > 0); - submit_bio(rw, &b->bio); + submit_bio(&b->bio); } static void submit_io(struct dm_buffer *b, int rw, sector_t block, @@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers); int dm_bufio_issue_flush(struct dm_bufio_client *c) { struct dm_io_request io_req = { - .bi_rw = WRITE_FLUSH, + .bi_op = REQ_OP_WRITE, + .bi_op_flags = WRITE_FLUSH, .mem.type = DM_IO_KMEM, .mem.ptr.addr = NULL, .client = c->dm_io, @@ -1,4 +1,4 @@ -#include "dm.h" +#include "dm-core.h" /* * The kobject release method must not be placed in the module itself, @@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && - !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) { + !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) && + bio_op(bio) != REQ_OP_DISCARD) { pb->tick = true; cache->need_tick_bio = false; } @@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) static int bio_triggers_commit(struct cache *cache, struct bio *bio) { - return bio->bi_rw & (REQ_FLUSH | REQ_FUA); + return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); } /* @@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio, static bool accountable_bio(struct cache *cache, struct bio *bio) { return ((bio->bi_bdev == cache->origin_dev->bdev) && - !(bio->bi_rw & REQ_DISCARD)); + bio_op(bio) != REQ_OP_DISCARD); } static void accounted_begin(struct cache *cache, struct bio *bio) @@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache) static bool discard_or_flush(struct bio *bio) { - return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD); + return bio_op(bio) == 
REQ_OP_DISCARD || + bio->bi_rw & (REQ_PREFLUSH | REQ_FUA); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) @@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) remap_to_cache(cache, bio, 0); /* - * REQ_FLUSH is not directed at any particular block so we don't - * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH + * REQ_PREFLUSH is not directed at any particular block so we don't + * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH * by dm-core. */ issue(cache, bio); @@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache) bio = bio_list_pop(&bios); - if (bio->bi_rw & REQ_FLUSH) + if (bio->bi_rw & REQ_PREFLUSH) process_flush_bio(cache, bio); - else if (bio->bi_rw & REQ_DISCARD) + else if (bio_op(bio) == REQ_OP_DISCARD) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h new file mode 100644 index 000000000000..40ceba1fe8be --- /dev/null +++ b/ drivers/md/dm-core.h@@ -0,0 +1,149 @@ +/* + * Internal header file _only_ for device mapper core + * + * Copyright (C) 2016 Red Hat, Inc. All rights reserved. + * + * This file is released under the LGPL. + */ + +#ifndef DM_CORE_INTERNAL_H +#define DM_CORE_INTERNAL_H + +#include <linux/kthread.h> +#include <linux/ktime.h> +#include <linux/blk-mq.h> + +#include <trace/events/block.h> + +#include "dm.h" + +#define DM_RESERVED_MAX_IOS 1024 + +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +/* + * DM core internal structure that used directly by dm.c and dm-rq.c + * DM targets must _not_ deference a mapped_device to directly access its members! + */ +struct mapped_device { + struct srcu_struct io_barrier; + struct mutex suspend_lock; + + /* + * The current mapping (struct dm_table *). + * Use dm_get_live_table{_fast} or take suspend_lock for + * dereference. + */ + void __rcu *map; + + struct list_head table_devices; + struct mutex table_devices_lock; + + unsigned long flags; + + struct request_queue *queue; + int numa_node_id; + + unsigned type; + /* Protect queue and type against concurrent access. */ + struct mutex type_lock; + + atomic_t holders; + atomic_t open_count; + + struct dm_target *immutable_target; + struct target_type *immutable_target_type; + + struct gendisk *disk; + char name[16]; + + void *interface_ptr; + + /* + * A list of ios that arrived while we were suspended. + */ + atomic_t pending[2]; + wait_queue_head_t wait; + struct work_struct work; + spinlock_t deferred_lock; + struct bio_list deferred; + + /* + * Event handling. + */ + wait_queue_head_t eventq; + atomic_t event_nr; + atomic_t uevent_seq; + struct list_head uevent_list; + spinlock_t uevent_lock; /* Protect access to uevent_list */ + + /* the number of internal suspends */ + unsigned internal_suspend_count; + + /* + * Processing queue (flush) + */ + struct workqueue_struct *wq; + + /* + * io objects are allocated from here. 
+ */ + mempool_t *io_pool; + mempool_t *rq_pool; + + struct bio_set *bs; + + /* + * freeze/thaw support require holding onto a super block + */ + struct super_block *frozen_sb; + + /* forced geometry settings */ + struct hd_geometry geometry; + + struct block_device *bdev; + + /* kobject and completion */ + struct dm_kobject_holder kobj_holder; + + /* zero-length flush that will be cloned and submitted to targets */ + struct bio flush_bio; + + struct dm_stats stats; + + struct kthread_worker kworker; + struct task_struct *kworker_task; + + /* for request-based merge heuristic in dm_request_fn() */ + unsigned seq_rq_merge_deadline_usecs; + int last_rq_rw; + sector_t last_rq_pos; + ktime_t last_rq_start_time; + + /* for blk-mq request-based DM support */ + struct blk_mq_tag_set *tag_set; + bool use_blk_mq:1; + bool init_tio_pdu:1; +}; + +void dm_init_md_queue(struct mapped_device *md); +void dm_init_normal_md_queue(struct mapped_device *md); +int md_in_flight(struct mapped_device *md); +void disable_write_same(struct mapped_device *md); + +static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) +{ + return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; +} + +unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max); + +static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) +{ + return !maxlen || strlen(result) + 1 >= maxlen; +} + +#endif @@ -683,7 +683,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, u8 *data) { struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - u64 sector = cpu_to_le64((u64)dmreq->iv_sector); + __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 buf[TCW_WHITENING_SIZE]; SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); int i, r; @@ -722,7 +722,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; - u64 sector = cpu_to_le64((u64)dmreq->iv_sector); + __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 *src; int r = 0; @@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; - clone->bi_rw = io->base_bio->bi_rw; + bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw); } static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) @@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct crypt_config *cc = ti->private; /* - * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. - * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight - * - for REQ_DISCARD caller must use flush if IO ordering matters + * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. + * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight + * - for REQ_OP_DISCARD caller must use flush if IO ordering matters */ - if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { + if (unlikely(bio->bi_rw & REQ_PREFLUSH || + bio_op(bio) == REQ_OP_DISCARD)) { bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) bio->bi_iter.bi_sector = cc->start + @@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio) remap_to_origin(era, bio); /* - * REQ_FLUSH bios carry no data, so we're not interested in them. + * REQ_PREFLUSH bios carry no data, so we're not interested in them. 
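A note on the dm-crypt change from u64 to __le64 further up: cpu_to_le64() returns a __le64, and storing the result in a plain u64 hides the endianness from sparse without changing behavior. The fixed shape of the pattern, with an illustrative structure:

	struct on_disk_record {
		__le64 sector;	/* stored little-endian regardless of host */
	};

	static void record_set_sector(struct on_disk_record *r, u64 sector)
	{
		r->sector = cpu_to_le64(sector);	/* types now agree */
	}

	static u64 record_get_sector(const struct on_disk_record *r)
	{
		return le64_to_cpu(r->sector);
	}
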
*/ - if (!(bio->bi_rw & REQ_FLUSH) && + if (!(bio->bi_rw & REQ_PREFLUSH) && (bio_data_dir(bio) == WRITE) && !metadata_current_marked(era->md, block)) { defer_bio(era, bio); @@ -266,7 +266,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " - "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", + "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n", bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); @@ -5,7 +5,7 @@ * This file is released under the GPL. */ -#include "dm.h" +#include "dm-core.h" #include <linux/device-mapper.h> @@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data) /*----------------------------------------------------------------- * IO routines that accept a list of pages. *---------------------------------------------------------------*/ -static void do_region(int rw, unsigned region, struct dm_io_region *where, - struct dpages *dp, struct io *io) +static void do_region(int op, int op_flags, unsigned region, + struct dm_io_region *where, struct dpages *dp, + struct io *io) { struct bio *bio; struct page *page; @@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, /* * Reject unsupported discard and write same requests. */ - if (rw & REQ_DISCARD) + if (op == REQ_OP_DISCARD) special_cmd_max_sectors = q->limits.max_discard_sectors; - else if (rw & REQ_WRITE_SAME) + else if (op == REQ_OP_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; - if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { + if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) && + special_cmd_max_sectors == 0) { dec_count(io, region, -EOPNOTSUPP); return; } /* - * where->count may be zero if rw holds a flush and we need to + * where->count may be zero if op holds a flush and we need to * send a zero-sized flush. */ do { /* * Allocate a suitably sized-bio. */ - if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) + if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME)) num_bvecs = 1; else num_bvecs = min_t(int, BIO_MAX_PAGES, @@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_bdev = where->bdev; bio->bi_end_io = endio; + bio_set_op_attrs(bio, op, op_flags); store_io_and_region_in_bio(bio, io, region); - if (rw & REQ_DISCARD) { + if (op == REQ_OP_DISCARD) { num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; - } else if (rw & REQ_WRITE_SAME) { + } else if (op == REQ_OP_WRITE_SAME) { /* * WRITE SAME only uses a single page. 
*/ @@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, } atomic_inc(&io->count); - submit_bio(rw, bio); + submit_bio(bio); } while (remaining); } -static void dispatch_io(int rw, unsigned int num_regions, +static void dispatch_io(int op, int op_flags, unsigned int num_regions, struct dm_io_region *where, struct dpages *dp, struct io *io, int sync) { @@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions, BUG_ON(num_regions > DM_IO_MAX_REGIONS); if (sync) - rw |= REQ_SYNC; + op_flags |= REQ_SYNC; /* * For multiple regions we need to be careful to rewind @@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count || (rw & REQ_FLUSH)) - do_region(rw, i, where + i, dp, io); + if (where[i].count || (op_flags & REQ_PREFLUSH)) + do_region(op, op_flags, i, where + i, dp, io); } /* @@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context) } static int sync_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int rw, struct dpages *dp, - unsigned long *error_bits) + struct dm_io_region *where, int op, int op_flags, + struct dpages *dp, unsigned long *error_bits) { struct io *io; struct sync_io sio; - if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + if (num_regions > 1 && !op_is_write(op)) { WARN_ON(1); return -EIO; } @@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(rw, num_regions, where, dp, io, 1); + dispatch_io(op, op_flags, num_regions, where, dp, io, 1); wait_for_completion_io(&sio.wait); @@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, } static int async_io(struct dm_io_client *client, unsigned int num_regions, - struct dm_io_region *where, int rw, struct dpages *dp, - io_notify_fn fn, void *context) + struct dm_io_region *where, int op, int op_flags, + struct dpages *dp, io_notify_fn fn, void *context) { struct io *io; - if (num_regions > 1 && (rw & RW_MASK) != WRITE) { + if (num_regions > 1 && !op_is_write(op)) { WARN_ON(1); fn(1, context); return -EIO; @@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(rw, num_regions, where, dp, io, 0); + dispatch_io(op, op_flags, num_regions, where, dp, io, 0); return 0; } @@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, case DM_IO_VMA: flush_kernel_vmap_range(io_req->mem.ptr.vma, size); - if ((io_req->bi_rw & RW_MASK) == READ) { + if (io_req->bi_op == REQ_OP_READ) { dp->vma_invalidate_address = io_req->mem.ptr.vma; dp->vma_invalidate_size = size; } @@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, if (!io_req->notify.fn) return sync_io(io_req->client, num_regions, where, - io_req->bi_rw, &dp, sync_error_bits); + io_req->bi_op, io_req->bi_op_flags, &dp, + sync_error_bits); - return async_io(io_req->client, num_regions, where, io_req->bi_rw, - &dp, io_req->notify.fn, io_req->notify.context); + return async_io(io_req->client, num_regions, where, io_req->bi_op, + io_req->bi_op_flags, &dp, io_req->notify.fn, + io_req->notify.context); } EXPORT_SYMBOL(dm_io); @@ -5,7 +5,7 @@ * This file is 
released under the GPL. */ -#include "dm.h" +#include "dm-core.h" #include <linux/module.h> #include <linux/vmalloc.h> @@ -1267,6 +1267,15 @@ static int populate_table(struct dm_table *table, return dm_table_complete(table); } +static bool is_valid_type(unsigned cur, unsigned new) +{ + if (cur == new || + (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED)) + return true; + + return false; +} + static int table_load(struct dm_ioctl *param, size_t param_size) { int r; @@ -1309,7 +1318,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size) DMWARN("unable to set up device queue for new table."); goto err_unlock_md_type; } - } else if (dm_get_md_type(md) != dm_table_get_type(t)) { + } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { DMWARN("can't change device type after initial table load."); r = -EINVAL; goto err_unlock_md_type; @@ -1670,8 +1679,7 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) return r; } -#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */ -#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */ +#define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */ #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */ static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags) @@ -1679,10 +1687,8 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla if (param_flags & DM_WIPE_BUFFER) memset(param, 0, param_size); - if (param_flags & DM_PARAMS_KMALLOC) - kfree(param); - if (param_flags & DM_PARAMS_VMALLOC) - vfree(param); + if (param_flags & DM_PARAMS_MALLOC) + kvfree(param); } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, @@ -1714,19 +1720,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern * Use kmalloc() rather than vmalloc() when we can. 
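The DM_PARAMS_MALLOC consolidation works because kvfree() frees either kind of allocation, so the caller no longer needs to remember which allocator succeeded. The generic shape of the pattern used here, with flags mirroring the copy_params() hunk:

	/* Sketch of the try-kmalloc-then-vmalloc fallback; free the
	 * result with kvfree() in either case. */
	static void *alloc_params_buf(size_t size)
	{
		void *p = NULL;

		if (size <= KMALLOC_MAX_SIZE)
			p = kmalloc(size, GFP_NOIO | __GFP_NORETRY |
				    __GFP_NOMEMALLOC | __GFP_NOWARN);
		if (!p)
			p = __vmalloc(size, GFP_NOIO | __GFP_HIGH |
				      __GFP_HIGHMEM, PAGE_KERNEL);
		return p;
	}
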
*/ dmi = NULL; - if (param_kernel->data_size <= KMALLOC_MAX_SIZE) { + if (param_kernel->data_size <= KMALLOC_MAX_SIZE) dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (dmi) - *param_flags |= DM_PARAMS_KMALLOC; - } if (!dmi) { unsigned noio_flag; noio_flag = memalloc_noio_save(); dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL); memalloc_noio_restore(noio_flag); - if (dmi) - *param_flags |= DM_PARAMS_VMALLOC; } if (!dmi) { @@ -1735,6 +1736,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern return -ENOMEM; } + *param_flags |= DM_PARAMS_MALLOC; + if (copy_from_user(dmi, user, param_kernel->data_size)) goto bad; @@ -26,7 +26,7 @@ #include <linux/device-mapper.h> #include <linux/dm-kcopyd.h> -#include "dm.h" +#include "dm-core.h" #define SUB_JOB_SIZE 128 #define SPLIT_COUNT 8 @@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context) io_job_finish(kc->throttle); if (error) { - if (job->rw & WRITE) + if (op_is_write(job->rw)) job->write_err |= error; else job->read_err = 1; @@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context) } } - if (job->rw & WRITE) + if (op_is_write(job->rw)) push(&kc->complete_jobs, job); else { @@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_rw = job->rw, + .bi_op = job->rw, + .bi_op_flags = 0, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = 0, @@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, if (r < 0) { /* error this rogue job */ - if (job->rw & WRITE) + if (op_is_write(job->rw)) job->write_err = (unsigned long) -1L; else job->read_err = 1; @@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, /* * Use WRITE SAME to optimize zeroing if all dests support it. 
*/ - job->rw = WRITE | REQ_WRITE_SAME; + job->rw = REQ_OP_WRITE_SAME; for (i = 0; i < job->num_dests; i++) if (!bdev_write_same(job->dests[i].bdev)) { job->rw = WRITE; @@ -141,9 +141,27 @@ static int linear_iterate_devices(struct dm_target *ti, return fn(ti, lc->dev, lc->start, ti->len, data); } +static long linear_direct_access(struct dm_target *ti, sector_t sector, + void __pmem **kaddr, pfn_t *pfn, long size) +{ + struct linear_c *lc = ti->private; + struct block_device *bdev = lc->dev->bdev; + struct blk_dax_ctl dax = { + .sector = linear_map_sector(ti, sector), + .size = size, + }; + long ret; + + ret = bdev_direct_access(bdev, &dax); + *kaddr = dax.addr; + *pfn = dax.pfn; + + return ret; +} + static struct target_type linear_target = { .name = "linear", - .version = {1, 2, 1}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = linear_ctr, .dtr = linear_dtr, @@ -151,6 +169,7 @@ static struct target_type linear_target = { .status = linear_status, .prepare_ioctl = linear_prepare_ioctl, .iterate_devices = linear_iterate_devices, + .direct_access = linear_direct_access, }; int __init dm_linear_init(void) @@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); page = alloc_page(GFP_KERNEL); if (!page) { @@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry, DMERR("Couldn't add page to the log block"); goto error_bio; } - submit_bio(WRITE, bio); + submit_bio(bio); return 0; error_bio: bio_put(bio); @@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); for (i = 0; i < block->vec_cnt; i++) { /* @@ -279,7 +281,7 @@ static int log_one_block(struct log_writes_c *lc, block->vecs[i].bv_len, 0); if (ret != block->vecs[i].bv_len) { atomic_inc(&lc->io_blocks); - submit_bio(WRITE, bio); + submit_bio(bio); bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); if (!bio) { DMERR("Couldn't alloc log bio"); @@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc, bio->bi_bdev = lc->logdev->bdev; bio->bi_end_io = log_end_io; bio->bi_private = lc; + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ret = bio_add_page(bio, block->vecs[i].bv_page, block->vecs[i].bv_len, 0); @@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc, } sector += block->vecs[i].bv_len >> SECTOR_SHIFT; } - submit_bio(WRITE, bio); + submit_bio(bio); out: kfree(block->data); kfree(block); @@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio) struct bio_vec bv; size_t alloc_size; int i = 0; - bool flush_bio = (bio->bi_rw & REQ_FLUSH); + bool flush_bio = (bio->bi_rw & REQ_PREFLUSH); bool fua_bio = (bio->bi_rw & REQ_FUA); - bool discard_bio = (bio->bi_rw & REQ_DISCARD); + bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD); pb->block = NULL; @@ -293,7 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis static int rw_header(struct log_c *lc, int rw) { - lc->io_req.bi_rw = rw; + lc->io_req.bi_op = rw; return dm_io(&lc->io_req, 1, &lc->header_location, NULL); } @@ -306,7 +306,8 @@ static int flush_header(struct log_c *lc) .count = 0, }; - lc->io_req.bi_rw = WRITE_FLUSH; + lc->io_req.bi_op = REQ_OP_WRITE; + lc->io_req.bi_op_flags = WRITE_FLUSH; return dm_io(&lc->io_req, 1, &null_location, NULL); } @@ -7,7 +7,8 @@ #include 
<linux/device-mapper.h> -#include "dm.h" +#include "dm-rq.h" +#include "dm-bio-record.h" #include "dm-path-selector.h" #include "dm-uevent.h" @@ -89,6 +90,8 @@ struct multipath { atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */ atomic_t pg_init_count; /* Number of times pg_init called */ + unsigned queue_mode; + /* * We must use a mempool of dm_mpath_io structs so that we * can resubmit bios on error. @@ -97,10 +100,13 @@ struct multipath { struct mutex work_mutex; struct work_struct trigger_event; + + struct work_struct process_queued_bios; + struct bio_list queued_bios; }; /* - * Context information attached to each bio we process. + * Context information attached to each io we process. */ struct dm_mpath_io { struct pgpath *pgpath; @@ -114,6 +120,7 @@ static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); +static void process_queued_bios(struct work_struct *work); /*----------------------------------------------- * Multipath state flags. @@ -185,7 +192,7 @@ static void free_priority_group(struct priority_group *pg, kfree(pg); } -static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq) +static struct multipath *alloc_multipath(struct dm_target *ti) { struct multipath *m; @@ -203,15 +210,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq) mutex_init(&m->work_mutex); m->mpio_pool = NULL; - if (!use_blk_mq) { - unsigned min_ios = dm_get_reserved_rq_based_ios(); - - m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); - if (!m->mpio_pool) { - kfree(m); - return NULL; - } - } + m->queue_mode = DM_TYPE_NONE; m->ti = ti; ti->private = m; @@ -220,6 +219,39 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq) return m; } +static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) +{ + if (m->queue_mode == DM_TYPE_NONE) { + /* + * Default to request-based. + */ + if (dm_use_blk_mq(dm_table_get_md(ti->table))) + m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; + else + m->queue_mode = DM_TYPE_REQUEST_BASED; + } + + if (m->queue_mode == DM_TYPE_REQUEST_BASED) { + unsigned min_ios = dm_get_reserved_rq_based_ios(); + + m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); + if (!m->mpio_pool) + return -ENOMEM; + } + else if (m->queue_mode == DM_TYPE_BIO_BASED) { + INIT_WORK(&m->process_queued_bios, process_queued_bios); + /* + * bio-based doesn't support any direct scsi_dh management; + * it just discovers if a scsi_dh is attached. 
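/*
 * The constructor is being split in two because m->queue_mode is only
 * known after the feature arguments have been parsed. Condensed sketch of
 * the resulting call order in multipath_ctr() (error unwinding and the
 * remaining argument parsing elided; not the literal patch code):
 */
static int multipath_ctr_sketch(struct dm_target *ti, struct dm_arg_set *as)
{
	struct multipath *m = alloc_multipath(ti);	/* queue_mode == DM_TYPE_NONE */
	int r;

	if (!m)
		return -EINVAL;

	r = parse_features(as, m);		/* may set m->queue_mode */
	if (r)
		return r;

	return alloc_multipath_stage2(ti, m);	/* mode-specific resources */
}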
+ */ + set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); + } + + dm_table_set_type(ti->table, m->queue_mode); + + return 0; +} + static void free_multipath(struct multipath *m) { struct priority_group *pg, *tmp; @@ -272,6 +304,41 @@ static void clear_request_fn_mpio(struct multipath *m, union map_info *info) } } +static size_t multipath_per_bio_data_size(void) +{ + return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details); +} + +static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio) +{ + return dm_per_bio_data(bio, multipath_per_bio_data_size()); +} + +static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio) +{ + /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */ + struct dm_mpath_io *mpio = get_mpio_from_bio(bio); + void *bio_details = mpio + 1; + + return bio_details; +} + +static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p, + struct dm_bio_details **bio_details_p) +{ + struct dm_mpath_io *mpio = get_mpio_from_bio(bio); + struct dm_bio_details *bio_details = get_bio_details_from_bio(bio); + + memset(mpio, 0, sizeof(*mpio)); + memset(bio_details, 0, sizeof(*bio_details)); + dm_bio_record(bio_details, bio); + + if (mpio_p) + *mpio_p = mpio; + if (bio_details_p) + *bio_details_p = bio_details; +} + /*----------------------------------------------- * Path selection *-----------------------------------------------*/ @@ -431,16 +498,26 @@ failed: * and multipath_resume() calls and we have no need to check * for the DMF_NOFLUSH_SUSPENDING flag. */ -static int must_push_back(struct multipath *m) +static bool __must_push_back(struct multipath *m) +{ + return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) != + test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) && + dm_noflush_suspending(m->ti)); +} + +static bool must_push_back_rq(struct multipath *m) { return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || - ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) != - test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) && - dm_noflush_suspending(m->ti))); + __must_push_back(m)); +} + +static bool must_push_back_bio(struct multipath *m) +{ + return __must_push_back(m); } /* - * Map cloned requests + * Map cloned requests (request-based multipath) */ static int __multipath_map(struct dm_target *ti, struct request *clone, union map_info *map_context, @@ -459,7 +536,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, pgpath = choose_pgpath(m, nr_bytes); if (!pgpath) { - if (!must_push_back(m)) + if (!must_push_back_rq(m)) r = -EIO; /* Failed */ return r; } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || @@ -530,6 +607,108 @@ static void multipath_release_clone(struct request *clone) } /* + * Map cloned bios (bio-based multipath) + */ +static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio) +{ + size_t nr_bytes = bio->bi_iter.bi_size; + struct pgpath *pgpath; + unsigned long flags; + bool queue_io; + + /* Do we need to select a new pgpath? 
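/*
 * For reference, the per-bio data the helpers above address is one flat
 * region that the target reserves via ti->per_io_data_size (see
 * multipath_per_bio_data_size()): the dm_bio_details record used to
 * restore a bio before requeueing sits directly behind the dm_mpath_io.
 *
 *   dm_per_bio_data(bio, size)
 *   |<-- struct dm_mpath_io -->|<-- struct dm_bio_details -->|
 *
 * get_bio_details_from_bio() is therefore just pointer arithmetic,
 * (mpio + 1), and multipath_init_per_bio_data() zeroes both parts and
 * snapshots the bio with dm_bio_record().
 */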
*/ + pgpath = lockless_dereference(m->current_pgpath); + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); + if (!pgpath || !queue_io) + pgpath = choose_pgpath(m, nr_bytes); + + if ((pgpath && queue_io) || + (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) { + /* Queue for the daemon to resubmit */ + spin_lock_irqsave(&m->lock, flags); + bio_list_add(&m->queued_bios, bio); + spin_unlock_irqrestore(&m->lock, flags); + /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */ + if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) + pg_init_all_paths(m); + else if (!queue_io) + queue_work(kmultipathd, &m->process_queued_bios); + return DM_MAPIO_SUBMITTED; + } + + if (!pgpath) { + if (!must_push_back_bio(m)) + return -EIO; + return DM_MAPIO_REQUEUE; + } + + mpio->pgpath = pgpath; + mpio->nr_bytes = nr_bytes; + + bio->bi_error = 0; + bio->bi_bdev = pgpath->path.dev->bdev; + bio->bi_rw |= REQ_FAILFAST_TRANSPORT; + + if (pgpath->pg->ps.type->start_io) + pgpath->pg->ps.type->start_io(&pgpath->pg->ps, + &pgpath->path, + nr_bytes); + return DM_MAPIO_REMAPPED; +} + +static int multipath_map_bio(struct dm_target *ti, struct bio *bio) +{ + struct multipath *m = ti->private; + struct dm_mpath_io *mpio = NULL; + + multipath_init_per_bio_data(bio, &mpio, NULL); + + return __multipath_map_bio(m, bio, mpio); +} + +static void process_queued_bios_list(struct multipath *m) +{ + if (m->queue_mode == DM_TYPE_BIO_BASED) + queue_work(kmultipathd, &m->process_queued_bios); +} + +static void process_queued_bios(struct work_struct *work) +{ + int r; + unsigned long flags; + struct bio *bio; + struct bio_list bios; + struct blk_plug plug; + struct multipath *m = + container_of(work, struct multipath, process_queued_bios); + + bio_list_init(&bios); + + spin_lock_irqsave(&m->lock, flags); + + if (bio_list_empty(&m->queued_bios)) { + spin_unlock_irqrestore(&m->lock, flags); + return; + } + + bio_list_merge(&bios, &m->queued_bios); + bio_list_init(&m->queued_bios); + + spin_unlock_irqrestore(&m->lock, flags); + + blk_start_plug(&plug); + while ((bio = bio_list_pop(&bios))) { + r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio)); + if (r < 0 || r == DM_MAPIO_REQUEUE) { + bio->bi_error = r; + bio_endio(bio); + } else if (r == DM_MAPIO_REMAPPED) + generic_make_request(bio); + } + blk_finish_plug(&plug); +} + +/* * If we run out of usable paths, should we queue I/O or error it? 
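/*
 * The shape of process_queued_bios() above is a common one: steal the
 * whole queued list while holding the lock, then issue the bios lock-free
 * under a plug so the block layer can merge adjacent ones. Generic sketch
 * with a hypothetical submit callback, not the multipath code itself:
 */
static void drain_bio_list(spinlock_t *lock, struct bio_list *queued,
			   void (*submit_fn)(struct bio *))
{
	struct bio_list bios;
	struct blk_plug plug;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(lock, flags);
	bio_list_merge(&bios, queued);	/* take ownership of the backlog */
	bio_list_init(queued);		/* shared list is now empty */
	spin_unlock_irqrestore(lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios)))
		submit_fn(bio);
	blk_finish_plug(&plug);
}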
*/ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, @@ -557,8 +736,10 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, spin_unlock_irqrestore(&m->lock, flags); - if (!queue_if_no_path) + if (!queue_if_no_path) { dm_table_run_md_queue_async(m->ti->table); + process_queued_bios_list(m); + } return 0; } @@ -798,6 +979,12 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) if (!hw_argc) return 0; + if (m->queue_mode == DM_TYPE_BIO_BASED) { + dm_consume_args(as, hw_argc); + DMERR("bio-based multipath doesn't allow hardware handler args"); + return 0; + } + m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); if (hw_argc > 1) { @@ -833,7 +1020,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) const char *arg_name; static struct dm_arg _args[] = { - {0, 6, "invalid number of feature args"}, + {0, 8, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; @@ -873,6 +1060,24 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) continue; } + if (!strcasecmp(arg_name, "queue_mode") && + (argc >= 1)) { + const char *queue_mode_name = dm_shift_arg(as); + + if (!strcasecmp(queue_mode_name, "bio")) + m->queue_mode = DM_TYPE_BIO_BASED; + else if (!strcasecmp(queue_mode_name, "rq")) + m->queue_mode = DM_TYPE_REQUEST_BASED; + else if (!strcasecmp(queue_mode_name, "mq")) + m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; + else { + ti->error = "Unknown 'queue_mode' requested"; + r = -EINVAL; + } + argc--; + continue; + } + ti->error = "Unrecognised multipath feature request"; r = -EINVAL; } while (argc && !r); @@ -880,8 +1085,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) return r; } -static int multipath_ctr(struct dm_target *ti, unsigned int argc, - char **argv) +static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) { /* target arguments */ static struct dm_arg _args[] = { @@ -894,12 +1098,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, struct dm_arg_set as; unsigned pg_count = 0; unsigned next_pg_num; - bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table)); as.argc = argc; as.argv = argv; - m = alloc_multipath(ti, use_blk_mq); + m = alloc_multipath(ti); if (!m) { ti->error = "can't allocate multipath"; return -EINVAL; @@ -909,6 +1112,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, if (r) goto bad; + r = alloc_multipath_stage2(ti, m); + if (r) + goto bad; + r = parse_hw_handler(&as, m); if (r) goto bad; @@ -958,7 +1165,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_same_bios = 1; - if (use_blk_mq) + if (m->queue_mode == DM_TYPE_BIO_BASED) + ti->per_io_data_size = multipath_per_bio_data_size(); + else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) ti->per_io_data_size = sizeof(struct dm_mpath_io); return 0; @@ -1083,8 +1292,10 @@ static int reinstate_path(struct pgpath *pgpath) out: spin_unlock_irqrestore(&m->lock, flags); - if (run_queue) + if (run_queue) { dm_table_run_md_queue_async(m->ti->table); + process_queued_bios_list(m); + } return r; } @@ -1281,6 +1492,8 @@ static void pg_init_done(void *data, int errors) } clear_bit(MPATHF_QUEUE_IO, &m->flags); + process_queued_bios_list(m); + /* * Wake up any thread waiting to suspend. 
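/*
 * With the queue_mode feature parsed above, a table line selecting the
 * bio-based mode might look like this (illustrative geometry: one path
 * group, one path on 8:16, round-robin selector):
 *
 *   0 2097152 multipath 2 queue_mode bio 0 1 1 round-robin 0 1 1 8:16 1
 *
 * "queue_mode <bio|rq|mq>" counts as two feature arguments, which is why
 * parse_features() raises the feature-arg maximum from 6 to 8.
 */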
*/ @@ -1328,7 +1541,7 @@ static int do_end_io(struct multipath *m, struct request *clone, * during end I/O handling, since those clone requests don't have * bio clones. If we queue them inside the multipath target, * we need to make bio clones, that requires memory allocation. - * (See drivers/md/dm.c:end_clone_bio() about why the clone requests + * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests * don't have bio clones.) * Instead of queueing the clone request here, we queue the original * request into dm core, which will remake a clone request and @@ -1347,7 +1560,7 @@ static int do_end_io(struct multipath *m, struct request *clone, if (!atomic_read(&m->nr_valid_paths)) { if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { - if (!must_push_back(m)) + if (!must_push_back_rq(m)) r = -EIO; } else { if (error == -EBADE) @@ -1381,6 +1594,64 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, return r; } +static int do_end_io_bio(struct multipath *m, struct bio *clone, + int error, struct dm_mpath_io *mpio) +{ + unsigned long flags; + + if (!error) + return 0; /* I/O complete */ + + if (noretry_error(error)) + return error; + + if (mpio->pgpath) + fail_path(mpio->pgpath); + + if (!atomic_read(&m->nr_valid_paths)) { + if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { + if (!must_push_back_bio(m)) + return -EIO; + return DM_ENDIO_REQUEUE; + } else { + if (error == -EBADE) + return error; + } + } + + /* Queue for the daemon to resubmit */ + dm_bio_restore(get_bio_details_from_bio(clone), clone); + + spin_lock_irqsave(&m->lock, flags); + bio_list_add(&m->queued_bios, clone); + spin_unlock_irqrestore(&m->lock, flags); + if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) + queue_work(kmultipathd, &m->process_queued_bios); + + return DM_ENDIO_INCOMPLETE; +} + +static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error) +{ + struct multipath *m = ti->private; + struct dm_mpath_io *mpio = get_mpio_from_bio(clone); + struct pgpath *pgpath; + struct path_selector *ps; + int r; + + BUG_ON(!mpio); + + r = do_end_io_bio(m, clone, error, mpio); + pgpath = mpio->pgpath; + if (pgpath) { + ps = &pgpath->pg->ps; + if (ps->type->end_io) + ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); + } + + return r; +} + /* * Suspend can't complete until all the I/O is processed so if * the last path fails we must error any remaining I/O. 
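/*
 * Summary of what do_end_io_bio() above can return, as read from the hunk:
 *   0                   - I/O completed successfully; finish the bio
 *   error (< 0)         - not retryable (noretry_error()) or -EBADE; pass up
 *   -EIO                - no valid paths and not queueing; fail the bio
 *   DM_ENDIO_REQUEUE    - no paths, noflush suspend in progress; dm core requeues
 *   DM_ENDIO_INCOMPLETE - bio restored via dm_bio_restore() and parked on
 *                         m->queued_bios for the worker to resubmit
 */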
@@ -1454,7 +1725,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + (m->pg_init_retries > 0) * 2 + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + - test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)); + test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) + + (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2); + if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) DMEMIT("queue_if_no_path "); if (m->pg_init_retries) @@ -1463,6 +1736,16 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) DMEMIT("retain_attached_hw_handler "); + if (m->queue_mode != DM_TYPE_REQUEST_BASED) { + switch(m->queue_mode) { + case DM_TYPE_BIO_BASED: + DMEMIT("queue_mode bio "); + break; + case DM_TYPE_MQ_REQUEST_BASED: + DMEMIT("queue_mode mq "); + break; + } + } } if (!m->hw_handler_name || type == STATUSTYPE_INFO) @@ -1642,6 +1925,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) pg_init_all_paths(m); dm_table_run_md_queue_async(m->ti->table); + process_queued_bios_list(m); } /* @@ -1748,7 +2032,7 @@ static int multipath_busy(struct dm_target *ti) *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 11, 0}, + .version = {1, 12, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, .module = THIS_MODULE, .ctr = multipath_ctr, @@ -1757,6 +2041,8 @@ static struct target_type multipath_target = { .clone_and_map_rq = multipath_clone_and_map, .release_clone_rq = multipath_release_clone, .rq_end_io = multipath_end_io, + .map = multipath_map_bio, + .end_io = multipath_end_io_bio, .presuspend = multipath_presuspend, .postsuspend = multipath_postsuspend, .resume = multipath_resume, @@ -1771,14 +2057,14 @@ static int __init dm_multipath_init(void) { int r; - /* allocate a slab for the dm_ios */ + /* allocate a slab for the dm_mpath_ios */ _mpio_cache = KMEM_CACHE(dm_mpath_io, 0); if (!_mpio_cache) return -ENOMEM; r = dm_register_target(&multipath_target); if (r < 0) { - DMERR("register failed %d", r); + DMERR("request-based register failed %d", r); r = -EINVAL; goto bad_register_target; } @@ -1804,10 +2090,6 @@ static int __init dm_multipath_init(void) goto bad_alloc_kmpath_handlerd; } - DMINFO("version %u.%u.%u loaded", - multipath_target.version[0], multipath_target.version[1], - multipath_target.version[2]); - return 0; bad_alloc_kmpath_handlerd: @@ -1,6 +1,6 @@ /* * Copyright (C) 2010-2011 Neil Brown - * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved. + * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -17,7 +17,12 @@ #include <linux/device-mapper.h> #define DM_MSG_PREFIX "raid" -#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */ +#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */ + +/* + * Minimum sectors of free reshape space per raid device + */ +#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096) static bool devices_handle_discard_safely = false; @@ -25,12 +30,12 @@ static bool devices_handle_discard_safely = false; * The following flags are used by dm-raid.c to set up the array state. * They must be cleared before md_run is called. 
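/*
 * For reference: to_sector() converts bytes to 512-byte sectors, so
 * MIN_FREE_RESHAPE_SPACE above is 4 pages * 4096 bytes = 16 KiB = 32
 * sectors of out-of-place reshape space per raid device.
 */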
*/ -#define FirstUse 10 /* rdev flag */ +#define FirstUse 10 /* rdev flag */ struct raid_dev { /* * Two DM devices, one to hold metadata and one to hold the - * actual data/parity. The reason for this is to not confuse + * actual data/parity. The reason for this is to not confuse * ti->len and give more flexibility in altering size and * characteristics. * @@ -46,25 +51,175 @@ struct raid_dev { }; /* + * Bits for establishing rs->ctr_flags + * + * 1 = no flag value + * 2 = flag with value + */ +#define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */ +#define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */ +#define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */ +#define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */ +#define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */ +#define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */ +#define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */ +#define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */ +#define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */ +#define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */ +#define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */ +#define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */ +/* New for v1.9.0 */ +#define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */ +#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */ +#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */ + +/* * Flags for rs->ctr_flags field. */ -#define CTR_FLAG_SYNC 0x1 -#define CTR_FLAG_NOSYNC 0x2 -#define CTR_FLAG_REBUILD 0x4 -#define CTR_FLAG_DAEMON_SLEEP 0x8 -#define CTR_FLAG_MIN_RECOVERY_RATE 0x10 -#define CTR_FLAG_MAX_RECOVERY_RATE 0x20 -#define CTR_FLAG_MAX_WRITE_BEHIND 0x40 -#define CTR_FLAG_STRIPE_CACHE 0x80 -#define CTR_FLAG_REGION_SIZE 0x100 -#define CTR_FLAG_RAID10_COPIES 0x200 -#define CTR_FLAG_RAID10_FORMAT 0x400 +#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC) +#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC) +#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD) +#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP) +#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE) +#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE) +#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND) +#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY) +#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE) +#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE) +#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES) +#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT) +#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS) +#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET) +#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS) + +/* + * Definitions of various constructor flags to + * be used in checks of valid / invalid flags + * per raid level. + */ +/* Define all any sync flags */ +#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC) + +/* Define flags for options without argument (e.g. 'nosync') */ +#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \ + CTR_FLAG_RAID10_USE_NEAR_SETS) + +/* Define flags for options with one argument (e.g. 
'delta_disks +2') */ +#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \ + CTR_FLAG_WRITE_MOSTLY | \ + CTR_FLAG_DAEMON_SLEEP | \ + CTR_FLAG_MIN_RECOVERY_RATE | \ + CTR_FLAG_MAX_RECOVERY_RATE | \ + CTR_FLAG_MAX_WRITE_BEHIND | \ + CTR_FLAG_STRIPE_CACHE | \ + CTR_FLAG_REGION_SIZE | \ + CTR_FLAG_RAID10_COPIES | \ + CTR_FLAG_RAID10_FORMAT | \ + CTR_FLAG_DELTA_DISKS | \ + CTR_FLAG_DATA_OFFSET) + +/* Valid options definitions per raid level... */ + +/* "raid0" does only accept data offset */ +#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET) + +/* "raid1" does not accept stripe cache, data offset, delta_disks or any raid10 options */ +#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \ + CTR_FLAG_REBUILD | \ + CTR_FLAG_WRITE_MOSTLY | \ + CTR_FLAG_DAEMON_SLEEP | \ + CTR_FLAG_MIN_RECOVERY_RATE | \ + CTR_FLAG_MAX_RECOVERY_RATE | \ + CTR_FLAG_MAX_WRITE_BEHIND | \ + CTR_FLAG_REGION_SIZE | \ + CTR_FLAG_DELTA_DISKS | \ + CTR_FLAG_DATA_OFFSET) + +/* "raid10" does not accept any raid1 or stripe cache options */ +#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \ + CTR_FLAG_REBUILD | \ + CTR_FLAG_DAEMON_SLEEP | \ + CTR_FLAG_MIN_RECOVERY_RATE | \ + CTR_FLAG_MAX_RECOVERY_RATE | \ + CTR_FLAG_REGION_SIZE | \ + CTR_FLAG_RAID10_COPIES | \ + CTR_FLAG_RAID10_FORMAT | \ + CTR_FLAG_DELTA_DISKS | \ + CTR_FLAG_DATA_OFFSET | \ + CTR_FLAG_RAID10_USE_NEAR_SETS) + +/* + * "raid4/5/6" do not accept any raid1 or raid10 specific options + * + * "raid6" does not accept "nosync", because it is not guaranteed + * that both parity and q-syndrome are being written properly with + * any writes + */ +#define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \ + CTR_FLAG_REBUILD | \ + CTR_FLAG_DAEMON_SLEEP | \ + CTR_FLAG_MIN_RECOVERY_RATE | \ + CTR_FLAG_MAX_RECOVERY_RATE | \ + CTR_FLAG_MAX_WRITE_BEHIND | \ + CTR_FLAG_STRIPE_CACHE | \ + CTR_FLAG_REGION_SIZE | \ + CTR_FLAG_DELTA_DISKS | \ + CTR_FLAG_DATA_OFFSET) + +#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \ + CTR_FLAG_REBUILD | \ + CTR_FLAG_DAEMON_SLEEP | \ + CTR_FLAG_MIN_RECOVERY_RATE | \ + CTR_FLAG_MAX_RECOVERY_RATE | \ + CTR_FLAG_MAX_WRITE_BEHIND | \ + CTR_FLAG_STRIPE_CACHE | \ + CTR_FLAG_REGION_SIZE | \ + CTR_FLAG_DELTA_DISKS | \ + CTR_FLAG_DATA_OFFSET) +/* ...valid options definitions per raid level */ + +/* + * Flags for rs->runtime_flags field + * (RT_FLAG prefix meaning "runtime flag") + * + * These are all internal and used to define runtime state, + * e.g. to prevent another resume from preresume processing + * the raid set all over again. 
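/*
 * The ctr flags are now deliberately defined twice: __CTR_FLAG_* are bit
 * numbers for the bitops on the (now unsigned long) rs->ctr_flags, while
 * CTR_FLAG_* are the derived masks that build the per-level validity sets
 * above. Illustration of both forms (hypothetical helper, not in the patch):
 */
static int note_rebuild_arg(struct raid_set *rs)
{
	set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);	/* bit-number form */

	if (rs->ctr_flags & ~RAID1_VALID_FLAGS)		/* mask form */
		return -EINVAL;

	return test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 0 : -EINVAL;
}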
+ */ +#define RT_FLAG_RS_PRERESUMED 0 +#define RT_FLAG_RS_RESUMED 1 +#define RT_FLAG_RS_BITMAP_LOADED 2 +#define RT_FLAG_UPDATE_SBS 3 +#define RT_FLAG_RESHAPE_RS 4 +#define RT_FLAG_KEEP_RS_FROZEN 5 + +/* Array elements of 64 bit needed for rebuild/failed disk bits */ +#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) + +/* + * raid set level, layout and chunk sectors backup/restore + */ +struct rs_layout { + int new_level; + int new_layout; + int new_chunk_sectors; +}; struct raid_set { struct dm_target *ti; uint32_t bitmap_loaded; - uint32_t ctr_flags; + uint32_t stripe_cache_entries; + unsigned long ctr_flags; + unsigned long runtime_flags; + + uint64_t rebuild_disks[DISKS_ARRAY_ELEMS]; + + int raid_disks; + int delta_disks; + int data_offset; + int raid10_copies; + int requested_bitmap_chunk_sectors; struct mddev md; struct raid_type *raid_type; @@ -73,82 +228,446 @@ struct raid_set { struct raid_dev dev[0]; }; +static void rs_config_backup(struct raid_set *rs, struct rs_layout *l) +{ + struct mddev *mddev = &rs->md; + + l->new_level = mddev->new_level; + l->new_layout = mddev->new_layout; + l->new_chunk_sectors = mddev->new_chunk_sectors; +} + +static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) +{ + struct mddev *mddev = &rs->md; + + mddev->new_level = l->new_level; + mddev->new_layout = l->new_layout; + mddev->new_chunk_sectors = l->new_chunk_sectors; +} + +/* raid10 algorithms (i.e. formats) */ +#define ALGORITHM_RAID10_DEFAULT 0 +#define ALGORITHM_RAID10_NEAR 1 +#define ALGORITHM_RAID10_OFFSET 2 +#define ALGORITHM_RAID10_FAR 3 + /* Supported raid types and properties. */ static struct raid_type { const char *name; /* RAID algorithm. */ const char *descr; /* Descriptor text for logging. */ - const unsigned parity_devs; /* # of parity devices. */ - const unsigned minimal_devs; /* minimal # of devices in set. */ - const unsigned level; /* RAID level. */ - const unsigned algorithm; /* RAID algorithm. */ + const unsigned int parity_devs; /* # of parity devices. */ + const unsigned int minimal_devs;/* minimal # of devices in set. */ + const unsigned int level; /* RAID level. */ + const unsigned int algorithm; /* RAID algorithm. 
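/*
 * Worked out: with MAX_RAID_DEVICES = 253 and 64 bits per uint64_t,
 * DISKS_ARRAY_ELEMS = (253 + 63) / 64 = 4, i.e. rs->rebuild_disks above
 * is four 64-bit words -- 256 bits, enough for one bit per possible
 * raid device.
 */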
*/ } raid_types[] = { - {"raid0", "RAID0 (striping)", 0, 2, 0, 0 /* NONE */}, - {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, - {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, - {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, - {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, - {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, - {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, - {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, - {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, - {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, - {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} + {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */}, + {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */}, + {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR}, + {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, + {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, + {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, + {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ + {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, + {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, + {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, + {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, + {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, + {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, + {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, + {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}, + {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6}, + {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6}, + {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6}, + {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6}, + {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6} +}; + +/* True, if @v is in inclusive range [@min, @max] */ +static bool __within_range(long v, long min, long max) +{ + return v >= min && v <= max; +} + +/* All table line arguments are defined here */ +static struct arg_name_flag { + const unsigned long flag; + const char *name; +} __arg_name_flags[] = { + { CTR_FLAG_SYNC, "sync"}, + { CTR_FLAG_NOSYNC, "nosync"}, + { CTR_FLAG_REBUILD, "rebuild"}, + { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"}, + { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"}, + { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"}, + { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"}, + { CTR_FLAG_WRITE_MOSTLY, "write_mostly"}, + { CTR_FLAG_STRIPE_CACHE, "stripe_cache"}, + { CTR_FLAG_REGION_SIZE, "region_size"}, + { CTR_FLAG_RAID10_COPIES, "raid10_copies"}, + { CTR_FLAG_RAID10_FORMAT, "raid10_format"}, + { CTR_FLAG_DATA_OFFSET, "data_offset"}, + { CTR_FLAG_DELTA_DISKS, "delta_disks"}, + { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"}, }; -static char *raid10_md_layout_to_format(int layout) +/* Return argument name 
string for given @flag */ +static const char *dm_raid_arg_name_by_flag(const uint32_t flag) +{ + if (hweight32(flag) == 1) { + struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags); + + while (anf-- > __arg_name_flags) + if (flag & anf->flag) + return anf->name; + + } else + DMERR("%s called with more than one flag!", __func__); + + return NULL; +} + +/* + * Bool helpers to test for various raid levels of a raid set. + * It's level as reported by the superblock rather than + * the requested raid_type passed to the constructor. + */ +/* Return true, if raid set in @rs is raid0 */ +static bool rs_is_raid0(struct raid_set *rs) +{ + return !rs->md.level; +} + +/* Return true, if raid set in @rs is raid1 */ +static bool rs_is_raid1(struct raid_set *rs) +{ + return rs->md.level == 1; +} + +/* Return true, if raid set in @rs is raid10 */ +static bool rs_is_raid10(struct raid_set *rs) +{ + return rs->md.level == 10; +} + +/* Return true, if raid set in @rs is level 6 */ +static bool rs_is_raid6(struct raid_set *rs) +{ + return rs->md.level == 6; +} + +/* Return true, if raid set in @rs is level 4, 5 or 6 */ +static bool rs_is_raid456(struct raid_set *rs) +{ + return __within_range(rs->md.level, 4, 6); +} + +/* Return true, if raid set in @rs is reshapable */ +static bool __is_raid10_far(int layout); +static bool rs_is_reshapable(struct raid_set *rs) +{ + return rs_is_raid456(rs) || + (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout)); +} + +/* Return true, if raid set in @rs is recovering */ +static bool rs_is_recovering(struct raid_set *rs) +{ + return rs->md.recovery_cp < rs->dev[0].rdev.sectors; +} + +/* Return true, if raid set in @rs is reshaping */ +static bool rs_is_reshaping(struct raid_set *rs) +{ + return rs->md.reshape_position != MaxSector; +} + +/* + * bool helpers to test for various raid levels of a raid type @rt + */ + +/* Return true, if raid type in @rt is raid0 */ +static bool rt_is_raid0(struct raid_type *rt) +{ + return !rt->level; +} + +/* Return true, if raid type in @rt is raid1 */ +static bool rt_is_raid1(struct raid_type *rt) +{ + return rt->level == 1; +} + +/* Return true, if raid type in @rt is raid10 */ +static bool rt_is_raid10(struct raid_type *rt) +{ + return rt->level == 10; +} + +/* Return true, if raid type in @rt is raid4/5 */ +static bool rt_is_raid45(struct raid_type *rt) +{ + return __within_range(rt->level, 4, 5); +} + +/* Return true, if raid type in @rt is raid6 */ +static bool rt_is_raid6(struct raid_type *rt) +{ + return rt->level == 6; +} + +/* Return true, if raid type in @rt is raid4/5/6 */ +static bool rt_is_raid456(struct raid_type *rt) +{ + return __within_range(rt->level, 4, 6); +} +/* END: raid level bools */ + +/* Return valid ctr flags for the raid level of @rs */ +static unsigned long __valid_flags(struct raid_set *rs) +{ + if (rt_is_raid0(rs->raid_type)) + return RAID0_VALID_FLAGS; + else if (rt_is_raid1(rs->raid_type)) + return RAID1_VALID_FLAGS; + else if (rt_is_raid10(rs->raid_type)) + return RAID10_VALID_FLAGS; + else if (rt_is_raid45(rs->raid_type)) + return RAID45_VALID_FLAGS; + else if (rt_is_raid6(rs->raid_type)) + return RAID6_VALID_FLAGS; + + return 0; +} + +/* + * Check for valid flags set on @rs + * + * Has to be called after parsing of the ctr flags! 
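/*
 * Example lookups against __arg_name_flags[] via dm_raid_arg_name_by_flag()
 * above; the table is walked backwards from its end, and the hweight32()
 * guard insists on exactly one bit being set:
 *
 *   dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD)               -> "rebuild"
 *   dm_raid_arg_name_by_flag(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC) -> NULL,
 *       after logging "... called with more than one flag!"
 */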
+ */ +static int rs_check_for_valid_flags(struct raid_set *rs) +{ + if (rs->ctr_flags & ~__valid_flags(rs)) { + rs->ti->error = "Invalid flags combination"; + return -EINVAL; + } + + return 0; +} + +/* MD raid10 bit definitions and helpers */ +#define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */ +#define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */ +#define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */ +#define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */ + +/* Return md raid10 near copies for @layout */ +static unsigned int __raid10_near_copies(int layout) +{ + return layout & 0xFF; +} + +/* Return md raid10 far copies for @layout */ +static unsigned int __raid10_far_copies(int layout) +{ + return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT); +} + +/* Return true if md raid10 offset for @layout */ +static bool __is_raid10_offset(int layout) +{ + return !!(layout & RAID10_OFFSET); +} + +/* Return true if md raid10 near for @layout */ +static bool __is_raid10_near(int layout) +{ + return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1; +} + +/* Return true if md raid10 far for @layout */ +static bool __is_raid10_far(int layout) +{ + return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1; +} + +/* Return md raid10 layout string for @layout */ +static const char *raid10_md_layout_to_format(int layout) { /* - * Bit 16 and 17 stand for "offset" and "use_far_sets" + * Bit 16 stands for "offset" + * (i.e. adjacent stripes hold copies) + * * Refer to MD's raid10.c for details */ - if ((layout & 0x10000) && (layout & 0x20000)) + if (__is_raid10_offset(layout)) return "offset"; - if ((layout & 0xFF) > 1) + if (__raid10_near_copies(layout) > 1) return "near"; + WARN_ON(__raid10_far_copies(layout) < 2); + return "far"; } -static unsigned raid10_md_layout_to_copies(int layout) +/* Return md raid10 algorithm for @name */ +static int raid10_name_to_format(const char *name) { - if ((layout & 0xFF) > 1) - return layout & 0xFF; - return (layout >> 8) & 0xFF; + if (!strcasecmp(name, "near")) + return ALGORITHM_RAID10_NEAR; + else if (!strcasecmp(name, "offset")) + return ALGORITHM_RAID10_OFFSET; + else if (!strcasecmp(name, "far")) + return ALGORITHM_RAID10_FAR; + + return -EINVAL; } -static int raid10_format_to_md_layout(char *format, unsigned copies) +/* Return md raid10 copies for @layout */ +static unsigned int raid10_md_layout_to_copies(int layout) { - unsigned n = 1, f = 1; + return max(__raid10_near_copies(layout), __raid10_far_copies(layout)); +} - if (!strcasecmp("near", format)) +/* Return md raid10 layout for @algorithm and @copies */ +static int raid10_format_to_md_layout(struct raid_set *rs, + unsigned int algorithm, + unsigned int copies) +{ + unsigned int n = 1, f = 1, r = 0; + + /* + * MD resilience flaw: + * + * enabling use_far_sets for far/offset formats causes copies + * to be colocated on the same devs together with their origins!
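/*
 * To make the layout encoding concrete, here is raid10_format_to_md_layout()
 * worked through for 2 copies with raid10_use_near_sets NOT requested
 * (values derived from the definitions above):
 *
 *   near:   n = 2, f = 1, r = 0
 *           layout = 0x00000102              (near copies in the low byte)
 *   offset: n = 1, f = 2, r = RAID10_OFFSET | RAID10_USE_FAR_SETS
 *           layout = 0x00050201
 *   far:    n = 1, f = 2, r = RAID10_USE_FAR_SETS
 *           layout = 0x00040201              (far copies in the 2nd byte)
 *
 * raid10_md_layout_to_copies() recovers 2 from each of these via
 * max(__raid10_near_copies(), __raid10_far_copies()).
 */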
+ * + * -> disable it for now in the definition above + */ + if (algorithm == ALGORITHM_RAID10_DEFAULT || + algorithm == ALGORITHM_RAID10_NEAR) n = copies; - else + + else if (algorithm == ALGORITHM_RAID10_OFFSET) { f = copies; + r = RAID10_OFFSET; + if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) + r |= RAID10_USE_FAR_SETS; - if (!strcasecmp("offset", format)) - return 0x30000 | (f << 8) | n; + } else if (algorithm == ALGORITHM_RAID10_FAR) { + f = copies; + r = !RAID10_OFFSET; + if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) + r |= RAID10_USE_FAR_SETS; - if (!strcasecmp("far", format)) - return 0x20000 | (f << 8) | n; + } else + return -EINVAL; - return (f << 8) | n; + return r | (f << RAID10_FAR_COPIES_SHIFT) | n; } +/* END: MD raid10 bit definitions and helpers */ -static struct raid_type *get_raid_type(char *name) +/* Check for any of the raid10 algorithms */ +static bool __got_raid10(struct raid_type *rtp, const int layout) { - int i; + if (rtp->level == 10) { + switch (rtp->algorithm) { + case ALGORITHM_RAID10_DEFAULT: + case ALGORITHM_RAID10_NEAR: + return __is_raid10_near(layout); + case ALGORITHM_RAID10_OFFSET: + return __is_raid10_offset(layout); + case ALGORITHM_RAID10_FAR: + return __is_raid10_far(layout); + default: + break; + } + } + + return false; +} - for (i = 0; i < ARRAY_SIZE(raid_types); i++) - if (!strcmp(raid_types[i].name, name)) - return &raid_types[i]; +/* Return raid_type for @name */ +static struct raid_type *get_raid_type(const char *name) |