author    Mika Westerberg <mika.westerberg@linux.intel.com>  2017-06-06 15:25:17 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-06-09 11:42:43 +0200
commit    e6b245ccd524441f462f1ca1fe726123dcedeeee (patch)
tree      e4b37479fce488999f3948c75e6f6c9e71e93440 /drivers/thunderbolt/switch.c
parent    f67cf491175a315ca86c9b349708bfed7b1f40c1 (diff)
thunderbolt: Add support for host and device NVM firmware upgrade
Starting from Intel Falcon Ridge the NVM firmware can be upgraded by using DMA configuration based mailbox commands. If we detect that the host or device (device support starts from Intel Alpine Ridge) has the DMA configuration based mailbox, we expose the NVM information to userspace as two separate Linux NVMem devices: nvm_active and nvm_non_active. The former is a read-only portion of the active NVM which firmware upgrade tools can use to find a suitable NVM image if the device identification strings are not enough. The latter is a write-only portion where the new NVM image is to be written by userspace. It is up to userspace to find the right NVM image (the kernel does only very minimal validation). The ICM firmware itself authenticates the new NVM firmware and fails the operation if it is not what is expected.

We also expose two new sysfs files per switch: nvm_version and nvm_authenticate, which can be used to read the active NVM version and to start the upgrade process, respectively.

We also introduce safe mode, which is the mode a switch goes into when it does not have properly authenticated firmware. In this mode the switch only accepts a couple of commands, including flashing a new NVM firmware image and triggering a power cycle.

This code is based on the work done by Amir Levy and Michael Jamet.

Signed-off-by: Michael Jamet <michael.jamet@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
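As a rough sketch of the resulting userspace flow (illustrative only, not part of the patch): a tool writes the image into the non-active NVMem device and then triggers authentication through sysfs. The switch path ("0-0") and the nvmem instance number ("nvm_non_active0") below are assumptions; a real tool would discover both at runtime and could compare nvm_version and nvm_active against the image before flashing.

/*
 * Illustrative sketch only -- not from this patch. The switch path
 * ("0-0") and nvmem instance ("nvm_non_active0") are assumptions;
 * a real tool discovers them at runtime.
 */
#include <stdio.h>

#define SW "/sys/bus/thunderbolt/devices/0-0"

static int copy_image(const char *src, const char *dst)
{
	FILE *in = fopen(src, "rb");
	FILE *out = in ? fopen(dst, "wb") : NULL;
	char buf[4096];
	size_t n;
	int ret = -1;

	if (!out)
		goto done;
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		if (fwrite(buf, 1, n, out) != n)
			goto done;
	ret = ferror(in) ? -1 : 0;
done:
	if (in)
		fclose(in);
	if (out && fclose(out))
		ret = -1;
	return ret;
}

int main(int argc, char **argv)
{
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <nvm-image>\n", argv[0]);
		return 1;
	}

	/* Write the new image to the non-active (write-only) NVM */
	if (copy_image(argv[1], SW "/nvm_non_active0/nvmem")) {
		perror("writing NVM image");
		return 1;
	}

	/* Ask the kernel to validate and authenticate the image; on
	 * success the switch power cycles into the new firmware. */
	f = fopen(SW "/nvm_authenticate", "w");
	if (!f || fputs("1", f) == EOF || fclose(f)) {
		perror("nvm_authenticate");
		return 1;
	}
	return 0;
}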
Diffstat (limited to 'drivers/thunderbolt/switch.c')
-rw-r--r--  drivers/thunderbolt/switch.c | 603
1 file changed, 582 insertions(+), 21 deletions(-)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1524edf42ee8..ab3e8f410444 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,13 +5,395 @@
*/
#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include "tb.h"
/* Switch authorization from userspace is serialized by this lock */
static DEFINE_MUTEX(switch_lock);
+/* Switch NVM support */
+
+#define NVM_DEVID 0x05
+#define NVM_VERSION 0x08
+#define NVM_CSS 0x10
+#define NVM_FLASH_SIZE 0x45
+
+#define NVM_MIN_SIZE SZ_32K
+#define NVM_MAX_SIZE SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+ struct list_head list;
+ uuid_be uuid;
+ u32 status;
+};
+
+/*
+ * Hold NVM authentication failure status per switch. This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ list_for_each_entry(st, &nvm_auth_status_cache, list) {
+ if (!uuid_be_cmp(st->uuid, *sw->uuid))
+ return st;
+ }
+
+ return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ mutex_unlock(&nvm_auth_status_lock);
+
+ *status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+ struct nvm_auth_status *st;
+
+ if (WARN_ON(!sw->uuid))
+ return;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+
+ if (!st) {
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto unlock;
+
+ memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+ INIT_LIST_HEAD(&st->list);
+ list_add_tail(&st->list, &nvm_auth_status_cache);
+ }
+
+ st->status = status;
+unlock:
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ if (st) {
+ list_del(&st->list);
+ kfree(st);
+ }
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+ unsigned int image_size, hdr_size;
+ const u8 *buf = sw->nvm->buf;
+ u16 ds_size;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ image_size = sw->nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ if (!sw->safe_mode) {
+ u16 device_id;
+
+ /*
+ * Make sure the device ID in the image matches the one
+ * we read from the switch config space.
+ */
+ device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+ if (device_id != sw->config.device_id)
+ return -EINVAL;
+
+ if (sw->generation < 3) {
+ /* Write CSS headers first */
+ ret = dma_port_flash_write(sw->dma_port,
+ DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+ DMA_PORT_CSS_MAX_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ /* Skip headers in the image */
+ buf += hdr_size;
+ image_size -= hdr_size;
+ }
+
+ return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+ int ret;
+
+ /*
+ * Root switch NVM upgrade requires that we disconnect the
+ * existing PCIe paths first (in case it is not in safe mode
+ * already).
+ */
+ if (!sw->safe_mode) {
+ ret = tb_domain_disconnect_pcie_paths(sw->tb);
+ if (ret)
+ return ret;
+ /*
+ * The host controller goes away pretty soon after this if
+ * everything goes well, so getting a timeout is expected.
+ */
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ return ret == -ETIMEDOUT ? 0 : ret;
+ }
+
+ /*
+ * We can get out of safe mode by simply power cycling the
+ * switch.
+ */
+ dma_port_power_cycle(sw->dma_port);
+ return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+ int ret, retries = 10;
+
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+
+ /*
+ * Poll here for the authentication status. It takes some time
+ * for the device to respond (we get a timeout for a while). Once
+ * we get a response the device needs to be power cycled in order
+ * for the new NVM to be taken into use.
+ */
+ do {
+ u32 status;
+
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret < 0 && ret != -ETIMEDOUT)
+ return ret;
+ if (ret > 0) {
+ if (status) {
+ tb_sw_warn(sw, "failed to authenticate NVM\n");
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+ return 0;
+ }
+
+ msleep(500);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_switch *sw = priv;
+
+ return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_switch *sw = priv;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ /*
+ * Since writing the NVM image might require some special steps,
+ * for example when CSS headers are written, we cache the image
+ * locally here and handle the special cases when the user asks
+ * us to authenticate the image.
+ */
+ if (!sw->nvm->buf) {
+ sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+ if (!sw->nvm->buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ sw->nvm->buf_data_size = offset + bytes;
+ memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+ mutex_unlock(&switch_lock);
+
+ return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ size_t size, bool active)
+{
+ struct nvmem_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ if (active) {
+ config.name = "nvm_active";
+ config.reg_read = tb_switch_nvm_read;
+ } else {
+ config.name = "nvm_non_active";
+ config.reg_write = tb_switch_nvm_write;
+ }
+
+ config.id = id;
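+ /* NVM contents are accessed in 4-byte aligned dwords */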
+ config.stride = 4;
+ config.word_size = 4;
+ config.size = size;
+ config.dev = &sw->dev;
+ config.owner = THIS_MODULE;
+ config.root_only = true;
+ config.priv = sw;
+
+ return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+ struct nvmem_device *nvm_dev;
+ struct tb_switch_nvm *nvm;
+ u32 val;
+ int ret;
+
+ if (!sw->dma_port)
+ return 0;
+
+ nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
+
+ nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+ /*
+ * If the switch is in safe mode, the only accessible portion of
+ * the NVM is the non-active one, where userspace is expected to
+ * write a new functional NVM image.
+ */
+ if (!sw->safe_mode) {
+ u32 nvm_size, hdr_size;
+
+ ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+ sizeof(val));
+ if (ret)
+ goto err_ida;
+
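+ /*
+ * The low three bits of this dword encode the total flash size
+ * as a power of two in Mbit; convert to bytes, drop the header
+ * region and split the rest evenly between the active and
+ * non-active halves.
+ */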
+ hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - hdr_size) / 2;
+
+ ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+ sizeof(val));
+ if (ret)
+ goto err_ida;
+
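+ /* Active NVM version: major in bits 23:16, minor in bits 15:8 */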
+ nvm->major = val >> 16;
+ nvm->minor = val >> 8;
+
+ nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_ida;
+ }
+ nvm->active = nvm_dev;
+ }
+
+ nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_nvm_active;
+ }
+ nvm->non_active = nvm_dev;
+
+ mutex_lock(&switch_lock);
+ sw->nvm = nvm;
+ mutex_unlock(&switch_lock);
+
+ return 0;
+
+err_nvm_active:
+ if (nvm->active)
+ nvmem_unregister(nvm->active);
+err_ida:
+ ida_simple_remove(&nvm_ida, nvm->id);
+ kfree(nvm);
+
+ return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+ struct tb_switch_nvm *nvm;
+
+ mutex_lock(&switch_lock);
+ nvm = sw->nvm;
+ sw->nvm = NULL;
+ mutex_unlock(&switch_lock);
+
+ if (!nvm)
+ return;
+
+ /* Remove authentication status in case the switch is unplugged */
+ if (!nvm->authenticating)
+ nvm_clear_auth_status(sw);
+
+ nvmem_unregister(nvm->non_active);
+ if (nvm->active)
+ nvmem_unregister(nvm->active);
+ ida_simple_remove(&nvm_ida, nvm->id);
+ vfree(nvm->buf);
+ kfree(nvm);
+}
+
/* port utility functions */
static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -448,6 +830,83 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(key);
+static ssize_t nvm_authenticate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ u32 status;
+
+ nvm_get_auth_status(sw, &status);
+ return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ bool val;
+ int ret;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ /* If NVMem devices are not yet added */
+ if (!sw->nvm) {
+ ret = -EAGAIN;
+ goto exit_unlock;
+ }
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ goto exit_unlock;
+
+ /* Always clear the authentication status */
+ nvm_clear_auth_status(sw);
+
+ if (val) {
+ ret = nvm_validate_and_write(sw);
+ if (ret)
+ goto exit_unlock;
+
+ sw->nvm->authenticating = true;
+
+ if (!tb_route(sw))
+ ret = nvm_authenticate_host(sw);
+ else
+ ret = nvm_authenticate_device(sw);
+ }
+
+exit_unlock:
+ mutex_unlock(&switch_lock);
+
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ if (sw->safe_mode)
+ ret = -ENODATA;
+ else if (!sw->nvm)
+ ret = -EAGAIN;
+ else
+ ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+ mutex_unlock(&switch_lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -480,6 +939,8 @@ static struct attribute *switch_attrs[] = {
&dev_attr_device.attr,
&dev_attr_device_name.attr,
&dev_attr_key.attr,
+ &dev_attr_nvm_authenticate.attr,
+ &dev_attr_nvm_version.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
@@ -498,9 +959,14 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
+ } else if (attr == &dev_attr_nvm_authenticate.attr ||
+ attr == &dev_attr_nvm_version.attr) {
+ if (sw->dma_port)
+ return attr->mode;
+ return 0;
}
- return attr->mode;
+ return sw->safe_mode ? 0 : attr->mode;
}
static struct attribute_group switch_group = {
@@ -652,6 +1118,45 @@ err_free_sw_ports:
}
/**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except the DMA configuration port until it is
+ * flashed with valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+ struct tb_switch *sw;
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return NULL;
+
+ sw->tb = tb;
+ sw->config.depth = tb_route_length(route);
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->safe_mode = true;
+
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.bus = &tb_bus_type;
+ sw->dev.type = &tb_switch_type;
+ sw->dev.groups = switch_groups;
+ dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+ return sw;
+}
+
+/**
* tb_switch_configure() - Uploads configuration to the switch
* @sw: Switch to configure
*
@@ -717,8 +1222,11 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}
-static void tb_switch_add_dma_port(struct tb_switch *sw)
+static int tb_switch_add_dma_port(struct tb_switch *sw)
{
+ u32 status;
+ int ret;
+
switch (sw->generation) {
case 3:
break;
@@ -726,14 +1234,49 @@ static void tb_switch_add_dma_port(struct tb_switch *sw)
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
- return;
+ return 0;
break;
default:
- return;
+ /*
+ * DMA port is the only thing available when the switch
+ * is in safe mode.
+ */
+ if (!sw->safe_mode)
+ return 0;
+ break;
}
+ if (sw->no_nvm_upgrade)
+ return 0;
+
sw->dma_port = dma_port_alloc(sw);
+ if (!sw->dma_port)
+ return 0;
+
+ /*
+ * Check the status of the previous flash authentication. If
+ * there was one, we need to power cycle the switch in any case
+ * to make it functional again.
+ */
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret <= 0)
+ return ret;
+
+ if (status) {
+ tb_sw_info(sw, "switch flash authentication failed\n");
+ tb_switch_set_uuid(sw);
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+
+ /*
+ * We return an error here, which causes adding the switch to fail.
+ * It should appear back after the power cycle is complete.
+ */
+ return -ESHUTDOWN;
}
/**
@@ -759,29 +1302,41 @@ int tb_switch_add(struct tb_switch *sw)
* to the userspace. NVM can be accessed through DMA
* configuration based mailbox.
*/
- tb_switch_add_dma_port(sw);
-
- /* read drom */
- ret = tb_drom_read(sw);
- if (ret) {
- tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ ret = tb_switch_add_dma_port(sw);
+ if (ret)
return ret;
- }
- tb_sw_info(sw, "uid: %#llx\n", sw->uid);
- tb_switch_set_uuid(sw);
+ if (!sw->safe_mode) {
+ /* read drom */
+ ret = tb_drom_read(sw);
+ if (ret) {
+ tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ return ret;
+ }
+ tb_sw_info(sw, "uid: %#llx\n", sw->uid);
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (sw->ports[i].disabled) {
- tb_port_info(&sw->ports[i], "disabled by eeprom\n");
- continue;
+ tb_switch_set_uuid(sw);
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ if (sw->ports[i].disabled) {
+ tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+ continue;
+ }
+ ret = tb_init_port(&sw->ports[i]);
+ if (ret)
+ return ret;
}
- ret = tb_init_port(&sw->ports[i]);
- if (ret)
- return ret;
}
- return device_add(&sw->dev);
+ ret = device_add(&sw->dev);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_nvm_add(sw);
+ if (ret)
+ device_del(&sw->dev);
+
+ return ret;
}
/**
@@ -808,6 +1363,7 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
+ tb_switch_nvm_remove(sw);
device_unregister(&sw->dev);
}
@@ -976,3 +1532,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
return NULL;
}
+
+void tb_switch_exit(void)
+{
+ ida_destroy(&nvm_ida);
+}