author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-16 08:31:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-16 08:31:32 -0700
commit     22c58fd70ca48a29505922b1563826593b08cc00 (patch)
tree       ee3154075bcd0a867fd0abf7e00ba3b4055999d4 /drivers
parent     a455eda33faafcaac1effb31d682765b14ef868c (diff)
parent     7a0c4c17089a8aff52f516f0f52002be52950aae (diff)
Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC platform updates from Olof Johansson:
 "SoC updates, mostly refactorings and cleanups of old legacy platforms.

  Major themes this release:

   - Conversion of ixp4xx to a modern platform (drivers, DT, bindings)

   - Moving some of the ep93xx headers around to get it closer to being
     multiplatform enabled

   - Cleanups of Davinci

  This also contains a few patches that were queued up as fixes before
  5.1 but didn't get sent in before the release"

* tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (123 commits)
  ARM: debug-ll: add default address for digicolor
  ARM: u300: regulator: add MODULE_LICENSE()
  ARM: ep93xx: move private headers out of mach/*
  ARM: ep93xx: move pinctrl interfaces into include/linux/soc
  ARM: ep93xx: keypad: stop using mach/platform.h
  ARM: ep93xx: move network platform data to separate header
  ARM: stm32: add AMBA support for stm32 family
  MAINTAINERS: update arch/arm/mach-davinci
  ARM: rockchip: add missing of_node_put in rockchip_smp_prepare_pmu
  ARM: dts: Add queue manager and NPE to the IXP4xx DTSI
  soc: ixp4xx: qmgr: Add DT probe code
  soc: ixp4xx: qmgr: Add DT bindings for IXP4xx qmgr
  soc: ixp4xx: npe: Add DT probe code
  soc: ixp4xx: Add DT bindings for IXP4xx NPE
  soc: ixp4xx: qmgr: Pass resources
  soc: ixp4xx: Remove unused functions
  soc: ixp4xx: Uninline several functions
  soc: ixp4xx: npe: Pass addresses as resources
  ARM: ixp4xx: Turn the QMGR into a platform device
  ARM: ixp4xx: Turn the NPE into a platform device
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/pata_ep93xx.c | 2
-rw-r--r--  drivers/bus/ti-sysc.c | 661
-rw-r--r--  drivers/clocksource/Kconfig | 7
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/timer-ixp4xx.c | 282
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r--  drivers/firmware/Kconfig | 16
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/trusted_foundations.c | 176
-rw-r--r--  drivers/gpio/Kconfig | 13
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpio-ixp4xx.c | 474
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/ep93xx_keypad.c | 8
-rw-r--r--  drivers/input/misc/ixp4xx-beeper.c | 20
-rw-r--r--  drivers/irqchip/Kconfig | 6
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-ixp4xx.c | 403
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 14
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 4
-rw-r--r--  drivers/pwm/pwm-ep93xx.c | 2
-rw-r--r--  drivers/soc/Kconfig | 1
-rw-r--r--  drivers/soc/Makefile | 1
-rw-r--r--  drivers/soc/ixp4xx/Kconfig | 16
-rw-r--r--  drivers/soc/ixp4xx/Makefile | 2
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-npe.c | 762
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-qmgr.c | 488
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 42
-rw-r--r--  drivers/watchdog/ixp4xx_wdt.c | 9
31 files changed, 3260 insertions(+), 163 deletions(-)
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cc6d06c1b2c7..db271b705529 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,7 +44,7 @@
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/platform.h>
+#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d299ec79e4c3..308475ed4b32 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -47,7 +47,10 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
-static const char * const clock_names[SYSC_ICK + 1] = { "fck", "ick", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
@@ -75,6 +78,7 @@ struct sysc {
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
@@ -94,7 +98,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
-void sysc_write(struct sysc *ddata, int offset, u32 value)
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
writel_relaxed(value, ddata->module_va + offset);
}
@@ -128,6 +132,81 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
@@ -199,6 +278,12 @@ static int sysc_get_clocks(struct sysc *ddata)
if (ddata->nr_clocks < 1)
return 0;
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
@@ -231,39 +316,125 @@ static int sysc_get_clocks(struct sysc *ddata)
return 0;
}
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
/**
- * sysc_init_resets - reset module on init
+ * sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
+ * See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
- int error;
-
ddata->rsts =
devm_reset_control_array_get_optional_exclusive(ddata->dev);
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- goto deassert;
-
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
-
-deassert:
- error = reset_control_deassert(ddata->rsts);
- if (error)
- return error;
-
return 0;
}
@@ -622,91 +793,239 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
-static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+
+static int sysc_enable_module(struct device *dev)
{
- struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int error = 0, i;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
- if (!ddata->enabled)
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
return 0;
- if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
- if (!pdata->idle_module)
- return -ENODEV;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- error = pdata->idle_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not idle: %i\n",
- __func__, error);
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & SYSC_IDLE_FORCE)
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
- goto idled;
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
}
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
- clk_disable(ddata->clocks[i]);
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
}
-idled:
- ddata->enabled = false;
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- return error;
+ return 0;
}
-static int __maybe_unused sysc_runtime_resume(struct device *dev)
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
struct sysc *ddata;
- int error = 0, i;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->enabled)
+ if (!ddata->enabled)
return 0;
if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ return error;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ return error;
+ }
- if (!pdata->enable_module)
- return -ENODEV;
+ sysc_disable_main_clocks(ddata);
- error = pdata->enable_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not enable: %i\n",
- __func__, error);
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
- goto awake;
- }
+ ddata->enabled = false;
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ return error;
+}
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
- error = clk_enable(ddata->clocks[i]);
+ if (ddata->enabled)
+ return 0;
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
}
-awake:
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->enabled = true;
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
return error;
}
@@ -788,12 +1107,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -805,6 +1129,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
@@ -853,6 +1178,42 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#endif
};
+/*
+ * Early quirks based on module base and register offsets only that are
+ * needed before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
@@ -885,6 +1246,55 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/**
+ * sysc_rstctrl_reset_deassert - deassert rstctrl reset
+ * @ddata: device driver data
+ * @reset: reset before deassert
+ *
+ * A module can have both OCP softreset control and external rstctrl.
+ * If more complicated rstctrl resets are needed, please handle these
+ * directly from the child device driver and map only the module reset
+ * for the parent interconnect target module device.
+ *
+ * Automatic reset of the module on init can be skipped with the
+ * "ti,no-reset-on-init" device tree property.
+ */
+static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
+{
+ int error;
+
+ if (!ddata->rsts)
+ return 0;
+
+ if (reset) {
+ error = reset_control_assert(ddata->rsts);
+ if (error)
+ return error;
+ }
+
+ return reset_control_deassert(ddata->rsts);
+}
+
static int sysc_reset(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
@@ -915,38 +1325,58 @@ static int sysc_reset(struct sysc *ddata)
100, MAX_MODULE_SOFTRESET_WAIT);
}
-/* At this point the module is configured enough to read the revision */
+/*
+ * At this point the module is configured enough to read the revision but
+ * module may not be completely configured yet to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
static int sysc_init_module(struct sysc *ddata)
{
- int error;
+ int error = 0;
+ bool manage_clocks = true;
+ bool reset = true;
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
- ddata->revision = sysc_read_revision(ddata);
- goto rev_quirks;
- }
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ reset = false;
- error = pm_runtime_get_sync(ddata->dev);
- if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
+ error = sysc_rstctrl_reset_deassert(ddata, reset);
+ if (error)
+ return error;
- return 0;
- }
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
+ manage_clocks = false;
- error = sysc_reset(ddata);
- if (error) {
- dev_err(ddata->dev, "Reset failed with %d\n", error);
- pm_runtime_put_sync(ddata->dev);
+ if (manage_clocks) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- return error;
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
}
ddata->revision = sysc_read_revision(ddata);
- pm_runtime_put_sync(ddata->dev);
-
-rev_quirks:
sysc_init_revision_quirks(ddata);
- return 0;
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+err_main_clocks:
+ if (manage_clocks)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (manage_clocks)
+ sysc_disable_opt_clocks(ddata);
+
+ return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
@@ -1208,7 +1638,7 @@ static int sysc_child_resume_noirq(struct device *dev)
}
#endif
-struct dev_pm_domain sysc_child_pm_domain = {
+static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
@@ -1281,6 +1711,8 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -1331,6 +1763,9 @@ static void sysc_unprepare(struct sysc *ddata)
{
int i;
+ if (!ddata->clocks)
+ return;
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
@@ -1576,28 +2011,26 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
- struct ti_sysc_module_data mdata;
- int error = 0;
+ struct ti_sysc_module_data *mdata;
if (!pdata || !ddata->legacy_mode)
return 0;
- mdata.name = ddata->legacy_mode;
- mdata.module_pa = ddata->module_pa;
- mdata.module_size = ddata->module_size;
- mdata.offsets = ddata->offsets;
- mdata.nr_offsets = SYSC_MAX_REGS;
- mdata.cap = ddata->cap;
- mdata.cfg = &ddata->cfg;
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
- if (!pdata->init_module)
- return -ENODEV;
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
- error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
- if (error == -EEXIST)
- error = 0;
+ ddata->mdata = mdata;
- return error;
+ return 0;
}
static int sysc_init_match(struct sysc *ddata)
@@ -1651,10 +2084,6 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
- error = sysc_get_clocks(ddata);
- if (error)
- return error;
-
error = sysc_map_and_check_registers(ddata);
if (error)
goto unprepare;
@@ -1675,15 +2104,21 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
error = sysc_init_resets(ddata);
if (error)
return error;
- pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
if (error)
goto unprepare;
+ pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
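[Editor's note: the ti-sysc hunks above replace the legacy platform callbacks with direct programming of the module's SYSCONFIG SIDLE/MIDLE fields. The following is a minimal sketch of that bitfield update, not part of the patch; the shift value and the mode bitmask are placeholder examples, since the driver takes the real values from ddata->cap->regbits and ddata->cfg at runtime.]

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_SIDLE_SHIFT	3
#define EXAMPLE_IDLE_MASK	3	/* SYSC_NR_IDLEMODES - 1 */

static u32 example_program_sidle(u32 sysconfig, u32 idlemodes)
{
	/* Highest supported mode wins, e.g. idlemodes == 0x6 -> best_mode == 2 */
	u32 best_mode = fls(idlemodes) - 1;

	/* Clear the old idle mode field and write the chosen mode */
	sysconfig &= ~(EXAMPLE_IDLE_MASK << EXAMPLE_SIDLE_SHIFT);
	sysconfig |= best_mode << EXAMPLE_SIDLE_SHIFT;

	return sysconfig;
}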
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4b3d143f0f8a..48321488f0fd 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..dba4eff880de 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4 timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow to specify the two least
+ * significant bits of the timeout value and assumes them being zero.
+ * So make sure the latch is the best value with the two least
+ * significant bits unset.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
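[Editor's note: a worked example of the latch rounding done in ixp4xx_timer_register() above, not part of the patch. The reload register ignores the two least significant bits, so the latch is rounded to a multiple of 4; with the 66666000 Hz rate used by the DT probe path and an assumed HZ of 100:]

/*
 * DIV_ROUND_CLOSEST(66666000, 4 * 100) * 4 == 166665 * 4 == 666660
 *
 * 666660 is 0xa2c24, so the two low bits of the value written to
 * IXP4XX_OSRT1_OFFSET are already zero and nothing is lost to the
 * hardware's masking of the reload value.
 */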
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9bbde2f26cac..f5414b6dfb55 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -30,8 +30,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7b655f6156fb..11fda9eb2466 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -253,6 +253,22 @@ config TI_SCI_PROTOCOL
This protocol library is used by client drivers to use the features
provided by the system controller.
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
config HAVE_ARM_SMCCC
bool
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 9a3909a22682..3fa0b34eb72f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
obj-y += psci/
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 000000000000..fd4999388ff1
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,176 @@
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
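[Editor's note: a rough usage sketch of the two helpers added above, not part of the patch. A Tegra board file could probe for the secure monitor early in boot roughly as follows; the function name my_board_init_early is hypothetical.]

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/firmware/trusted_foundations.h>

static void __init my_board_init_early(void)
{
	/* Looks for a "tlm,trusted-foundations" DT node and registers firmware_ops */
	of_register_trusted_foundations();

	if (trusted_foundations_registered())
		pr_info("booted under the Trusted Foundations secure monitor\n");
}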
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 41f08362dad3..8023d03ec362 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -286,6 +286,19 @@ config GPIO_IOP
If unsure, say N.
+config GPIO_IXP4XX
+ bool "Intel IXP4xx GPIO"
+ depends on ARM # For <asm/mach-types.h>
+ depends on ARCH_IXP4XX
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ IXP4xx series of chips.
+
+ If unsure, say N.
+
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
depends on CPU_LOONGSON2 || CPU_LOONGSON3
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e19be766f6a6..6700eee860b7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
new file mode 100644
index 000000000000..4b1cf7ea858d
--- /dev/null
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// IXP4 GPIO driver
+// Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+//
+// based on previous work and know-how from:
+// Deepak Saxena <dsaxena@plexity.net>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+/* Include that go away with DT transition */
+#include <linux/irqchip/irq-ixp4xx.h>
+
+#include <asm/mach-types.h>
+
+#define IXP4XX_REG_GPOUT 0x00
+#define IXP4XX_REG_GPOE 0x04
+#define IXP4XX_REG_GPIN 0x08
+#define IXP4XX_REG_GPIS 0x0C
+#define IXP4XX_REG_GPIT1 0x10
+#define IXP4XX_REG_GPIT2 0x14
+#define IXP4XX_REG_GPCLK 0x18
+#define IXP4XX_REG_GPDBSEL 0x1C
+
+/*
+ * The hardware uses 3 bits to indicate interrupt "style".
+ * we clear and set these three bits accordingly. The lower 24
+ * bits in two registers (GPIT1 and GPIT2) are used to set up
+ * the style for 8 lines each for a total of 16 GPIO lines.
+ */
+#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
+#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
+#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
+#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
+#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
+#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
+#define IXP4XX_GPIO_STYLE_SIZE 3
+
+/**
+ * struct ixp4xx_gpio - IXP4 GPIO state container
+ * @dev: containing device for this instance
+ * @fwnode: the fwnode for this GPIO chip
+ * @gc: gpiochip for this instance
+ * @domain: irqdomain for this chip instance
+ * @base: remapped I/O-memory base
+ * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
+ * 0: level triggered
+ */
+struct ixp4xx_gpio {
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip gc;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long long irq_edge;
+};
+
+/**
+ * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
+ * @gpio_offset: offset of the IXP4 GPIO line
+ * @parent_hwirq: hwirq on the parent IRQ controller
+ */
+struct ixp4xx_gpio_map {
+ int gpio_offset;
+ int parent_hwirq;
+};
+
+/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
+const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
+ { .gpio_offset = 0, .parent_hwirq = 6 },
+ { .gpio_offset = 1, .parent_hwirq = 7 },
+ { .gpio_offset = 2, .parent_hwirq = 19 },
+ { .gpio_offset = 3, .parent_hwirq = 20 },
+ { .gpio_offset = 4, .parent_hwirq = 21 },
+ { .gpio_offset = 5, .parent_hwirq = 22 },
+ { .gpio_offset = 6, .parent_hwirq = 23 },
+ { .gpio_offset = 7, .parent_hwirq = 24 },
+ { .gpio_offset = 8, .parent_hwirq = 25 },
+ { .gpio_offset = 9, .parent_hwirq = 26 },
+ { .gpio_offset = 10, .parent_hwirq = 27 },
+ { .gpio_offset = 11, .parent_hwirq = 28 },
+ { .gpio_offset = 12, .parent_hwirq = 29 },
+};
+
+static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+}
+
+static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ /* ACK when unmasking if not edge-triggered */
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
+ irq_chip_unmask_parent(d);
+}
+
+static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ int line = d->hwirq;
+ unsigned long flags;
+ u32 int_style;
+ u32 int_reg;
+ u32 val;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (line >= 8) {
+ /* pins 8-15 */
+ line -= 8;
+ int_reg = IXP4XX_REG_GPIT2;
+ } else {
+ /* pins 0-7 */
+ int_reg = IXP4XX_REG_GPIT1;
+ }
+
+ spin_lock_irqsave(&g->gc.bgpio_lock, flags);
+
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+
+ spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+
+ /* This parent only accept level high (asserted) */
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
+};
+
+static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ /* This goes away when we transition to DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_gpio *g = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
+ irq, irq + nr_irqs - 1,
+ hwirq, hwirq + nr_irqs - 1);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_fwspec parent_fwspec;
+ const struct ixp4xx_gpio_map *map;
+ int j;
+
+ /* Not all lines support IRQs */
+ for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
+ map = &ixp4xx_gpiomap[j];
+ if (map->gpio_offset == hwirq)
+ break;
+ }
+ if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
+ dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
+ return -EINVAL;
+ }
+ dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixp4xx_gpio_irqchip,
+ g,
+ handle_bad_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+
+ /*
+ * Create a IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = map->parent_hwirq;
+ /* This parent only handles asserted level IRQs */
+ parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq + i, map->parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
+ &parent_fwspec);
+ if (ret)
+ dev_err(g->dev,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ map->parent_hwirq, hwirq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
+ .translate = ixp4xx_gpio_irq_domain_translate,
+ .alloc = ixp4xx_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int ixp4xx_gpio_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent;
+ struct resource *res;
+ struct ixp4xx_gpio *g;
+ int ret;
+ int i;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base)) {
+ dev_err(dev, "ioremap error\n");
+ return PTR_ERR(g->base);
+ }
+
+ /*
+ * Make sure GPIO 14 and 15 are NOT used as clocks but GPIO on
+ * specific machines.
+ */
+ if (machine_is_dsmg600() || machine_is_nas100d())
+ __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+
+ /*
+ * This is a very special big-endian ARM issue: when the IXP4xx is
+ * run in big endian mode, all registers in the machine are switched
+ * around to the CPU-native endianness. As you see mostly in the
+ * driver we use __raw_readl()/__raw_writel() to access the registers
+ * in the appropriate order. With the GPIO library we need to specify
+ * byte order explicitly, so this flag needs to be set when compiling
+ * for big endian.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+ flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+#else
+ flags = 0;
+#endif
+
+ /* Populate and register gpio chip */
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + IXP4XX_REG_GPIN,
+ g->base + IXP4XX_REG_GPOUT,
+ NULL,
+ NULL,
+ g->base + IXP4XX_REG_GPOE,
+ flags);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.to_irq = ixp4xx_gpio_to_irq;
+ g->gc.ngpio = 16;
+ g->gc.label = "IXP4XX_GPIO_CHIP";
+ /*
+ * TODO: when we have migrated to device tree and all GPIOs
+ * are fetched using phandles, set this to -1 to get rid of
+ * the fixed gpiochip base.
+ */
+ g->gc.base = 0;
+ g->gc.parent = &pdev->dev;
+ g->gc.owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret) {
+ dev_err(dev, "failed to add SoC gpiochip\n");
+ return ret;
+ }
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(g->base);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
+ }
+ g->domain = irq_domain_create_hierarchy(parent,
+ IRQ_DOMAIN_FLAG_HIERARCHY,
+ ARRAY_SIZE(ixp4xx_gpiomap),
+ g->fwnode,
+ &ixp4xx_gpio_irqdomain_ops,
+ g);
+ if (!g->domain) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev, "no hierarchical irq domain\n");
+ return ret;
+ }
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ if (!np) {
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
+ const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = map->gpio_offset;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(g->domain,
+ -1, /* just pick something */
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ map->gpio_offset, map->parent_hwirq,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, g);
+ dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_gpio_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-gpio",
+ },
+ {},
+};
+
+
+static struct platform_driver ixp4xx_gpio_driver = {
+ .driver = {
+ .name = "ixp4xx-gpio",
+ .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ },
+ .probe = ixp4xx_gpio_probe,
+};
+builtin_platform_driver(ixp4xx_gpio_driver);
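[Editor's note: a consumer-side sketch for the GPIO driver above, not part of the patch. It shows the legacy numeric gpio API that the driver's .to_irq() hook and fixed gpiochip base currently serve; GPIO line 5 and the handler are arbitrary examples, and only lines 0..12 have a parent IRQ.]

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_gpio_irq(void)
{
	int irq, ret;

	ret = gpio_request(5, "example");
	if (ret)
		return ret;

	ret = gpio_direction_input(5);
	if (ret)
		goto err_free;

	/* Allocated through the hierarchical irqdomain set up in probe() */
	irq = gpio_to_irq(5);
	if (irq < 0) {
		ret = irq;
		goto err_free;
	}

	ret = request_irq(irq, example_handler, IRQF_TRIGGER_RISING,
			  "example", NULL);
	if (ret)
		goto err_free;

	return 0;

err_free:
	gpio_free(5);
	return ret;
}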
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 1fe039d7326b..82398827b64f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -205,7 +205,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index f77b295e0123..575dac52f7b4 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -27,8 +27,7 @@
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/cirrus/ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
/*
@@ -137,10 +136,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- if (pdata->flags & EP93XX_KEYPAD_KDIV)
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
- else
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
+ clk_set_rate(keypad->clk, pdata->clk_rate);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 1fe149f3def2..4776273fa10b 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -30,6 +30,8 @@ MODULE_ALIAS("platform:ixp4xx-beeper");
static DEFINE_SPINLOCK(beep_lock);
+static int ixp4xx_timer2_irq;
+
static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
{
unsigned long flags;
@@ -90,6 +92,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
+ int irq;
int err;
input_dev = input_allocate_device();
@@ -110,15 +113,22 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = ixp4xx_spkr_event;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto err_free_device;
+ }
+
err = gpio_request(dev->id, "ixp4-beeper");
if (err)
goto err_free_device;
- err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
+ err = request_irq(irq, &ixp4xx_spkr_interrupt,
IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
goto err_free_gpio;
+ ixp4xx_timer2_irq = irq;
err = input_register_device(input_dev);
if (err)
@@ -129,7 +139,7 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
return 0;
err_free_irq:
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(irq, (void *)dev->id);
err_free_gpio:
gpio_free(dev->id);
err_free_device:
@@ -146,10 +156,10 @@ static int ixp4xx_spkr_remove(struct platform_device *dev)
input_unregister_device(input_dev);
/* turn the speaker off */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(ixp4xx_timer2_irq, (void *)dev->id);
gpio_free(dev->id);
return 0;
@@ -161,7 +171,7 @@ static void ixp4xx_spkr_shutdown(struct platform_device *dev)
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
/* turn off the speaker */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 5438abb1baba..cf7984991062 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -160,6 +160,12 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config IXP4XX_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_MULTI_HANDLER
+ select SPARSE_IRQ
+
config MADERA_IRQ
tristate
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 85972ae1bd7f..f8c66e958a64 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
+obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
new file mode 100644
index 000000000000..d576809429ac
--- /dev/null
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the IXP4xx interrupt controller
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-ixp4xx.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IXP4XX_ICPR 0x00 /* Interrupt Status */
+#define IXP4XX_ICMR 0x04 /* Interrupt Enable */
+#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */
+#define IXP4XX_ICIP 0x0C /* IRQ Status */
+#define IXP4XX_ICFP 0x10 /* FIQ Status */
+#define IXP4XX_ICHR 0x14 /* Interrupt Priority */
+#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */
+#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */
+
+/* IXP43x, IXP45x and IXP46x only */
+#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */
+#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */
+#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */
+#define IXP4XX_ICIP2 0x2C /* IRQ Status */
+#define IXP4XX_ICFP2 0x30 /* FIQ Status */
+#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */
+
+/**
+ * struct ixp4xx_irq - state container for the IXP4xx IRQ controller
+ * @irqbase: IRQ controller memory base in virtual memory
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC (with 64 IRQs)
+ * @irqchip: irqchip for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ixp4xx_irq {
+ void __iomem *irqbase;
+ bool is_356;
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+};
+
+/* Local static state container */
+static struct ixp4xx_irq ixirq;
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0 14
+#define IXP4XX_GPIO_CLK_1 15
+
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ /* All are level active high (asserted) here */
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ return 0;
+}
+
+static void ixp4xx_irq_mask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val &= ~BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val &= ~BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+/*
+ * Level triggered interrupts on GPIO lines can only be cleared when the
+ * interrupt condition disappears.
+ */
+static void ixp4xx_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val |= BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ unsigned long status;
+ int i;
+
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i, regs);
+
+ /*
+ * IXP465/IXP435 has an upper IRQ status register
+ */
+ if (ixi->is_356) {
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i + 32, regs);
+ }
+}
+
+static int ixp4xx_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ixp4xx_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_irq *ixi = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /*
+ * TODO: after converting IXP4xx to only device tree, set
+ * handle_bad_irq as default handler and assume all consumers
+ * call .set_type() as this is provided in the second cell in
+ * the device tree phandle.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixi->irqchip,
+ ixi,
+ handle_level_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * This needs to be a hierarchical irqdomain to work well with the
+ * GPIO irqchip (which is lower in the hierarchy)
+ */
+static const struct irq_domain_ops ixp4xx_irqdomain_ops = {
+ .translate = ixp4xx_irq_domain_translate,
+ .alloc = ixp4xx_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/**
+ * ixp4xx_get_irq_domain() - retrieve the ixp4xx irq domain
+ *
+ * This function will go away when we transition to DT probing.
+ */
+struct irq_domain *ixp4xx_get_irq_domain(void)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+
+ return ixi->domain;
+}
+EXPORT_SYMBOL_GPL(ixp4xx_get_irq_domain);
+
+/*
+ * This is the Linux IRQ to hwirq mapping table. It goes away once we
+ * have DT support, as all IRQ resources are then defined in the device
+ * tree. It registers all the IRQs that are not used by the hierarchical
+ * GPIO IRQ chip. The "holes" in between these IRQs will be requested by
+ * the GPIO driver. This is a stop-gap solution.
+ */
+struct ixp4xx_irq_chunk {
+ int irq;
+ int hwirq;
+ int nr_irqs;
+};
+
+static const struct ixp4xx_irq_chunk ixp4xx_irq_chunks[] = {
+ {
+ .irq = 16,
+ .hwirq = 0,
+ .nr_irqs = 6,
+ },
+ {
+ .irq = 24,
+ .hwirq = 8,
+ .nr_irqs = 11,
+ },
+ {
+ .irq = 46,
+ .hwirq = 30,
+ .nr_irqs = 2,
+ },
+	/* Only on the IXP43x/IXP45x/IXP46x variants */
+ {
+ .irq = 48,
+ .hwirq = 32,
+ .nr_irqs = 10,
+ },
+};
+
+/**
+ * ixp4xx_irq_setup() - Common setup code for the IXP4xx interrupt controller
+ * @ixi: State container
+ * @irqbase: Virtual memory base for the interrupt controller
+ * @fwnode: Corresponding fwnode abstraction for this controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+static int ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+ void __iomem *irqbase,
+ struct fwnode_handle *fwnode,
+ bool is_356)
+{
+ int nr_irqs;
+
+ ixi->irqbase = irqbase;
+ ixi->is_356 = is_356;
+
+ /* Route all sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR);
+
+ /* Disable all interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR);
+
+ if (is_356) {
+ /* Route upper 32 sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2);
+
+ /* Disable upper 32 interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2);
+
+ nr_irqs = 64;
+ } else {
+ nr_irqs = 32;
+ }
+
+ ixi->irqchip.name = "IXP4xx";
+ ixi->irqchip.irq_mask = ixp4xx_irq_mask;
+ ixi->irqchip.irq_unmask = ixp4xx_irq_unmask;
+ ixi->irqchip.irq_set_type = ixp4xx_set_irq_type;
+
+ ixi->domain = irq_domain_create_linear(fwnode, nr_irqs,
+ &ixp4xx_irqdomain_ops,
+ ixi);
+ if (!ixi->domain) {
+ pr_crit("IXP4XX: can not add primary irqdomain\n");
+ return -ENODEV;
+ }
+
+ set_handle_irq(ixp4xx_handle_irq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_irq_init() - Function to initialize the irqchip from boardfiles
+ * @irqbase: physical base for the irq controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+void __init ixp4xx_irq_init(resource_size_t irqbase,
+ bool is_356)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ struct irq_fwspec fwspec;
+ int nr_chunks;
+ int ret;
+ int i;
+
+ base = ioremap(irqbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return;
+ }
+ fwnode = irq_domain_alloc_fwnode(base);
+ if (!fwnode) {
+ pr_crit("IXP4XX: no domain handle\n");
+ return;
+ }
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret) {
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+ irq_domain_free_fwnode(fwnode);
+ }
+
+ nr_chunks = ARRAY_SIZE(ixp4xx_irq_chunks);
+ if (!is_356)
+ nr_chunks--;
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ for (i = 0; i < nr_chunks; i++) {
+ const struct ixp4xx_irq_chunk *chunk = &ixp4xx_irq_chunks[i];
+
+ pr_info("Allocate Linux IRQs %d..%d HW IRQs %d..%d\n",
+ chunk->irq, chunk->irq + chunk->nr_irqs - 1,
+ chunk->hwirq, chunk->hwirq + chunk->nr_irqs - 1);
+ fwspec.fwnode = fwnode;
+ fwspec.param[0] = chunk->hwirq;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(ixi->domain,
+ chunk->irq,
+ chunk->nr_irqs,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ pr_crit("IXP4XX: can not allocate irqs in hierarchy %d\n",
+ ret);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ixp4xx_irq_init);
+
+#ifdef CONFIG_OF
+int __init ixp4xx_of_init_irq(struct device_node *np,
+ struct device_node *parent)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ bool is_356;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return -ENODEV;
+ }
+ fwnode = of_node_to_fwnode(np);
+
+ /* These chip variants have 64 interrupts */
+ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp45x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp46x-interrupt");
+
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret)
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+
+ return ret;
+}
+IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt",
+ ixp4xx_of_init_irq);
+#endif
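
For orientation, here is a minimal boardfile sketch (illustrative only, not part of the patch) of how the legacy non-DT path above would be used. The interrupt controller's physical base and the chosen peripheral hwirq are assumptions, and the Linux IRQ number follows from the ixp4xx_irq_chunks table (hwirqs 8..18 map to Linux IRQs 24..34):

/* Hypothetical boardfile sketch using the interfaces added above. */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/irq-ixp4xx.h>
#include <linux/kernel.h>

#define BOARD_INTC_BASE_PHYS	0xc8003000	/* assumed controller base */

static irqreturn_t board_demo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static void __init board_init_irq(void)
{
	/* IXP42x variant with 32 interrupt sources: is_356 = false */
	ixp4xx_irq_init(BOARD_INTC_BASE_PHYS, false);

	/* hwirq 15 sits in the chunk that starts at Linux IRQ 24, hwirq 8 */
	if (request_irq(24 + (15 - 8), board_demo_isr, 0, "board-demo", NULL))
		pr_err("board: could not claim demo IRQ\n");
}
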
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index e9a0213b08c4..6238e6951336 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -41,7 +41,7 @@ config CS89x0_PLATFORM
config EP93XX_ETH
tristate "EP93xx Ethernet support"
- depends on ARM && ARCH_EP93XX
+ depends on (ARM && ARCH_EP93XX) || COMPILE_TEST
select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 13dfdfca49fc..a6da9873570b 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ed6623a9801e..319db3ece263 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -31,14 +31,15 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
@@ -1497,6 +1498,15 @@ static struct platform_driver ixp4xx_eth_driver = {
static int __init eth_init_module(void)
{
int err;
+
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * even matching the driver infrastructure, we should only probe
+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 5c60dc60a8e6..46a05b6540b8 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index bbf10ae02f0e..fa168581e6b8 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -35,7 +35,7 @@
#include <asm/div64.h>
-#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
+#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c07b4a85253f..ae9bf20b26fa 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -6,6 +6,7 @@ source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
+source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 90b686e586c6..c7c1a139ad8d 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
+obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
new file mode 100644
index 000000000000..de6becdc78a2
--- /dev/null
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -0,0 +1,16 @@
+menu "IXP4xx SoC drivers"
+
+config IXP4XX_QMGR
+ tristate "IXP4xx Queue Manager support"
+ help
+ This driver supports IXP4xx built-in hardware queue manager
+ and is automatically selected by Ethernet and HSS drivers.
+
+config IXP4XX_NPE
+ tristate "IXP4xx Network Processor Engine support"
+ select FW_LOADER
+ help
+ This driver supports IXP4xx built-in network coprocessors
+ and is automatically selected by Ethernet and HSS drivers.
+
+endmenu
diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile
new file mode 100644
index 000000000000..d20d99e6df65
--- /dev/null
+++ b/drivers/soc/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o
+obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
new file mode 100644
index 000000000000..15979d4376ab
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -0,0 +1,762 @@
+/*
+ * Intel IXP4xx Network Processor Engine driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The code is based on publicly available information:
+ * - Intel IXP4xx Developer's Manual and other e-papers
+ * - Intel IXP400 Access Library Software (BSD license)
+ * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com>
+ * Thanks, Christian.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/npe.h>
+
+#define DEBUG_MSG 0
+#define DEBUG_FW 0
+
+#define NPE_COUNT 3
+#define MAX_RETRIES 1000 /* microseconds */
+#define NPE_42X_DATA_SIZE 0x800 /* in dwords */
+#define NPE_46X_DATA_SIZE 0x1000
+#define NPE_A_42X_INSTR_SIZE 0x1000
+#define NPE_B_AND_C_42X_INSTR_SIZE 0x800
+#define NPE_46X_INSTR_SIZE 0x1000
+#define REGS_SIZE 0x1000
+
+#define NPE_PHYS_REG 32
+
+#define FW_MAGIC 0xFEEDF00D
+#define FW_BLOCK_TYPE_INSTR 0x0
+#define FW_BLOCK_TYPE_DATA 0x1
+#define FW_BLOCK_TYPE_EOF 0xF
+
+/* NPE exec status (read) and command (write) */
+#define CMD_NPE_STEP 0x01
+#define CMD_NPE_START 0x02
+#define CMD_NPE_STOP 0x03
+#define CMD_NPE_CLR_PIPE 0x04
+#define CMD_CLR_PROFILE_CNT 0x0C
+#define CMD_RD_INS_MEM 0x10 /* instruction memory */
+#define CMD_WR_INS_MEM 0x11
+#define CMD_RD_DATA_MEM 0x12 /* data memory */
+#define CMD_WR_DATA_MEM 0x13
+#define CMD_RD_ECS_REG 0x14 /* exec access register */
+#define CMD_WR_ECS_REG 0x15
+
+#define STAT_RUN 0x80000000
+#define STAT_STOP 0x40000000
+#define STAT_CLEAR 0x20000000
+#define STAT_ECS_K 0x00800000 /* pipeline clean */
+
+#define NPE_STEVT 0x1B
+#define NPE_STARTPC 0x1C
+#define NPE_REGMAP 0x1E
+#define NPE_CINDEX 0x1F
+
+#define INSTR_WR_REG_SHORT 0x0000C000
+#define INSTR_WR_REG_BYTE 0x00004000
+#define INSTR_RD_FIFO 0x0F888220
+#define INSTR_RESET_MBOX 0x0FAC8210
+
+#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */
+#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */
+#define ECS_BG_CTXT_REG_2 0x02
+#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */
+#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */
+#define ECS_PRI_1_CTXT_REG_2 0x06
+#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */
+#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */
+#define ECS_PRI_2_CTXT_REG_2 0x0A
+#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */
+#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */
+#define ECS_DBG_CTXT_REG_2 0x0E
+#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */
+
+#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */
+#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */
+#define ECS_REG_0_LDUR_BITS 8
+#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */
+#define ECS_REG_1_CCTXT_BITS 16
+#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */
+#define ECS_REG_1_SELCTXT_BITS 0
+#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */
+#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */
+#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */
+
+/* NPE watchpoint_fifo register bit */
+#define WFIFO_VALID 0x80000000
+
+/* NPE messaging_status register bit definitions */
+#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */
+#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */
+#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */
+#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */
+#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */
+#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */
+#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */
+#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */
+
+/* NPE messaging_control register bit definitions */
+#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */
+#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */
+#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */
+#define MSGCTL_IN_FIFO_WRITE 0x02000000
+
+/* NPE mailbox_status value for reset */
+#define RESET_MBOX_STAT 0x0000F0F0
+
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
+
+#define print_npe(pri, npe, fmt, ...) \
+ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
+
+#if DEBUG_MSG
+#define debug_msg(npe, fmt, ...) \
+ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__)
+#else
+#define debug_msg(npe, fmt, ...)
+#endif
+
+static struct {
+ u32 reg, val;
+} ecs_reset[] = {
+ { ECS_BG_CTXT_REG_0, 0xA0000000 },
+ { ECS_BG_CTXT_REG_1, 0x01000000 },
+ { ECS_BG_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_1_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_1_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_1_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_2_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_2_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_2_CTXT_REG_2, 0x00008000 },
+ { ECS_DBG_CTXT_REG_0, 0x20000000 },
+ { ECS_DBG_CTXT_REG_1, 0x00000000 },
+ { ECS_DBG_CTXT_REG_2, 0x001E0000 },
+ { ECS_INSTRUCT_REG, 0x1003C00F },
+};
+
+static struct npe npe_tab[NPE_COUNT] = {
+ {
+ .id = 0,
+ }, {
+ .id = 1,
+ }, {
+ .id = 2,
+ }
+};
+
+int npe_running(struct npe *npe)
+{
+ return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0;
+}
+
+static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data)
+{
+ __raw_writel(data, &npe->regs->exec_data);
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+}
+
+static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd)
+{
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+	/* Introduce extra read cycles after issuing the read command to the
+	   NPE so that we read the register after the NPE has updated it.
+	   This works around a race condition between the XScale and the NPE */
+ __raw_readl(&npe->regs->exec_data);
+ __raw_readl(&npe->regs->exec_data);
+ return __raw_readl(&npe->regs->exec_data);
+}
+
+static void npe_clear_active(struct npe *npe, u32 reg)
+{
+ u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE);
+}
+
+static void npe_start(struct npe *npe)
+{
+ /* ensure only Background Context Stack Level is active */
+ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0);
+ npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0);
+ npe_clear_active(npe, ECS_DBG_CTXT_REG_0);
+
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd);
+}
+
+static void npe_stop(struct npe *npe)
+{
+ __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/
+}
+
+static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx,
+ u32 ldur)
+{
+ u32 wc;
+ int i;
+
+ /* set the Active bit, and the LDUR, in the debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG,
+ ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS));
+
+ /* set CCTXT at ECS DEBUG L3 to specify in which context to execute
+ the instruction, and set SELCTXT at ECS DEBUG Level to specify
+ which context store to access.
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
+ */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG,
+ (ctx << ECS_REG_1_CCTXT_BITS) |
+ (ctx << ECS_REG_1_SELCTXT_BITS));
+
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+
+ /* load NPE instruction into the instruction register */
+ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr);
+
+ /* we need this value later to wait for completion of NPE execution
+ step */
+ wc = __raw_readl(&npe->regs->watch_count);
+
+ /* issue a Step One command via the Execution Control register */
+ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd);
+
+ /* Watch Count register increments when NPE completes an instruction */
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (wc != __raw_readl(&npe->regs->watch_count))
+ return 0;
+ udelay(1);
+ }
+
+ print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr,
+ u8 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov8 d0, #0 */
+ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr,
+ u16 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov16 d0, #0 */
+ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
+ u32 val, u32 ctx)
+{
+ /* write in 16 bit steps first the high and then the low value */
+ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx))
+ return -ETIMEDOUT;
+ return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx);
+}
+
+static int npe_reset(struct npe *npe)
+{
+ u32 val, ctl, exec_count, ctx_reg2;
+ int i;
+
+ ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) &
+ 0x3F3FFFFF;
+
+ /* disable parity interrupt */
+ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control);
+
+ /* pre exec - debug instruction */
+ /* turn off the halt bit by clearing Execution Count register. */
+ exec_count = __raw_readl(&npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->exec_count);
+ /* ensure that IF and IE are on (temporarily), so that we don't end up
+ stepping forever */
+ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 |
+ ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE);
+
+ /* clear the FIFOs */
+ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID)
+ ;
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE)
+ /* read from the outFIFO until empty */
+ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n",
+ __raw_readl(&npe->regs->in_out_fifo));
+
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)
+		/* step execution of the NPE instruction to read inFIFO using
+ the Debug Executing Context stack */
+ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0))
+ return -ETIMEDOUT;
+
+ /* reset the mailbox reg from the XScale side */
+ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status);
+ /* from NPE side */
+ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0))
+ return -ETIMEDOUT;
+
+ /* Reset the physical registers in the NPE register file */
+ for (val = 0; val < NPE_PHYS_REG; val++) {
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0))
+ return -ETIMEDOUT;
+ /* address is either 0 or 4 */
+ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0))
+ return -ETIMEDOUT;
+ }
+
+ /* Reset the context store = each context's Context Store registers */
+
+ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC
+ for Background ECS, to set where NPE starts executing code */
+ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG);
+ val &= ~ECS_REG_0_NEXTPC_MASK;
+ val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK;
+ npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val);
+
+ for (i = 0; i < 16; i++) {
+ if (i) { /* Context 0 has no STEVT nor STARTPC */
+ /* STEVT = off, 0x80 */
+ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i))
+ return -ETIMEDOUT;
+ }
+ /* REGMAP = d0->p0, d8->p2, d16->p4 */
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i))
+ return -ETIMEDOUT;
+ }
+
+ /* post exec */
+ /* clear active bit in debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0);
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ /* restore previous values */
+ __raw_writel(exec_count, &npe->regs->exec_count);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2);
+
+ /* write reset values to Execution Context Stack registers */
+ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++)
+ npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG,
+ ecs_reset[val].val);
+
+ /* clear the profile counter */
+ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd);
+
+ __raw_writel(0, &npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->action_points[0]);
+ __raw_writel(0, &npe->regs->action_points[1]);
+ __raw_writel(0, &npe->regs->action_points[2]);
+ __raw_writel(0, &npe->regs->action_points[3]);
+ __raw_writel(0, &npe->regs->watch_count);
+
+ val = ixp4xx_read_feature_bits();
+ /* reset the NPE */
+ ixp4xx_write_feature_bits(val &
+ ~(IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ /* deassert reset */
+ ixp4xx_write_feature_bits(val |
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+ break; /* NPE is back alive */
+ udelay(1);
+ }
+ if (i == MAX_RETRIES)
+ return -ETIMEDOUT;
+
+ npe_stop(npe);
+
+ /* restore NPE configuration bus Control Register - parity settings */
+ __raw_writel(ctl, &npe->regs->messaging_control);
+ return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+ const u32 *send = msg;
+ int cycles = 0;
+
+ debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+ what, send[0], send[1]);
+
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+ debug_msg(npe, "NPE input FIFO not empty\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[0], &npe->regs->in_out_fifo);
+
+ if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+ debug_msg(npe, "NPE input FIFO full\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[1], &npe->regs->in_out_fifo);
+
+ while ((cycles < MAX_RETRIES) &&
+ (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout sending message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ u32 *recv = msg;
+ int cycles = 0, cnt = 0;
+
+ debug_msg(npe, "Trying to receive message %s\n", what);
+
+ while (cycles < MAX_RETRIES) {
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+ recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+ if (cnt == 2)
+ break;
+ } else {
+ udelay(1);
+ cycles++;
+ }
+ }
+
+ switch(cnt) {
+ case 1:
+ debug_msg(npe, "Received [%08X]\n", recv[0]);
+ break;
+ case 2:
+ debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+ break;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout waiting for message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ int result;
+ u32 *send = msg, recv[2];
+
+ if ((result = npe_send_message(npe, msg, what)) != 0)
+ return result;
+ if ((result = npe_recv_message(npe, recv, what)) != 0)
+ return result;
+
+ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+ debug_msg(npe, "Message %s: unexpected message received\n",
+ what);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+ const struct firmware *fw_entry;
+
+ struct dl_block {
+ u32 type;
+ u32 offset;
+ } *blk;
+
+ struct dl_image {
+ u32 magic;
+ u32 id;
+ u32 size;
+ union {
+ u32 data[0];
+ struct dl_block blocks[0];
+ };
+ } *image;
+
+ struct dl_codeblock {
+ u32 npe_addr;
+ u32 size;
+ u32 data[0];
+ } *cb;
+
+ int i, j, err, data_size, instr_size, blocks, table_end;
+ u32 cmd;
+
+ if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+ return err;
+
+ err = -EINVAL;
+ if (fw_entry->size < sizeof(struct dl_image)) {
+ print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+ goto err;
+ }
+ image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+ image->magic, image->id, image->size, image->size * 4);
+#endif
+
+ if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+ image->id = swab32(image->id);
+ image->size = swab32(image->size);
+ } else if (image->magic != FW_MAGIC) {
+ print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+ image->magic);
+ goto err;
+ }
+ if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+ print_npe(KERN_ERR, npe,
+ "inconsistent size of firmware file\n");
+ goto err;
+ }
+ if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+ print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+ goto err;
+ }
+ if (image->magic == swab32(FW_MAGIC))
+ for (i = 0; i < image->size; i++)
+ image->data[i] = swab32(image->data[i]);
+
+ if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+ print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+ "IXP42x\n");
+ goto err;
+ }
+
+ if (npe_running(npe)) {
+ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+ "already running\n");
+ err = -EBUSY;
+ goto err;
+ }
+#if 0
+ npe_stop(npe);
+ npe_reset(npe);
+#endif
+
+ print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+ "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+ (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+ if (cpu_is_ixp42x()) {
+ if (!npe->id)
+ instr_size = NPE_A_42X_INSTR_SIZE;
+ else
+ instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+ data_size = NPE_42X_DATA_SIZE;
+ } else {
+ instr_size = NPE_46X_INSTR_SIZE;
+ data_size = NPE_46X_DATA_SIZE;
+ }
+
+ for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+ blocks++)
+ if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+ break;
+ if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+ print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+ "found\n");
+ goto err;
+ }
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+ table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+ for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+ if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+ || blk->offset < table_end) {
+ print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+ "firmware block #%i\n", blk->offset, i);
+ goto err;
+ }
+
+ cb = (struct dl_codeblock*)&image->data[blk->offset];
+ if (blk->type == FW_BLOCK_TYPE_INSTR) {
+ if (cb->npe_addr + cb->size > instr_size)
+ goto too_big;
+ cmd = CMD_WR_INS_MEM;
+ } else if (blk->type == FW_BLOCK_TYPE_DATA) {
+ if (cb->npe_addr + cb->size > data_size)
+ goto too_big;
+ cmd = CMD_WR_DATA_MEM;
+ } else {
+ print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+ "type 0x%X\n", i, blk->type);
+ goto err;
+ }
+ if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+ "fit in firmware image: type %c, start 0x%X,"
+ " length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+ goto err;
+ }
+
+ for (j = 0; j < cb->size; j++)
+ npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+ }
+
+ npe_start(npe);
+ if (!npe_running(npe))
+ print_npe(KERN_ERR, npe, "unable to start\n");
+ release_firmware(fw_entry);
+ return 0;
+
+too_big:
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+ "memory: type %c, start 0x%X, length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+err:
+ release_firmware(fw_entry);
+ return err;
+}
+
+
+struct npe *npe_request(unsigned id)
+{
+ if (id < NPE_COUNT)
+ if (npe_tab[id].valid)
+ if (try_module_get(THIS_MODULE))
+ return &npe_tab[id];
+ return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_npe_probe(struct platform_device *pdev)
+{
+ int i, found = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ for (i = 0; i < NPE_COUNT; i++) {
+ struct npe *npe = &npe_tab[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+
+ if (!(ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+ i, res->start, res->end);
+ continue; /* NPE already disabled or not present */
+ }
+		npe->regs = devm_ioremap_resource(dev, res);
+		if (IS_ERR(npe->regs))
+			return PTR_ERR(npe->regs);
+
+ if (npe_reset(npe)) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
+ i, res->start, res->end);
+ continue;
+ }
+ npe->valid = 1;
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
+ i, res->start, res->end);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+static int ixp4xx_npe_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < NPE_COUNT; i++)
+ if (npe_tab[i].regs) {
+ npe_reset(&npe_tab[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_npe_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-network-processing-engine",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_npe_driver = {
+ .driver = {
+ .name = "ixp4xx-npe",
+ .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ },
+ .probe = ixp4xx_npe_probe,
+ .remove = ixp4xx_npe_remove,
+};
+module_platform_driver(ixp4xx_npe_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
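
As a rough consumer sketch of the NPE API moved here (the in-tree users are the Ethernet and HSS drivers touched later in this patch), a client would acquire an NPE, load its firmware and exchange a two-word message roughly as below. The NPE id and the message contents are made-up placeholders, and npe_name() is assumed from the npe.h header:

/* Hypothetical NPE consumer sketch using the exported API above. */
#include <linux/device.h>
#include <linux/soc/ixp4xx/npe.h>

static int example_npe_bringup(struct device *dev)
{
	struct npe *npe;
	u32 msg[2] = { 0x01000000, 0 };	/* made-up command word pair */
	int err;

	npe = npe_request(1);		/* NPE-B, assuming it is present */
	if (!npe)
		return -ENODEV;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), dev);
		if (err)
			goto out;
	}

	/* npe_send_recv_message() expects the NPE to echo the words back */
	err = npe_send_recv_message(npe, msg, "example command");
out:
	npe_release(npe);
	return err;
}
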
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 000000000000..13a8a13c9b01
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,488 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+static struct qmgr_regs __iomem *qmgr_regs;
+static int qmgr_irq_1;
+static int qmgr_irq_2;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_put_entry(unsigned int queue, u32 val)
+{
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) put %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ __raw_writel(val, &qmgr_regs->acc[queue][0]);
+}
+
+u32 qmgr_get_entry(unsigned int queue)
+{
+ u32 val;
+ val = __raw_readl(&qmgr_regs->acc[queue][0]);
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) get %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ return val;
+}
+
+static int __qmgr_get_stat1(unsigned int queue)
+{
+ return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
+ >> ((queue & 7) << 2)) & 0xF;
+}
+
+static int __qmgr_get_stat2(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
+ >> ((queue & 0xF) << 1)) & 0x3;
+}
+
+/**
+ * qmgr_stat_empty() - checks if a hardware queue is empty
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is empty.
+ */
+int qmgr_stat_empty(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
+}
+
+/**
+ * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is below low watermark.
+ */
+int qmgr_stat_below_low_watermark(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statne_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
+}
+
+/**
+ * qmgr_stat_full() - checks if a hardware queue is full
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is full.
+ */
+int qmgr_stat_full(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statf_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
+}
+
+/**
+ * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue experienced overflow.
+ */
+int qmgr_stat_overflow(unsigned int queue)
+{
+ return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
+}
+
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ if (queue < HALF_QUEUES) {
+ u32 __iomem *reg;
+ int bit;
+ BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+ reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+ bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+ __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+ reg);
+ } else
+ /* IRQ source for queues 32-63 is fixed */
+ BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+ irq_handlers[queue] = handler;
+ irq_pdevs[queue] = pdev;
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 en_bitmap, src, stat;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+ en_bitmap = qmgr_regs->irqen[0];
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+ src = qmgr_regs->irqsrc[i >> 3];
+ stat = qmgr_regs->stat1[i >> 3];
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+ irq_handlers[i](irq_pdevs[i]);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 req_bitmap;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+ req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
+ irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+ int i, half = (irq == qmgr_irq_1 ? 0 : 1);
+ u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+ if (!req_bitmap)
+ return 0;
+ __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last queue */
+ req_bitmap &= ~BIT(i);
+ i += half * HALF_QUEUES;
+ irq_handlers[i](irq_pdevs[i]);
+ }
+ return IRQ_HANDLED;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+ &qmgr_regs->irqen[half]);
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+ &qmgr_regs->irqen[half]);
+ __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+ mask[3] = mask[3] << 1 | mask[2] >> 31;
+ mask[2] = mask[2] << 1 | mask[1] >> 31;
+ mask[1] = mask[1] << 1 | mask[0] >> 31;
+ mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark,
+ const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark)
+#endif
+{
+ u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+ int err;
+
+ BUG_ON(queue >= QUEUES);
+
+ if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+ return -EINVAL;
+
+ switch (len) {
+ case 16:
+ cfg = 0 << 24;
+ mask[0] = 0x1;
+ break;
+ case 32:
+ cfg = 1 << 24;
+ mask[0] = 0x3;
+ break;
+ case 64:
+ cfg = 2 << 24;
+ mask[0] = 0xF;
+ break;
+ case 128:
+ cfg = 3 << 24;
+ mask[0] = 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg |= nearly_empty_watermark << 26;
+ cfg |= nearly_full_watermark << 29;
+ len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+ mask[1] = mask[2] = mask[3] = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ spin_lock_irq(&qmgr_lock);
+ if (__raw_readl(&qmgr_regs->sram[queue])) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ while (1) {
+ if (!(used_sram_bitmap[0] & mask[0]) &&
+ !(used_sram_bitmap[1] & mask[1]) &&
+ !(used_sram_bitmap[2] & mask[2]) &&
+ !(used_sram_bitmap[3] & mask[3]))
+ break; /* found free space */
+
+ addr++;
+ shift_mask(mask);
+ if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+ printk(KERN_ERR "qmgr: no free SRAM space for"
+ " queue %i\n", queue);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+
+ used_sram_bitmap[0] |= mask[0];
+ used_sram_bitmap[1] |= mask[1];
+ used_sram_bitmap[2] |= mask[2];
+ used_sram_bitmap[3] |= mask[3];
+ __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+ snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+ desc_format, name);
+ printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+ qmgr_queue_descs[queue], queue, addr);
+#endif
+ spin_unlock_irq(&qmgr_lock);
+ return 0;
+
+err:
+ spin_unlock_irq(&qmgr_lock);
+ module_put(THIS_MODULE);
+ return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+ u32 cfg, addr, mask[4];
+
+ BUG_ON(queue >= QUEUES); /* not in valid range */
+
+ spin_lock_irq(&qmgr_lock);
+ cfg = __raw_readl(&qmgr_regs->sram[queue]);
+ addr = (cfg >> 14) & 0xFF;
+
+ BUG_ON(!addr); /* not requested */
+
+ switch ((cfg >> 24) & 3) {
+ case 0: mask[0] = 0x1; break;
+ case 1: mask[0] = 0x3; break;
+ case 2: mask[0] = 0xF; break;
+ case 3: mask[0] = 0xFF; break;
+ }
+
+ mask[1] = mask[2] = mask[3] = 0;
+
+ while (addr--)
+ shift_mask(mask);
+
+#if DEBUG_QMGR
+ printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+ qmgr_queue_descs[queue], queue);
+ qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+ while ((addr = qmgr_get_entry(queue)))
+ printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+ queue, addr);
+
+ __raw_writel(0, &qmgr_regs->sram[queue]);
+
+ used_sram_bitmap[0] &= ~mask[0];
+ used_sram_bitmap[1] &= ~mask[1];
+ used_sram_bitmap[2] &= ~mask[2];
+ used_sram_bitmap[3] &= ~mask[3];
+ irq_handlers[queue] = NULL; /* catch IRQ bugs */
+ spin_unlock_irq(&qmgr_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_qmgr_probe(struct platform_device *pdev)
+{
+ int i, err;
+ irq_handler_t handler1, handler2;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq1, irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+	qmgr_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qmgr_regs))
+		return PTR_ERR(qmgr_regs);
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 <= 0)
+ return irq1 ? irq1 : -EINVAL;
+ qmgr_irq_1 = irq1;
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 <= 0)
+ return irq2 ? irq2 : -EINVAL;
+ qmgr_irq_2 = irq2;
+
+ /* reset qmgr registers */
+ for (i = 0; i < 4; i++) {
+ __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+ __raw_writel(0, &qmgr_regs->irqsrc[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ __raw_writel(0, &qmgr_regs->stat2[i]);
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+ __raw_writel(0, &qmgr_regs->irqen[i]);
+ }
+
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+ __raw_writel(0, &qmgr_regs->statf_h);
+
+ for (i = 0; i < QUEUES; i++)
+ __raw_writel(0, &qmgr_regs->sram[i]);
+
+ if (cpu_is_ixp42x_rev_a0()) {
+ handler1 = qmgr_irq1_a0;
+ handler2 = qmgr_irq2_a0;
+ } else
+ handler1 = handler2 = qmgr_irq;
+
+ err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq1, err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq2, err);
+ return err;
+ }
+
+	used_sram_bitmap[0] = 0xF; /* first 4 pages reserved for config */
+ spin_lock_init(&qmgr_lock);
+
+ dev_info(dev, "IXP4xx Queue Manager initialized.\n");
+ return 0;
+}
+
+static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+{
+ synchronize_irq(qmgr_irq_1);
+ synchronize_irq(qmgr_irq_2);
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_qmgr_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ahb-queue-manager",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_qmgr_driver = {
+ .driver = {
+ .name = "ixp4xx-qmgr",
+ .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ },
+ .probe = ixp4xx_qmgr_probe,
+ .remove = ixp4xx_qmgr_remove,
+};
+module_platform_driver(ixp4xx_qmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_put_entry);
+EXPORT_SYMBOL(qmgr_get_entry);
+EXPORT_SYMBOL(qmgr_stat_empty);
+EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
+EXPORT_SYMBOL(qmgr_stat_full);
+EXPORT_SYMBOL(qmgr_stat_overflow);
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
+EXPORT_SYMBOL(qmgr_release_queue);
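
A correspondingly hedged sketch of the queue manager side: the queue number, length and watermarks are invented, and the interrupt source constant and the qmgr_request_queue() helper are assumed from the qmgr.h header (the printf-style description is only used when DEBUG_QMGR is set):

/* Hypothetical queue manager consumer sketch using the exported API above. */
#include <linux/kernel.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define EXAMPLE_RXQ	3	/* made-up hardware queue number */

static void example_rx_ready(void *pdev)
{
	u32 entry;

	/* qmgr_get_entry() returns 0 once the queue is drained */
	while ((entry = qmgr_get_entry(EXAMPLE_RXQ)))
		pr_info("qmgr example: got entry %08x\n", entry);
}

static int example_qmgr_setup(void *pdev)
{
	int err;

	/* 32-entry queue, nearly-empty/nearly-full watermarks left at 0 */
	err = qmgr_request_queue(EXAMPLE_RXQ, 32, 0, 0, "example %s", "rx");
	if (err)
		return err;

	qmgr_set_irq(EXAMPLE_RXQ, QUEUE_IRQ_SRC_NOT_EMPTY,
		     example_rx_ready, pdev);
	qmgr_enable_irq(EXAMPLE_RXQ);
	return 0;
}
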
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index ca8a94f15ac0..38183ac438c6 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -40,8 +40,6 @@ struct da8xx_ohci_hcd {
struct phy *usb11_phy;
struct regulator *vbus_reg;
struct notifier_block nb;
- unsigned int reg_enabled;
- struct gpio_desc *vbus_gpio;
struct gpio_desc *oc_gpio;
};
@@ -92,29 +90,21 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
struct device *dev = hcd->self.controller;
int ret;
- if (da8xx_ohci->vbus_gpio) {
- gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
- return 0;
- }
-
if (!da8xx_ohci->vbus_reg)
return 0;
- if (on && !da8xx_ohci->reg_enabled) {
+ if (on) {
ret = regulator_enable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 1;
-
- } else if (!on && da8xx_ohci->reg_enabled) {
+ } else {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to disable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 0;
}
return 0;
@@ -124,9 +114,6 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
-
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -159,9 +146,6 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return 1;
-
if (da8xx_ohci->vbus_reg)
return 1;
@@ -206,12 +190,18 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
-static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+static irqreturn_t ohci_da8xx_oc_thread(int irq, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci = data;
+ struct device *dev = da8xx_ohci->hcd->self.controller;
+ int ret;
- if (gpiod_get_value(da8xx_ohci->oc_gpio))
- gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+ if (gpiod_get_value_cansleep(da8xx_ohci->oc_gpio) &&
+ da8xx_ohci->vbus_reg) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret)
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ }
return IRQ_HANDLED;
}
@@ -424,11 +414,6 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
}
- da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
- GPIOD_OUT_HIGH);
- if (IS_ERR(da8xx_ohci->vbus_gpio))
- goto err;
-
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
if (IS_ERR(da8xx_ohci->oc_gpio))
goto err;
@@ -438,8 +423,9 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
if (oc_irq < 0)
goto err;
- error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ error = devm_request_threaded_irq(dev, oc_irq, NULL,
+ ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"OHCI over-current indicator", da8xx_ohci);
if (error)
goto err;
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index dd139cda936c..9067998759e3 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/of.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -176,6 +177,14 @@ static int __init ixp4xx_wdt_init(void)
{
int ret;
+ /*
+	 * FIXME: we bail out on device tree boot but this really needs
+	 * to be fixed in a nicer way: this registers the watchdog misc
+	 * device before even matching the driver infrastructure; we should
+	 * only probe detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");