Diffstat (limited to 'drivers')
69 files changed, 14496 insertions, 5079 deletions
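The bulk of this series reworks the OPP core in drivers/base/power/opp/: the internal device_opp/device_list_opp structures become opp_table/opp_device, each table gains a clock and an optional regulator, debugfs support is added, and a new dev_pm_opp_set_rate() helper performs the ordered voltage/frequency transition. The following sketch is illustrative only and not part of the patch; it shows roughly how a consumer driver might combine the interfaces touched here, with the "vdd" supply name and the wrapper function invented for the example.

/*
 * Illustrative sketch, not part of the patch: exercise the OPP interfaces
 * added/renamed by this series. The "vdd" supply and the function name are
 * hypothetical; real drivers add their own error reporting and lifetimes.
 */
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_scale_device(struct device *dev, unsigned long target_hz)
{
	int ret;

	/* Must be called before any OPPs are registered for the device. */
	ret = dev_pm_opp_set_regulator(dev, "vdd");
	if (ret)
		return ret;

	/* Populate the OPP table from the operating-points DT bindings. */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		goto put_regulator;

	/* The core picks the OPP and orders the voltage/frequency changes. */
	ret = dev_pm_opp_set_rate(dev, target_hz);

	dev_pm_opp_of_remove_table(dev);
put_regulator:
	dev_pm_opp_put_regulator(dev);
	return ret;
}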
diff --git a/drivers/Kconfig b/drivers/Kconfig index d2ac339de85f..63baceb6c118 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -198,4 +198,6 @@ source "drivers/hwtracing/intel_th/Kconfig" source "drivers/fpga/Kconfig" +source "drivers/tee/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 098997f2cc3a..a2a1fbd53bbe 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -173,3 +173,4 @@ obj-$(CONFIG_STM) += hwtracing/stm/ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_NVMEM) += nvmem/ obj-$(CONFIG_FPGA) += fpga/ +obj-$(CONFIG_TEE) += tee/ diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile index 33c1e18c41a4..19837ef04d8e 100644 --- a/drivers/base/power/opp/Makefile +++ b/drivers/base/power/opp/Makefile @@ -1,2 +1,3 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG obj-y += core.o cpu.o +obj-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index f8580900c273..d8f4cc22856c 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -13,50 +13,52 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/clk.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/of.h> #include <linux/export.h> +#include <linux/regulator/consumer.h> #include "opp.h" /* - * The root of the list of all devices. All device_opp structures branch off - * from here, with each device_opp containing the list of opp it supports in + * The root of the list of all opp-tables. All opp_table structures branch off + * from here, with each opp_table containing the list of opps it supports in * various states of availability. */ -static LIST_HEAD(dev_opp_list); +static LIST_HEAD(opp_tables); /* Lock to allow exclusive modification to the device and opp lists */ -DEFINE_MUTEX(dev_opp_list_lock); +DEFINE_MUTEX(opp_table_lock); #define opp_rcu_lockdep_assert() \ do { \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&dev_opp_list_lock), \ - "Missing rcu_read_lock() or " \ - "dev_opp_list_lock protection"); \ + !lockdep_is_held(&opp_table_lock), \ + "Missing rcu_read_lock() or " \ + "opp_table_lock protection"); \ } while (0) -static struct device_list_opp *_find_list_dev(const struct device *dev, - struct device_opp *dev_opp) +static struct opp_device *_find_opp_dev(const struct device *dev, + struct opp_table *opp_table) { - struct device_list_opp *list_dev; + struct opp_device *opp_dev; - list_for_each_entry(list_dev, &dev_opp->dev_list, node) - if (list_dev->dev == dev) - return list_dev; + list_for_each_entry(opp_dev, &opp_table->dev_list, node) + if (opp_dev->dev == dev) + return opp_dev; return NULL; } -static struct device_opp *_managed_opp(const struct device_node *np) +static struct opp_table *_managed_opp(const struct device_node *np) { - struct device_opp *dev_opp; + struct opp_table *opp_table; - list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) { - if (dev_opp->np == np) { + list_for_each_entry_rcu(opp_table, &opp_tables, node) { + if (opp_table->np == np) { /* * Multiple devices can point to the same OPP table and * so will have same node-pointer, np. @@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. */ - return dev_opp->shared_opp ? dev_opp : NULL; + return opp_table->shared_opp ? 
opp_table : NULL; } } @@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np) } /** - * _find_device_opp() - find device_opp struct using device pointer - * @dev: device pointer used to lookup device OPPs + * _find_opp_table() - find opp_table struct using device pointer + * @dev: device pointer used to lookup OPP table * - * Search list of device OPPs for one containing matching device. Does a RCU - * reader operation to grab the pointer needed. + * Search OPP table for one containing matching device. Does a RCU reader + * operation to grab the pointer needed. * - * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or + * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or * -EINVAL based on type of error. * * Locking: For readers, this function must be called under rcu_read_lock(). - * device_opp is a RCU protected pointer, which means that device_opp is valid + * opp_table is a RCU protected pointer, which means that opp_table is valid * as long as we are under RCU lock. * - * For Writers, this function must be called with dev_opp_list_lock held. + * For Writers, this function must be called with opp_table_lock held. */ -struct device_opp *_find_device_opp(struct device *dev) +struct opp_table *_find_opp_table(struct device *dev) { - struct device_opp *dev_opp; + struct opp_table *opp_table; opp_rcu_lockdep_assert(); @@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev) return ERR_PTR(-EINVAL); } - list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) - if (_find_list_dev(dev, dev_opp)) - return dev_opp; + list_for_each_entry_rcu(opp_table, &opp_tables, node) + if (_find_opp_dev(dev, opp_table)) + return opp_table; return ERR_PTR(-ENODEV); } @@ -213,16 +215,16 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); */ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { - struct device_opp *dev_opp; + struct opp_table *opp_table; unsigned long clock_latency_ns; rcu_read_lock(); - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) clock_latency_ns = 0; else - clock_latency_ns = dev_opp->clock_latency_ns_max; + clock_latency_ns = opp_table->clock_latency_ns_max; rcu_read_unlock(); return clock_latency_ns; @@ -230,6 +232,79 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); /** + * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max voltage latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). 
+ */ +unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) +{ + struct opp_table *opp_table; + struct dev_pm_opp *opp; + struct regulator *reg; + unsigned long latency_ns = 0; + unsigned long min_uV = ~0, max_uV = 0; + int ret; + + rcu_read_lock(); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + rcu_read_unlock(); + return 0; + } + + reg = opp_table->regulator; + if (IS_ERR(reg)) { + /* Regulator may not be required for device */ + rcu_read_unlock(); + return 0; + } + + list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { + if (!opp->available) + continue; + + if (opp->u_volt_min < min_uV) + min_uV = opp->u_volt_min; + if (opp->u_volt_max > max_uV) + max_uV = opp->u_volt_max; + } + + rcu_read_unlock(); + + /* + * The caller needs to ensure that opp_table (and hence the regulator) + * isn't freed, while we are executing this routine. + */ + ret = regulator_set_voltage_time(reg, min_uV, max_uV); + if (ret > 0) + latency_ns = ret * 1000; + + return latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency); + +/** + * dev_pm_opp_get_max_transition_latency() - Get max transition latency in + * nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max transition latency, in nanoseconds, to + * switch from one OPP to other. + * + * Locking: This function takes rcu_read_lock(). + */ +unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) +{ + return dev_pm_opp_get_max_volt_latency(dev) + + dev_pm_opp_get_max_clock_latency(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); + +/** * dev_pm_opp_get_suspend_opp() - Get suspend opp * @dev: device for which we do this operation * @@ -244,21 +319,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); */ struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) { - struct device_opp *dev_opp; + struct opp_table *opp_table; opp_rcu_lockdep_assert(); - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp) || !dev_opp->suspend_opp || - !dev_opp->suspend_opp->available) + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table) || !opp_table->suspend_opp || + !opp_table->suspend_opp->available) return NULL; - return dev_opp->suspend_opp; + return opp_table->suspend_opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); /** - * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list + * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table * @dev: device for which we do this operation * * Return: This function returns the number of available opps if there are any, @@ -268,21 +343,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); */ int dev_pm_opp_get_opp_count(struct device *dev) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *temp_opp; int count = 0; rcu_read_lock(); - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - count = PTR_ERR(dev_opp); - dev_err(dev, "%s: device OPP not found (%d)\n", + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + count = PTR_ERR(opp_table); + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, count); goto out_unlock; } - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available) count++; } @@ -299,7 +374,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); * @freq: frequency to search for * @available: true/false - match for available opp * - * Return: Searches for exact match in the opp list and 
returns pointer to the + * Return: Searches for exact match in the opp table and returns pointer to the * matching opp if found, else returns ERR_PTR in case of error and should * be handled using IS_ERR. Error return values can be: * EINVAL: for bad pointer @@ -323,19 +398,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); opp_rcu_lockdep_assert(); - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - int r = PTR_ERR(dev_opp); - dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int r = PTR_ERR(opp_table); + + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); return ERR_PTR(r); } - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available == available && temp_opp->rate == freq) { opp = temp_opp; @@ -371,7 +447,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); opp_rcu_lockdep_assert(); @@ -381,11 +457,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, return ERR_PTR(-EINVAL); } - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return ERR_CAST(opp_table); - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available && temp_opp->rate >= *freq) { opp = temp_opp; *freq = opp->rate; @@ -421,7 +497,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); opp_rcu_lockdep_assert(); @@ -431,11 +507,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, return ERR_PTR(-EINVAL); } - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return ERR_CAST(opp_table); - list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available) { /* go to the next node, before choosing prev */ if (temp_opp->rate > *freq) @@ -451,116 +527,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); -/* List-dev Helpers */ -static void _kfree_list_dev_rcu(struct rcu_head *head) +/* + * The caller needs to ensure that opp_table (and hence the clk) isn't freed, + * while clk returned here is used. 
+ */ +static struct clk *_get_opp_clk(struct device *dev) { - struct device_list_opp *list_dev; + struct opp_table *opp_table; + struct clk *clk; + + rcu_read_lock(); - list_dev = container_of(head, struct device_list_opp, rcu_head); - kfree_rcu(list_dev, rcu_head); + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: device opp doesn't exist\n", __func__); + clk = ERR_CAST(opp_table); + goto unlock; + } + + clk = opp_table->clk; + if (IS_ERR(clk)) + dev_err(dev, "%s: No clock available for the device\n", + __func__); + +unlock: + rcu_read_unlock(); + return clk; } -static void _remove_list_dev(struct device_list_opp *list_dev, - struct device_opp *dev_opp) +static int _set_opp_voltage(struct device *dev, struct regulator *reg, + unsigned long u_volt, unsigned long u_volt_min, + unsigned long u_volt_max) { - list_del(&list_dev->node); - call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, - _kfree_list_dev_rcu); + int ret; + + /* Regulator not available for device */ + if (IS_ERR(reg)) { + dev_dbg(dev, "%s: regulator not available: %ld\n", __func__, + PTR_ERR(reg)); + return 0; + } + + dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min, + u_volt, u_volt_max); + + ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt, + u_volt_max); + if (ret) + dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n", + __func__, u_volt_min, u_volt, u_volt_max, ret); + + return ret; } -struct device_list_opp *_add_list_dev(const struct device *dev, - struct device_opp *dev_opp) +/** + * dev_pm_opp_set_rate() - Configure new OPP based on frequency + * @dev: device for which we do this operation + * @target_freq: frequency to achieve + * + * This configures the power-supplies and clock source to the levels specified + * by the OPP corresponding to the target_freq. + * + * Locking: This function takes rcu_read_lock(). 
+ */ +int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - struct device_list_opp *list_dev; + struct opp_table *opp_table; + struct dev_pm_opp *old_opp, *opp; + struct regulator *reg; + struct clk *clk; + unsigned long freq, old_freq; + unsigned long u_volt, u_volt_min, u_volt_max; + unsigned long ou_volt, ou_volt_min, ou_volt_max; + int ret; + + if (unlikely(!target_freq)) { + dev_err(dev, "%s: Invalid target frequency %lu\n", __func__, + target_freq); + return -EINVAL; + } + + clk = _get_opp_clk(dev); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + freq = clk_round_rate(clk, target_freq); + if ((long)freq <= 0) + freq = target_freq; + + old_freq = clk_get_rate(clk); + + /* Return early if nothing to do */ + if (old_freq == freq) { + dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", + __func__, freq); + return 0; + } + + rcu_read_lock(); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: device opp doesn't exist\n", __func__); + rcu_read_unlock(); + return PTR_ERR(opp_table); + } + + old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq); + if (!IS_ERR(old_opp)) { + ou_volt = old_opp->u_volt; + ou_volt_min = old_opp->u_volt_min; + ou_volt_max = old_opp->u_volt_max; + } else { + dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n", + __func__, old_freq, PTR_ERR(old_opp)); + } + + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", + __func__, freq, ret); + rcu_read_unlock(); + return ret; + } - list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); - if (!list_dev) + u_volt = opp->u_volt; + u_volt_min = opp->u_volt_min; + u_volt_max = opp->u_volt_max; + + reg = opp_table->regulator; + + rcu_read_unlock(); + + /* Scaling up? Scale voltage before frequency */ + if (freq > old_freq) { + ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min, + u_volt_max); + if (ret) + goto restore_voltage; + } + + /* Change frequency */ + + dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", + __func__, old_freq, freq); + + ret = clk_set_rate(clk, freq); + if (ret) { + dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, + ret); + goto restore_voltage; + } + + /* Scaling down? 
Scale voltage after frequency */ + if (freq < old_freq) { + ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min, + u_volt_max); + if (ret) + goto restore_freq; + } + + return 0; + +restore_freq: + if (clk_set_rate(clk, old_freq)) + dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", + __func__, old_freq); +restore_voltage: + /* This shouldn't harm even if the voltages weren't updated earlier */ + if (!IS_ERR(old_opp)) + _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); + +/* OPP-dev Helpers */ +static void _kfree_opp_dev_rcu(struct rcu_head *head) +{ + struct opp_device *opp_dev; + + opp_dev = container_of(head, struct opp_device, rcu_head); + kfree_rcu(opp_dev, rcu_head); +} + +static void _remove_opp_dev(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ + opp_debug_unregister(opp_dev, opp_table); + list_del(&opp_dev->node); + call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head, + _kfree_opp_dev_rcu); +} + +struct opp_device *_add_opp_dev(const struct device *dev, + struct opp_table *opp_table) +{ + struct opp_device *opp_dev; + int ret; + + opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); + if (!opp_dev) return NULL; - /* Initialize list-dev */ - list_dev->dev = dev; - list_add_rcu(&list_dev->node, &dev_opp->dev_list); + /* Initialize opp-dev */ + opp_dev->dev = dev; + list_add_rcu(&opp_dev->node, &opp_table->dev_list); + + /* Create debugfs entries for the opp_table */ + ret = opp_debug_register(opp_dev, opp_table); + if (ret) + dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", + __func__, ret); - return list_dev; + return opp_dev; } /** - * _add_device_opp() - Find device OPP table or allocate a new one + * _add_opp_table() - Find OPP table or allocate a new one * @dev: device for which we do this operation * * It tries to find an existing table first, if it couldn't find one, it * allocates a new OPP table and returns that. * - * Return: valid device_opp pointer if success, else NULL. + * Return: valid opp_table pointer if success, else NULL. */ -static struct device_opp *_add_device_opp(struct device *dev) +static struct opp_table *_add_opp_table(struct device *dev) { - struct device_opp *dev_opp; - struct device_list_opp *list_dev; + struct opp_table *opp_table; + struct opp_device *opp_dev; + struct device_node *np; + int ret; - /* Check for existing list for 'dev' first */ - dev_opp = _find_device_opp(dev); - if (!IS_ERR(dev_opp)) - return dev_opp; + /* Check for existing table for 'dev' first */ + opp_table = _find_opp_table(dev); + if (!IS_ERR(opp_table)) + return opp_table; /* - * Allocate a new device OPP table. In the infrequent case where a new + * Allocate a new OPP table. In the infrequent case where a new * device is needed to be added, we pay this penalty. */ - dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL); - if (!dev_opp) + opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); + if (!opp_table) return NULL; - INIT_LIST_HEAD(&dev_opp->dev_list); + INIT_LIST_HEAD(&opp_table->dev_list); - list_dev = _add_list_dev(dev, dev_opp); - if (!list_dev) { - kfree(dev_opp); + opp_dev = _add_opp_dev(dev, opp_table); + if (!opp_dev) { + kfree(opp_table); return NULL; } - srcu_init_notifier_head(&dev_opp->srcu_head); - INIT_LIST_HEAD(&dev_opp->opp_list); + /* + * Only required for backward compatibility with v1 bindings, but isn't + * harmful for other cases. And so we do it unconditionally. 
+ */ + np = of_node_get(dev->of_node); + if (np) { + u32 val; + + if (!of_property_read_u32(np, "clock-latency", &val)) + opp_table->clock_latency_ns_max = val; + of_property_read_u32(np, "voltage-tolerance", + &opp_table->voltage_tolerance_v1); + of_node_put(np); + } + + /* Set regulator to a non-NULL error value */ + opp_table->regulator = ERR_PTR(-ENXIO); + + /* Find clk for the device */ + opp_table->clk = clk_get(dev, NULL); + if (IS_ERR(opp_table->clk)) { + ret = PTR_ERR(opp_table->clk); + if (ret != -EPROBE_DEFER) + dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, + ret); + } - /* Secure the device list modification */ - list_add_rcu(&dev_opp->node, &dev_opp_list); - return dev_opp; + srcu_init_notifier_head(&opp_table->srcu_head); + INIT_LIST_HEAD(&opp_table->opp_list); + + /* Secure the device table modification */ + list_add_rcu(&opp_table->node, &opp_tables); + return opp_table; } /** - * _kfree_device_rcu() - Free device_opp RCU handler + * _kfree_device_rcu() - Free opp_table RCU handler * @head: RCU head */ static void _kfree_device_rcu(struct rcu_head *head) { - struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); + struct opp_table *opp_table = container_of(head, struct opp_table, + rcu_head); - kfree_rcu(device_opp, rcu_head); + kfree_rcu(opp_table, rcu_head); } /** - * _remove_device_opp() - Removes a device OPP table - * @dev_opp: device OPP table to be removed. + * _remove_opp_table() - Removes a OPP table + * @opp_table: OPP table to be removed. * - * Removes/frees device OPP table it it doesn't contain any OPPs. + * Removes/frees OPP table if it doesn't contain any OPPs. */ -static void _remove_device_opp(struct device_opp *dev_opp) +static void _remove_opp_table(struct opp_table *opp_table) { - struct device_list_opp *list_dev; + struct opp_device *opp_dev; + + if (!list_empty(&opp_table->opp_list)) + return; + + if (opp_table->supported_hw) + return; - if (!list_empty(&dev_opp->opp_list)) + if (opp_table->prop_name) return; - list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, - node); + if (!IS_ERR(opp_table->regulator)) + return; + + /* Release clk */ + if (!IS_ERR(opp_table->clk)) + clk_put(opp_table->clk); + + opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device, + node); - _remove_list_dev(list_dev, dev_opp); + _remove_opp_dev(opp_dev, opp_table); /* dev_list must be empty now */ - WARN_ON(!list_empty(&dev_opp->dev_list)); + WARN_ON(!list_empty(&opp_table->dev_list)); - list_del_rcu(&dev_opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, + list_del_rcu(&opp_table->node); + call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head, _kfree_device_rcu); } @@ -577,17 +880,17 @@ static void _kfree_opp_rcu(struct rcu_head *head) /** * _opp_remove() - Remove an OPP from a table definition - * @dev_opp: points back to the device_opp struct this opp belongs to + * @opp_table: points back to the opp_table struct this opp belongs to * @opp: pointer to the OPP to remove * @notify: OPP_EVENT_REMOVE notification should be sent or not * - * This function removes an opp definition from the opp list. + * This function removes an opp definition from the opp table. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * It is assumed that the caller holds required mutex for an RCU updater * strategy. 
*/ -static void _opp_remove(struct device_opp *dev_opp, +static void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify) { /* @@ -595,21 +898,23 @@ static void _opp_remove(struct device_opp *dev_opp, * frequency/voltage list. */ if (notify) - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); + srcu_notifier_call_chain(&opp_table->srcu_head, + OPP_EVENT_REMOVE, opp); + opp_debug_remove_one(opp); list_del_rcu(&opp->node); - call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); - _remove_device_opp(dev_opp); + _remove_opp_table(opp_table); } /** - * dev_pm_opp_remove() - Remove an OPP from OPP list + * dev_pm_opp_remove() - Remove an OPP from OPP table * @dev: device for which we do this operation * @freq: OPP to remove with matching 'freq' * - * This function removes an opp from the opp list. + * This function removes an opp from the opp table. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -618,17 +923,17 @@ static void _opp_remove(struct device_opp *dev_opp, void dev_pm_opp_remove(struct device *dev, unsigned long freq) { struct dev_pm_opp *opp; - struct device_opp *dev_opp; + struct opp_table *opp_table; bool found = false; - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) goto unlock; - list_for_each_entry(opp, &dev_opp->opp_list, node) { + list_for_each_entry(opp, &opp_table->opp_list, node) { if (opp->rate == freq) { found = true; break; @@ -641,14 +946,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) goto unlock; } - _opp_remove(dev_opp, opp, true); + _opp_remove(opp_table, opp, true); unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); static struct dev_pm_opp *_allocate_opp(struct device *dev, - struct device_opp **dev_opp) + struct opp_table **opp_table) { struct dev_pm_opp *opp; @@ -659,8 +964,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev, INIT_LIST_HEAD(&opp->node); - *dev_opp = _add_device_opp(dev); - if (!*dev_opp) { + *opp_table = _add_opp_table(dev); + if (!*opp_table) { kfree(opp); return NULL; } @@ -668,21 +973,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev, return opp; } +static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, + struct opp_table *opp_table) +{ + struct regulator *reg = opp_table->regulator; + + if (!IS_ERR(reg) && + !regulator_is_supported_voltage(reg, opp->u_volt_min, + opp->u_volt_max)) { + pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n", + __func__, opp->u_volt_min, opp->u_volt_max); + return false; + } + + return true; +} + static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, - struct device_opp *dev_opp) + struct opp_table *opp_table) { struct dev_pm_opp *opp; - struct list_head *head = &dev_opp->opp_list; + struct list_head *head = &opp_table->opp_list; + int ret; /* * Insert new OPP in order of 
increasing frequency and discard if * already present. * - * Need to use &dev_opp->opp_list in the condition part of the 'for' + * Need to use &opp_table->opp_list in the condition part of the 'for' * loop, don't replace it with head otherwise it will become an infinite * loop. */ - list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { if (new_opp->rate > opp->rate) { head = &opp->node; continue; @@ -700,9 +1022,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 0 : -EEXIST; } - new_opp->dev_opp = dev_opp; + new_opp->opp_table = opp_table; list_add_rcu(&new_opp->node, head); + ret = opp_debug_create_one(new_opp, opp_table); + if (ret) + dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", + __func__, ret); + + if (!_opp_supported_by_regulators(new_opp, opp_table)) { + new_opp->available = false; + dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n", + __func__, new_opp->rate); + } + return 0; } @@ -713,14 +1046,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * @u_volt: Voltage in uVolts for this OPP * @dynamic: Dynamically added OPPs. * - * This function adds an opp definition to the opp list and returns status. + * This function adds an opp definition to the opp table and returns status. * The opp is made available by default and it can be controlled using * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. * * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table * and freed by dev_pm_opp_of_remove_table. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -736,14 +1069,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *new_opp; + unsigned long tol; int ret; - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); - new_opp = _allocate_opp(dev, &dev_opp); + new_opp = _allocate_opp(dev, &opp_table); if (!new_opp) { ret = -ENOMEM; goto unlock; @@ -751,60 +1085,77 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, /* populate the opp table */ new_opp->rate = freq; + tol = u_volt * opp_table->voltage_tolerance_v1 / 100; new_opp->u_volt = u_volt; + new_opp->u_volt_min = u_volt - tol; + new_opp->u_volt_max = u_volt + tol; new_opp->available = true; new_opp->dynamic = dynamic; - ret = _opp_add(dev, new_opp, dev_opp); + ret = _opp_add(dev, new_opp, opp_table); if (ret) goto free_opp; - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); /* * Notify the changes in the availability of the operable * frequency/voltage list. 
*/ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); return 0; free_opp: - _opp_remove(dev_opp, new_opp, false); + _opp_remove(opp_table, new_opp, false); unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); return ret; } /* TODO: Support multiple regulators */ -static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) +static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, + struct opp_table *opp_table) { u32 microvolt[3] = {0}; u32 val; int count, ret; + struct property *prop = NULL; + char name[NAME_MAX]; + + /* Search for "opp-microvolt-<name>" */ + if (opp_table->prop_name) { + snprintf(name, sizeof(name), "opp-microvolt-%s", + opp_table->prop_name); + prop = of_find_property(opp->np, name, NULL); + } - /* Missing property isn't a problem, but an invalid entry is */ - if (!of_find_property(opp->np, "opp-microvolt", NULL)) - return 0; + if (!prop) { + /* Search for "opp-microvolt" */ + sprintf(name, "opp-microvolt"); + prop = of_find_property(opp->np, name, NULL); - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + /* Missing property isn't a problem, but an invalid entry is */ + if (!prop) + return 0; + } + + count = of_property_count_u32_elems(opp->np, name); if (count < 0) { - dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid %s property (%d)\n", + __func__, name, count); return count; } /* There can be one or three elements here */ if (count != 1 && count != 3) { - dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", + __func__, name, count); return -EINVAL; } - ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, - count); + ret = of_property_read_u32_array(opp->np, name, microvolt, count); if (ret) { - dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, - ret); + dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); return -EINVAL; } @@ -818,22 +1169,391 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) opp->u_volt_max = microvolt[2]; } - if (!of_property_read_u32(opp->np, "opp-microamp", &val)) + /* Search for "opp-microamp-<name>" */ + prop = NULL; + if (opp_table->prop_name) { + snprintf(name, sizeof(name), "opp-microamp-%s", + opp_table->prop_name); + prop = of_find_property(opp->np, name, NULL); + } + + if (!prop) { + /* Search for "opp-microamp" */ + sprintf(name, "opp-microamp"); + prop = of_find_property(opp->np, name, NULL); + } + + if (prop && !of_property_read_u32(opp->np, name, &val)) opp->u_amp = val; return 0; } /** + * dev_pm_opp_set_supported_hw() - Set supported platforms + * @dev: Device for which supported-hw has to be set. + * @versions: Array of hierarchy of versions to match. + * @count: Number of elements in the array. + * + * This is required only for the V2 bindings, and it enables a platform to + * specify the hierarchy of versions it supports. OPP layer will then enable + * OPPs, which are available for those versions, based on its 'opp-supported-hw' + * property. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. 
Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count) +{ + struct opp_table *opp_table; + int ret = 0; + + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); + + opp_table = _add_opp_table(dev); + if (!opp_table) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + /* Do we already have a version hierarchy associated with opp_table? */ + if (opp_table->supported_hw) { + dev_err(dev, "%s: Already have supported hardware list\n", + __func__); + ret = -EBUSY; + goto err; + } + + opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), + GFP_KERNEL); + if (!opp_table->supported_hw) { + ret = -ENOMEM; + goto err; + } + + opp_table->supported_hw_count = count; + mutex_unlock(&opp_table_lock); + return 0; + +err: + _remove_opp_table(opp_table); +unlock: + mutex_unlock(&opp_table_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); + +/** + * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw + * @dev: Device for which supported-hw has to be put. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure + * will not be freed. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_supported_hw(struct device *dev) +{ + struct opp_table *opp_table; + + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); + + /* Check for existing table for 'dev' first */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "Failed to find opp_table: %ld\n", + PTR_ERR(opp_table)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + if (!opp_table->supported_hw) { + dev_err(dev, "%s: Doesn't have supported hardware list\n", + __func__); + goto unlock; + } + + kfree(opp_table->supported_hw); + opp_table->supported_hw = NULL; + opp_table->supported_hw_count = 0; + + /* Try freeing opp_table if this was the last blocking resource */ + _remove_opp_table(opp_table); + +unlock: + mutex_unlock(&opp_table_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); + +/** + * dev_pm_opp_set_prop_name() - Set prop-extn name + * @dev: Device for which the prop-name has to be set. + * @name: name to postfix to properties. + * + * This is required only for the V2 bindings, and it enables a platform to + * specify the extn to be used for certain property names. The properties to + * which the extension will apply are opp-microvolt and opp-microamp. OPP core + * should postfix the property name with -<name> while looking for them. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. 
Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + struct opp_table *opp_table; + int ret = 0; + + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); + + opp_table = _add_opp_table(dev); + if (!opp_table) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + /* Do we already have a prop-name associated with opp_table? */ + if (opp_table->prop_name) { + dev_err(dev, "%s: Already have prop-name %s\n", __func__, + opp_table->prop_name); + ret = -EBUSY; + goto err; + } + + opp_table->prop_name = kstrdup(name, GFP_KERNEL); + if (!opp_table->prop_name) { + ret = -ENOMEM; + goto err; + } + + mutex_unlock(&opp_table_lock); + return 0; + +err: + _remove_opp_table(opp_table); +unlock: + mutex_unlock(&opp_table_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); + +/** + * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name + * @dev: Device for which the prop-name has to be put. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure + * will not be freed. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_prop_name(struct device *dev) +{ + struct opp_table *opp_table; + + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); + + /* Check for existing table for 'dev' first */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "Failed to find opp_table: %ld\n", + PTR_ERR(opp_table)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + if (!opp_table->prop_name) { + dev_err(dev, "%s: Doesn't have a prop-name\n", __func__); + goto unlock; + } + + kfree(opp_table->prop_name); + opp_table->prop_name = NULL; + + /* Try freeing opp_table if this was the last blocking resource */ + _remove_opp_table(opp_table); + +unlock: + mutex_unlock(&opp_table_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); + +/** + * dev_pm_opp_set_regulator() - Set regulator name for the device + * @dev: Device for which regulator name is being set. + * @name: Name of the regulator. + * + * In order to support OPP switching, OPP layer needs to know the name of the + * device's regulator, as the core would be required to switch voltages as well. + * + * This must be called before any OPPs are initialized for the device. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
+ */ +int dev_pm_opp_set_regulator(struct device *dev, const char *name) +{ + struct opp_table *opp_table; + struct regulator *reg; + int ret; + + mutex_lock(&opp_table_lock); + + opp_table = _add_opp_table(dev); + if (!opp_table) { + ret = -ENOMEM; + goto unlock; + } + + /* This should be called before OPPs are initialized */ + if (WARN_ON(!list_empty(&opp_table->opp_list))) { + ret = -EBUSY; + goto err; + } + + /* Already have a regulator set */ + if (WARN_ON(!IS_ERR(opp_table->regulator))) { + ret = -EBUSY; + goto err; + } + /* Allocate the regulator */ + reg = regulator_get_optional(dev, name); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + if (ret != -EPROBE_DEFER) + dev_err(dev, "%s: no regulator (%s) found: %d\n", + __func__, name, ret); + goto err; + } + + opp_table->regulator = reg; + + mutex_unlock(&opp_table_lock); + return 0; + +err: + _remove_opp_table(opp_table); +unlock: + mutex_unlock(&opp_table_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator); + +/** + * dev_pm_opp_put_regulator() - Releases resources blocked for regulator + * @dev: Device for which regulator was set. + * + * Locking: The internal opp_table and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_regulator(struct device *dev) +{ + struct opp_table *opp_table; + + mutex_lock(&opp_table_lock); + + /* Check for existing table for 'dev' first */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "Failed to find opp_table: %ld\n", + PTR_ERR(opp_table)); + goto unlock; + } + + if (IS_ERR(opp_table->regulator)) { + dev_err(dev, "%s: Doesn't have regulator set\n", __func__); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating opp_table */ + WARN_ON(!list_empty(&opp_table->opp_list)); + + regulator_put(opp_table->regulator); + opp_table->regulator = ERR_PTR(-ENXIO); + + /* Try freeing opp_table if this was the last blocking resource */ + _remove_opp_table(opp_table); + +unlock: + mutex_unlock(&opp_table_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); + +static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, + struct device_node *np) +{ + unsigned int count = opp_table->supported_hw_count; + u32 version; + int ret; + + if (!opp_table->supported_hw) + return true; + + while (count--) { + ret = of_property_read_u32_index(np, "opp-supported-hw", count, + &version); + if (ret) { + dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", + __func__, count, ret); + return false; + } + + /* Both of these are bitwise masks of the versions */ + if (!(version & opp_table->supported_hw[count])) + return false; + } + + return true; +} + +/** * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) * @dev: device for which we do this operation * @np: device node * - * This function adds an opp definition to the opp list and returns status. The + * This function adds an opp definition to the opp table and returns status. The * opp can be controlled using dev_pm_opp_enable/disable functions and may be * removed by dev_pm_opp_remove. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. 
* Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -849,16 +1569,16 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev) */ static int _opp_add_static_v2(struct device *dev, struct device_node *np) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *new_opp; u64 rate; u32 val; int ret; - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); - new_opp = _allocate_opp(dev, &dev_opp); + new_opp = _allocate_opp(dev, &opp_table); if (!new_opp) { ret = -ENOMEM; goto unlock; @@ -870,6 +1590,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) goto free_opp; } + /* Check if the OPP supports hardware's hierarchy of versions or not */ + if (!_opp_is_supported(dev, opp_table, np)) { + dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); + goto free_opp; + } + /* * Rate is defined as an unsigned long in clk API, and so casting * explicitly to its type. Must be fixed once rate is 64 bit @@ -885,28 +1611,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; - ret = opp_parse_supplies(new_opp, dev); + ret = opp_parse_supplies(new_opp, dev, opp_table); if (ret) goto free_opp; - ret = _opp_add(dev, new_opp, dev_opp); + ret = _opp_add(dev, new_opp, opp_table); if (ret) goto free_opp; /* OPP to select on device suspend */ if (of_property_read_bool(np, "opp-suspend")) { - if (dev_opp->suspend_opp) + if (opp_table->suspend_opp) { dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", - __func__, dev_opp->suspend_opp->rate, + __func__, opp_table->suspend_opp->rate, new_opp->rate); - else - dev_opp->suspend_opp = new_opp; + } else { + new_opp->suspend = true; + opp_table->suspend_opp = new_opp; + } } - if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) - dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; + if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) + opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, @@ -917,13 +1645,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) * Notify the changes in the availability of the operable * frequency/voltage list. */ - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp); + srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); return 0; free_opp: - _opp_remove(dev_opp, new_opp, false); + _opp_remove(opp_table, new_opp, false); unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); return ret; } @@ -933,11 +1661,11 @@ unlock: * @freq: Frequency in Hz for this OPP * @u_volt: Voltage in uVolts for this OPP * - * This function adds an opp definition to the opp list and returns status. + * This function adds an opp definition to the opp table and returns status. * The opp is made available by default and it can be controlled using * dev_pm_opp_enable/disable functions. * - * Locking: The internal device_opp and opp structures are RCU protected. 
+ * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -969,7 +1697,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); * copy operation, returns 0 if no modification was done OR modification was * successful. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks to * keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -978,7 +1706,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); int r = 0; @@ -987,18 +1715,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, if (!new_opp) return -ENOMEM; - mutex_lock(&dev_opp_list_lock); + mutex_lock(&opp_table_lock); - /* Find the device_opp */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - r = PTR_ERR(dev_opp); + /* Find the opp_table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + r = PTR_ERR(opp_table); dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); goto unlock; } /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { if (tmp_opp->rate == freq) { opp = tmp_opp; break; @@ -1019,21 +1747,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, new_opp->available = availability_req; list_replace_rcu(&opp->node, &new_opp->node); - mutex_unlock(&dev_opp_list_lock); - call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + mutex_unlock(&opp_table_lock); + call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); /* Notify the change of the OPP availability */ if (availability_req) - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE, - new_opp); + srcu_notifier_call_chain(&opp_table->srcu_head, + OPP_EVENT_ENABLE, new_opp); else - srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE, - new_opp); + srcu_notifier_call_chain(&opp_table->srcu_head, + OPP_EVENT_DISABLE, new_opp); return 0; unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); kfree(new_opp); return r; } @@ -1047,7 +1775,7 @@ unlock: * corresponding error value. It is meant to be used for users an OPP available * after being temporarily made unavailable with dev_pm_opp_disable. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function indirectly uses RCU and mutex locks to keep the * integrity of the internal data structures. Callers should ensure that * this function is *NOT* called under RCU protection or in contexts where @@ -1073,7 +1801,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * control by users to make this OPP not available until the circumstances are * right to make it available again (with a call to dev_pm_opp_enable). 
* - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function indirectly uses RCU and mutex locks to keep the * integrity of the internal data structures. Callers should ensure that * this function is *NOT* called under RCU protection or in contexts where @@ -1091,26 +1819,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable); /** * dev_pm_opp_get_notifier() - find notifier_head of the device with opp - * @dev: device pointer used to lookup device OPPs. + * @dev: device pointer used to lookup OPP table. * * Return: pointer to notifier head if found, otherwise -ENODEV or * -EINVAL based on type of error casted as pointer. value must be checked * with IS_ERR to determine valid pointer or error result. * - * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU - * protected pointer. The reason for the same is that the opp pointer which is - * returned will remain valid for use with opp_get_{voltage, freq} only while + * Locking: This function must be called under rcu_read_lock(). opp_table is a + * RCU protected pointer. The reason for the same is that the opp pointer which + * is returned will remain valid for use with opp_get_{voltage, freq} only while * under the locked area. The pointer returned must be used prior to unlocking * with rcu_read_unlock() to maintain the integrity of the pointer. */ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) { - struct device_opp *dev_opp = _find_device_opp(dev); + struct opp_table *opp_table = _find_opp_table(dev); - if (IS_ERR(dev_opp)) - return ERR_CAST(dev_opp); /* matching type */ + if (IS_ERR(opp_table)) + return ERR_CAST(opp_table); /* matching type */ - return &dev_opp->srcu_head; + return &opp_table->srcu_head; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); @@ -1118,11 +1846,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); /** * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT * entries - * @dev: device pointer used to lookup device OPPs. + * @dev: device pointer used to lookup OPP table. * * Free OPPs created using static entries present in DT. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function indirectly uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where @@ -1130,38 +1858,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); */ void dev_pm_opp_of_remove_table(struct device *dev) { - struct device_opp *dev_opp; + struct opp_table *opp_table; struct dev_pm_opp *opp, *tmp; - /* Hold our list modification lock here */ - mutex_lock(&dev_opp_list_lock); + /* Hold our table modification lock here */ + mutex_lock(&opp_table_lock); - /* Check for existing list for 'dev' */ - dev_opp = _find_device_opp(dev); - if (IS_ERR(dev_opp)) { - int error = PTR_ERR(dev_opp); + /* Check for existing table for 'dev' */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int error = PTR_ERR(opp_table); if (error != -ENODEV) - WARN(1, "%s: dev_opp: %d\n", + WARN(1, "%s: opp_table: %d\n", IS_ERR_OR_NULL(dev) ? 
"Invalid device" : dev_name(dev), error); goto unlock; } - /* Find if dev_opp manages a single device */ - if (list_is_singular(&dev_opp->dev_list)) { + /* Find if opp_table manages a single device */ + if (list_is_singular(&opp_table->dev_list)) { /* Free static OPPs */ - list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { if (!opp->dynamic) - _opp_remove(dev_opp, opp, true); + _opp_remove(opp_table, opp, true); } } else { - _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp); + _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table); } unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); @@ -1182,22 +1910,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev) static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) { struct device_node *np; - struct device_opp *dev_opp; + struct opp_table *opp_table; int ret = 0, count = 0; - mutex_lock(&dev_opp_list_lock); + mutex_lock(&opp_table_lock); - dev_opp = _managed_opp(opp_np); - if (dev_opp) { + opp_table = _managed_opp(opp_np); + if (opp_table) { /* OPPs are already managed */ - if (!_add_list_dev(dev, dev_opp)) + if (!_add_opp_dev(dev, opp_table)) ret = -ENOMEM; - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); return ret; } - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); - /* We have opp-list node now, iterate over it and add OPPs */ + /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_np, np) { count++; @@ -1213,19 +1941,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) if (WARN_ON(!count)) return -ENOENT; - mutex_lock(&dev_opp_list_lock); + mutex_lock(&opp_table_lock); - dev_opp = _find_device_opp(dev); - if (WARN_ON(IS_ERR(dev_opp))) { - ret = PTR_ERR(dev_opp); - mutex_unlock(&dev_opp_list_lock); + opp_table = _find_opp_table(dev); + if (WARN_ON(IS_ERR(opp_table))) { + ret = PTR_ERR(opp_table); + mutex_unlock(&opp_table_lock); goto free_table; } - dev_opp->np = opp_np; - dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); + opp_table->np = opp_np; + opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); return 0; @@ -1254,7 +1982,7 @@ static int _of_add_opp_table_v1(struct device *dev) */ nr = prop->length / sizeof(u32); if (nr % 2) { - dev_err(dev, "%s: Invalid OPP list\n", __func__); + dev_err(dev, "%s: Invalid OPP table\n", __func__); return -EINVAL; } @@ -1274,11 +2002,11 @@ static int _of_add_opp_table_v1(struct device *dev) /** * dev_pm_opp_of_add_table() - Initialize opp table from device tree - * @dev: device pointer used to lookup device OPPs. + * @dev: device pointer used to lookup OPP table. * * Register the initial OPP table with the OPP library for given device. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Hence this function indirectly uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. 
Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c index 7b445e88a0d5..ba2bdbd932ef 100644 --- a/drivers/base/power/opp/cpu.c +++ b/drivers/base/power/opp/cpu.c @@ -31,7 +31,7 @@ * @table: Cpufreq table returned back to caller * * Generate a cpufreq table for a provided device- this assumes that the - * opp list is already initialized and ready for usage. + * opp table is already initialized and ready for usage. * * This function allocates required memory for the cpufreq table. It is * expected that the caller does the required maintenance such as freeing @@ -44,7 +44,7 @@ * WARNING: It is important for the callers to ensure refreshing their copy of * the table if any of the mentioned functions have been invoked in the interim. * - * Locking: The internal device_opp and opp structures are RCU protected. + * Locking: The internal opp_table and opp structures are RCU protected. * Since we just use the regular accessor functions to access the internal data * structures, we use RCU read lock inside this function. As a result, users of * this function DONOT need to use explicit locks for invoking. @@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); /* Required only for V1 bindings, as v2 can manage it from DT itself */ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) { - struct device_list_opp *list_dev; - struct device_opp *dev_opp; + struct opp_device *opp_dev; + struct opp_table *opp_table; struct device *dev; int cpu, ret = 0; - mutex_lock(&dev_opp_list_lock); + mutex_lock(&opp_table_lock); - dev_opp = _find_device_opp(cpu_dev); - if (IS_ERR(dev_opp)) { + opp_table = _find_opp_table(cpu_dev); + if (IS_ERR(opp_table)) { ret = -EINVAL; goto unlock; } @@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) continue; } - list_dev = _add_list_dev(dev, dev_opp); - if (!list_dev) { - dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", + opp_dev = _add_opp_dev(dev, opp_table); + if (!opp_dev) { + dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n", __func__, cpu); continue; } } unlock: - mutex_unlock(&dev_opp_list_lock); + mutex_unlock(&opp_table_lock); return ret; } @@ -214,7 +214,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); /* * Works only for OPP v2 bindings. * - * cpumask should be already set to mask of cpu_dev->id. * Returns -ENOENT if operating-points-v2 bindings aren't supported. */ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) @@ -230,6 +229,8 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask return -ENOENT; } + cpumask_set_cpu(cpu_dev->id, cpumask); + /* OPPs are shared ? */ if (!of_property_read_bool(np, "opp-shared")) goto put_cpu_node; diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c new file mode 100644 index 000000000000..ef1ae6b52042 --- /dev/null +++ b/drivers/base/power/opp/debugfs.c @@ -0,0 +1,218 @@ +/* + * Generic OPP debugfs interface + * + * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/limits.h> + +#include "opp.h" + +static struct dentry *rootdir; + +static void opp_set_dev_name(const struct device *dev, char *name) +{ + if (dev->parent) + snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent), + dev_name(dev)); + else + snprintf(name, NAME_MAX, "%s", dev_name(dev)); +} + +void opp_debug_remove_one(struct dev_pm_opp *opp) +{ + debugfs_remove_recursive(opp->dentry); +} + +int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) +{ + struct dentry *pdentry = opp_table->dentry; + struct dentry *d; + char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */ + + /* Rate is unique to each OPP, use it to give opp-name */ + snprintf(name, sizeof(name), "opp:%lu", opp->rate); + + /* Create per-opp directory */ + d = debugfs_create_dir(name, pdentry); + if (!d) + return -ENOMEM; + + if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) + return -ENOMEM; + + if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) + return -ENOMEM; + + if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) + return -ENOMEM; + + if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) + return -ENOMEM; + + if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp)) + return -ENOMEM; + + if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, + &opp->clock_latency_ns)) + return -ENOMEM; + + opp->dentry = d; + return 0; +} + +static int opp_list_debug_create_dir(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ + const struct device *dev = opp_dev->dev; + struct dentry *d; + + opp_set_dev_name(dev, opp_table->dentry_name); + + /* Create device specific directory */ + d = debugfs_create_dir(opp_table->dentry_name, rootdir); + if (!d) { + dev_err(dev, "%s: Failed to create debugfs dir\n", __func__); + return -ENOMEM; + } + + opp_dev->dentry = d; + opp_table->dentry = d; + + return 0; +} + +static int opp_list_debug_create_link(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ + const struct device *dev = opp_dev->dev; + char name[NAME_MAX]; + struct dentry *d; + + opp_set_dev_name(opp_dev->dev, name); + + /* Create device specific directory link */ + d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name); + if (!d) { + dev_err(dev, "%s: Failed to create link\n", __func__); + return -ENOMEM; + } + + opp_dev->dentry = d; + + return 0; +} + +/** + * opp_debug_register - add a device opp node to the debugfs 'opp' directory + * @opp_dev: opp-dev pointer for device + * @opp_table: the device-opp being added + * + * Dynamically adds device specific directory in debugfs 'opp' directory. If the + * device-opp is shared with other devices, then links will be created for all + * devices except the first. + * + * Return: 0 on success, otherwise negative error. 
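The name buffer in opp_debug_create_one() above encodes a small sizing argument: "opp:" plus a 64-bit decimal rate plus the terminating NUL fits in 25 bytes. A quick userspace check of that arithmetic (illustration only, assuming a 64-bit unsigned long):

/* Verify the "opp:%lu" sizing: 4 ("opp:") + 20 digits + 1 ('\0') = 25. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[25];
	int n = snprintf(name, sizeof(name), "opp:%lu", ULONG_MAX);

	/* n is the length snprintf wanted to write, excluding '\0' */
	printf("\"%s\" needs %d bytes (buffer is %zu)\n",
	       name, n + 1, sizeof(name));
	return (n + 1 <= (int)sizeof(name)) ? 0 : 1;
}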
+ */ +int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) +{ + if (!rootdir) { + pr_debug("%s: Uninitialized rootdir\n", __func__); + return -EINVAL; + } + + if (opp_table->dentry) + return opp_list_debug_create_link(opp_dev, opp_table); + + return opp_list_debug_create_dir(opp_dev, opp_table); +} + +static void opp_migrate_dentry(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ + struct opp_device *new_dev; + const struct device *dev; + struct dentry *dentry; + + /* Look for next opp-dev */ + list_for_each_entry(new_dev, &opp_table->dev_list, node) + if (new_dev != opp_dev) + break; + + /* new_dev is guaranteed to be valid here */ + dev = new_dev->dev; + debugfs_remove_recursive(new_dev->dentry); + + opp_set_dev_name(dev, opp_table->dentry_name); + + dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir, + opp_table->dentry_name); + if (!dentry) { + dev_err(dev, "%s: Failed to rename link from: %s to %s\n", + __func__, dev_name(opp_dev->dev), dev_name(dev)); + return; + } + + new_dev->dentry = dentry; + opp_table->dentry = dentry; +} + +/** + * opp_debug_unregister - remove a device opp node from debugfs opp directory + * @opp_dev: opp-dev pointer for device + * @opp_table: the device-opp being removed + * + * Dynamically removes device specific directory from debugfs 'opp' directory. + */ +void opp_debug_unregister(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ + if (opp_dev->dentry == opp_table->dentry) { + /* Move the real dentry object under another device */ + if (!list_is_singular(&opp_table->dev_list)) { + opp_migrate_dentry(opp_dev, opp_table); + goto out; + } + opp_table->dentry = NULL; + } + + debugfs_remove_recursive(opp_dev->dentry); + +out: + opp_dev->dentry = NULL; +} + +static int __init opp_debug_init(void) +{ + /* Create /sys/kernel/debug/opp directory */ + rootdir = debugfs_create_dir("opp", NULL); + if (!rootdir) { + pr_err("%s: Failed to create root directory\n", __func__); + return -ENOMEM; + } + + return 0; +} +core_initcall(opp_debug_init); diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 7366b2aa8997..f67f806fcf3a 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h @@ -17,17 +17,21 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/limits.h> #include <linux/pm_opp.h> #include <linux/rculist.h> #include <linux/rcupdate.h> +struct clk; +struct regulator; + /* Lock to allow exclusive modification to the device and opp lists */ -extern struct mutex dev_opp_list_lock; +extern struct mutex opp_table_lock; /* * Internal data structure organization with the OPP layer library is as * follows: - * dev_opp_list (root) + * opp_tables (root) * |- device 1 (represents voltage domain 1) * | |- opp 1 (availability, freq, voltage) * | |- opp 2 .. @@ -36,23 +40,24 @@ extern struct mutex dev_opp_list_lock; * |- device 2 (represents the next voltage domain) * ... * `- device m (represents mth voltage domain) - * device 1, 2.. are represented by dev_opp structure while each opp + * device 1, 2.. are represented by opp_table structure while each opp * is represented by the opp structure. */ /** * struct dev_pm_opp - Generic OPP description structure - * @node: opp list node. The nodes are maintained throughout the lifetime + * @node: opp table node. The nodes are maintained throughout the lifetime * of boot. It is expected only an optimal set of OPPs are * added to the library by the SoC framework. 
- * RCU usage: opp list is traversed with RCU locks. node + * RCU usage: opp table is traversed with RCU locks. node * modification is possible realtime, hence the modifications - * are protected by the dev_opp_list_lock for integrity. + * are protected by the opp_table_lock for integrity. * IMPORTANT: the opp nodes should be maintained in increasing * order. - * @dynamic: not-created from static DT entries. * @available: true/false - marks if this OPP as available or not + * @dynamic: not-created from static DT entries. * @turbo: true if turbo (boost) OPP + * @suspend: true if suspend OPP * @rate: Frequency in hertz * @u_volt: Target voltage in microvolts corresponding to this OPP * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP @@ -60,9 +65,10 @@ extern struct mutex dev_opp_list_lock; * @u_amp: Maximum current drawn by the device in microamperes * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. - * @dev_opp: points back to the device_opp struct this opp belongs to + * @opp_table: points back to the opp_table struct this opp belongs to * @rcu_head: RCU callback head used for deferred freeing * @np: OPP's device node. + * @dentry: debugfs dentry pointer (per opp) * * This structure stores the OPP information for a given device. */ @@ -72,6 +78,7 @@ struct dev_pm_opp { bool available; bool dynamic; bool turbo; + bool suspend; unsigned long rate; unsigned long u_volt; @@ -80,40 +87,60 @@ struct dev_pm_opp { unsigned long u_amp; unsigned long clock_latency_ns; - struct device_opp *dev_opp; + struct opp_table *opp_table; struct rcu_head rcu_head; struct device_node *np; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif }; /** - * struct device_list_opp - devices managed by 'struct device_opp' + * struct opp_device - devices managed by 'struct opp_table' * @node: list node * @dev: device to which the struct object belongs * @rcu_head: RCU callback head used for deferred freeing + * @dentry: debugfs dentry pointer (per device) * - * This is an internal data structure maintaining the list of devices that are - * managed by 'struct device_opp'. + * This is an internal data structure maintaining the devices that are managed + * by 'struct opp_table'. */ -struct device_list_opp { +struct opp_device { struct list_head node; const struct device *dev; struct rcu_head rcu_head; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif }; /** - * struct device_opp - Device opp structure - * @node: list node - contains the devices with OPPs that + * struct opp_table - Device opp structure + * @node: table node - contains the devices with OPPs that * have been registered. Nodes once added are not modified in this - * list. - * RCU usage: nodes are not modified in the list of device_opp, - * however addition is possible and is secured by dev_opp_list_lock + * table. + * RCU usage: nodes are not modified in the table of opp_table, + * however addition is possible and is secured by opp_table_lock * @srcu_head: notifier head to notify the OPP availability changes. * @rcu_head: RCU callback head used for deferred freeing * @dev_list: list of devices that share these OPPs - * @opp_list: list of opps + * @opp_list: table of opps * @np: struct device_node pointer for opp's DT node. + * @clock_latency_ns_max: Max clock latency in nanoseconds. * @shared_opp: OPP is shared between multiple devices. + * @suspend_opp: Pointer to OPP to be used during device suspend. + * @supported_hw: Array of version number to support. 
+ * @supported_hw_count: Number of elements in supported_hw array. + * @prop_name: A name to postfix to many DT properties, while parsing them. + * @clk: Device's clock handle + * @regulator: Supply regulator + * @dentry: debugfs dentry pointer of the real device directory (not links). + * @dentry_name: Name of the real dentry. + * + * @voltage_tolerance_v1: In percentage, for v1 bindings only. * * This is an internal data structure maintaining the link to opps attached to * a device. This structure is not meant to be shared to users as it is @@ -123,7 +150,7 @@ struct device_list_opp { * need to wait for the grace period of both of them before freeing any * resources. And so we have used kfree_rcu() from within call_srcu() handlers. */ -struct device_opp { +struct opp_table { struct list_head node; struct srcu_notifier_head srcu_head; @@ -133,14 +160,48 @@ struct device_opp { struct device_node *np; unsigned long clock_latency_ns_max; + + /* For backward compatibility with v1 bindings */ + unsigned int voltage_tolerance_v1; + bool shared_opp; struct dev_pm_opp *suspend_opp; + + unsigned int *supported_hw; + unsigned int supported_hw_count; + const char *prop_name; + struct clk *clk; + struct regulator *regulator; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; + char dentry_name[NAME_MAX]; +#endif }; /* Routines internal to opp core */ -struct device_opp *_find_device_opp(struct device *dev); -struct device_list_opp *_add_list_dev(const struct device *dev, - struct device_opp *dev_opp); +struct opp_table *_find_opp_table(struct device *dev); +struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); struct device_node *_of_get_opp_desc_node(struct device *dev); +#ifdef CONFIG_DEBUG_FS +void opp_debug_remove_one(struct dev_pm_opp *opp); +int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); +int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); +void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table); +#else +static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} + +static inline int opp_debug_create_one(struct dev_pm_opp *opp, + struct opp_table *opp_table) +{ return 0; } +static inline int opp_debug_register(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ return 0; } + +static inline void opp_debug_unregister(struct opp_device *opp_dev, + struct opp_table *opp_table) +{ } +#endif /* DEBUG_FS */ + #endif /* __DRIVER_OPP_H__ */ diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 90d64081ddb3..f951f911786e 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -31,9 +31,8 @@ struct private_data { struct device *cpu_dev; - struct regulator *cpu_reg; struct thermal_cooling_device *cdev; - unsigned int voltage_tolerance; /* in percentage */ + const char *reg_name; }; static struct freq_attr *cpufreq_dt_attr[] = { @@ -44,175 +43,128 @@ static struct freq_attr *cpufreq_dt_attr[] = { static int set_target(struct cpufreq_policy *policy, unsigned int index) { - struct dev_pm_opp *opp; - struct cpufreq_frequency_table *freq_table = policy->freq_table; - struct clk *cpu_clk = policy->clk; struct private_data *priv = policy->driver_data; - struct device *cpu_dev = priv->cpu_dev; - struct regulator *cpu_reg = priv->cpu_reg; - unsigned long volt = 0, volt_old = 0, tol = 0; - unsigned int old_freq, new_freq; - long freq_Hz, freq_exact; - int ret; - - freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 
1000); - if (freq_Hz <= 0) - freq_Hz = freq_table[index].frequency * 1000; - freq_exact = freq_Hz; - new_freq = freq_Hz / 1000; - old_freq = clk_get_rate(cpu_clk) / 1000; + return dev_pm_opp_set_rate(priv->cpu_dev, + policy->freq_table[index].frequency * 1000); +} - if (!IS_ERR(cpu_reg)) { - unsigned long opp_freq; +/* + * An earlier version of opp-v1 bindings used to name the regulator + * "cpu0-supply", we still need to handle that for backwards compatibility. + */ +static const char *find_supply_name(struct device *dev) +{ + struct device_node *np; + struct property *pp; + int cpu = dev->id; + const char *name = NULL; - rcu_read_lock(); - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); - if (IS_ERR(opp)) { - rcu_read_unlock(); - dev_err(cpu_dev, "failed to find OPP for %ld\n", - freq_Hz); - return PTR_ERR(opp); - } - volt = dev_pm_opp_get_voltage(opp); - opp_freq = dev_pm_opp_get_freq(opp); - rcu_read_unlock(); - tol = volt * priv->voltage_tolerance / 100; - volt_old = regulator_get_voltage(cpu_reg); - dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n", - opp_freq / 1000, volt); - } + np = of_node_get(dev->of_node); - dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", - old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1, - new_freq / 1000, volt ? volt / 1000 : -1); + /* This must be valid for sure */ + if (WARN_ON(!np)) + return NULL; - /* scaling up? scale voltage before frequency */ - if (!IS_ERR(cpu_reg) && new_freq > old_freq) { - ret = regulator_set_voltage_tol(cpu_reg, volt, tol); - if (ret) { - dev_err(cpu_dev, "failed to scale voltage up: %d\n", - ret); - return ret; + /* Try "cpu0" for older DTs */ + if (!cpu) { + pp = of_find_property(np, "cpu0-supply", NULL); + if (pp) { + name = "cpu0"; + goto node_put; } } - ret = clk_set_rate(cpu_clk, freq_exact); - if (ret) { - dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); - if (!IS_ERR(cpu_reg) && volt_old > 0) - regulator_set_voltage_tol(cpu_reg, volt_old, tol); - return ret; + pp = of_find_property(np, "cpu-supply", NULL); + if (pp) { + name = "cpu"; + goto node_put; } - /* scaling down? scale voltage after frequency */ - if (!IS_ERR(cpu_reg) && new_freq < old_freq) { - ret = regulator_set_voltage_tol(cpu_reg, volt, tol); - if (ret) { - dev_err(cpu_dev, "failed to scale voltage down: %d\n", - ret); - clk_set_rate(cpu_clk, old_freq * 1000); - } - } - - return ret; + dev_dbg(dev, "no regulator for cpu%d\n", cpu); +node_put: + of_node_put(np); + return name; } -static int allocate_resources(int cpu, struct device **cdev, - struct regulator **creg, struct clk **cclk) +static int resources_available(void) { struct device *cpu_dev; struct regulator *cpu_reg; struct clk *cpu_clk; int ret = 0; - char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg; + const char *name; - cpu_dev = get_cpu_device(cpu); + cpu_dev = get_cpu_device(0); if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); + pr_err("failed to get cpu0 device\n"); return -ENODEV; } - /* Try "cpu0" for older DTs */ - if (!cpu) - reg = reg_cpu0; - else - reg = reg_cpu; - -try_again: - cpu_reg = regulator_get_optional(cpu_dev, reg); - if (IS_ERR(cpu_reg)) { + cpu_clk = clk_get(cpu_dev, NULL); + ret = PTR_ERR_OR_ZERO(cpu_clk); + if (ret) { /* - * If cpu's regulator supply node is present, but regulator is - * not yet registered, we should try defering probe. + * If cpu's clk node is present, but clock is not yet + * registered, we should try defering probe. 
*/ - if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { - dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", - cpu); - return -EPROBE_DEFER; - } - - /* Try with "cpu-supply" */ - if (reg == reg_cpu0) { - reg = reg_cpu; - goto try_again; - } + if (ret == -EPROBE_DEFER) + dev_dbg(cpu_dev, "clock not ready, retry\n"); + else + dev_err(cpu_dev, "failed to get clock: %d\n", ret); - dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n", - cpu, PTR_ERR(cpu_reg)); + return ret; } - cpu_clk = clk_get(cpu_dev, NULL); - if (IS_ERR(cpu_clk)) { - /* put regulator */ - if (!IS_ERR(cpu_reg)) - regulator_put(cpu_reg); + clk_put(cpu_clk); - ret = PTR_ERR(cpu_clk); + name = find_supply_name(cpu_dev); + /* Platform doesn't require regulator */ + if (!name) + return 0; + cpu_reg = regulator_get_optional(cpu_dev, name); + ret = PTR_ERR_OR_ZERO(cpu_reg); + if (ret) { /* - * If cpu's clk node is present, but clock is not yet - * registered, we should try defering probe. + * If cpu's regulator supply node is present, but regulator is + * not yet registered, we should try defering probe. */ if (ret == -EPROBE_DEFER) - dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu); + dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n"); else - dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu, - ret); - } else { - *cdev = cpu_dev; - *creg = cpu_reg; - *cclk = cpu_clk; + dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret); + + return ret; } - return ret; + regulator_put(cpu_reg); + return 0; } static int cpufreq_init(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *freq_table; - struct device_node *np; struct private_data *priv; struct device *cpu_dev; - struct regulator *cpu_reg; struct clk *cpu_clk; struct dev_pm_opp *suspend_opp; - unsigned long min_uV = ~0, max_uV = 0; unsigned int transition_latency; - bool need_update = false; + bool opp_v1 = false; + const char *name; int ret; - ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); - if (ret) { - pr_err("%s: Failed to allocate resources: %d\n", __func__, ret); - return ret; + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", policy->cpu); + return -ENODEV; } - np = of_node_get(cpu_dev->of_node); - if (!np) { - dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); - ret = -ENOENT; - goto out_put_reg_clk; + cpu_clk = clk_get(cpu_dev, NULL); + if (IS_ERR(cpu_clk)) { + ret = PTR_ERR(cpu_clk); + dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret); + return ret; } /* Get OPP-sharing information from "operating-points-v2" bindings */ @@ -223,9 +175,23 @@ static int cpufreq_init(struct cpufreq_policy *policy) * finding shared-OPPs for backward compatibility. */ if (ret == -ENOENT) - need_update = true; + opp_v1 = true; else - goto out_node_put; + goto out_put_clk; + } + + /* + * OPP layer will be taking care of regulators now, but it needs to know + * the name of the regulator first. 
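Put together, the new flow discovers the supply name once, hands it to the OPP core, and then lets dev_pm_opp_set_rate() sequence the clock and regulator on every transition. A condensed, hypothetical sketch of that flow (error handling trimmed; it assumes the cpufreq-dt.c context above, where find_supply_name() is defined and dev_pm_opp_set_regulator() returns an int):

#include <linux/device.h>
#include <linux/pm_opp.h>

/* One-time init: tell the OPP core which supply (if any) to manage. */
static int example_init_opp_regulator(struct device *cpu_dev)
{
	const char *name;

	name = find_supply_name(cpu_dev);	/* "cpu0", "cpu" or NULL */
	if (name)
		return dev_pm_opp_set_regulator(cpu_dev, name);

	return 0;				/* platform has no regulator */
}

/* Per-transition: clock and (optional) regulator handled by the OPP core. */
static int example_set_target(struct device *cpu_dev, unsigned long freq_hz)
{
	return dev_pm_opp_set_rate(cpu_dev, freq_hz);
}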
+ */ + name = find_supply_name(cpu_dev); + if (name) { + ret = dev_pm_opp_set_regulator(cpu_dev, name); + if (ret) { + dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n", + policy->cpu, ret); + goto out_put_clk; + } } /* @@ -246,12 +212,12 @@ static int cpufreq_init(struct cpufreq_policy *policy) */ ret = dev_pm_opp_get_opp_count(cpu_dev); if (ret <= 0) { - pr_debug("OPP table is not ready, deferring probe\n"); + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); ret = -EPROBE_DEFER; goto out_free_opp; } - if (need_update) { + if (opp_v1) { struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data(); if (!pd || !pd->independent_clocks) @@ -265,10 +231,6 @@ static int cpufreq_init(struct cpufreq_policy *policy) if (ret) dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); - - of_property_read_u32(np, "clock-latency", &transition_latency); - } else { - transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev); } priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -277,62 +239,16 @@ static int cpufreq_init(struct cpufreq_policy *policy) goto out_free_opp; } - of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); - - if (!transition_latency) - transition_latency = CPUFREQ_ETERNAL; - - if (!IS_ERR(cpu_reg)) { - unsigned long opp_freq = 0; - - /* - * Disable any OPPs where the connected regulator isn't able to - * provide the specified voltage and record minimum and maximum - * voltage levels. - */ - while (1) { - struct dev_pm_opp *opp; - unsigned long opp_uV, tol_uV; - - rcu_read_lock(); - opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq); - if (IS_ERR(opp)) { - rcu_read_unlock(); - break; - } - opp_uV = dev_pm_opp_get_voltage(opp); - rcu_read_unlock(); - - tol_uV = opp_uV * priv->voltage_tolerance / 100; - if (regulator_is_supported_voltage(cpu_reg, - opp_uV - tol_uV, - opp_uV + tol_uV)) { - if (opp_uV < min_uV) - min_uV = opp_uV; - if (opp_uV > max_uV) - max_uV = opp_uV; - } else { - dev_pm_opp_disable(cpu_dev, opp_freq); - } - - opp_freq++; - } - - ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); - if (ret > 0) - transition_latency += ret * 1000; - } + priv->reg_name = name; ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { - pr_err("failed to init cpufreq table: %d\n", ret); + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); goto out_free_priv; } priv->cpu_dev = cpu_dev; - priv->cpu_reg = cpu_reg; policy->driver_data = priv; - policy->clk = cpu_clk; rcu_read_lock(); @@ -357,9 +273,11 @@ static int cpufreq_init(struct cpufreq_policy *policy) cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } - policy->cpuinfo.transition_latency = transition_latency; + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; - of_node_put(np); + policy->cpuinfo.transition_latency = transition_latency; return 0; @@ -369,12 +287,10 @@ out_free_priv: kfree(priv); out_free_opp: dev_pm_opp_of_cpumask_remove_table(policy->cpus); -out_node_put: - of_node_put(np); -out_put_reg_clk: + if (name) + dev_pm_opp_put_regulator(cpu_dev); +out_put_clk: clk_put(cpu_clk); - if (!IS_ERR(cpu_reg)) - regulator_put(cpu_reg); return ret; } @@ -386,9 +302,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy) cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); + if (priv->reg_name) + 
dev_pm_opp_put_regulator(priv->cpu_dev); + clk_put(policy->clk); - if (!IS_ERR(priv->cpu_reg)) - regulator_put(priv->cpu_reg); kfree(priv); return 0; @@ -407,8 +324,13 @@ static void cpufreq_ready(struct cpufreq_policy *policy) * thermal DT code takes care of matching them. */ if (of_find_property(np, "#cooling-cells", NULL)) { - priv->cdev = of_cpufreq_cooling_register(np, - policy->related_cpus); + u32 power_coefficient = 0; + + of_property_read_u32(np, "dynamic-power-coefficient", + &power_coefficient); + + priv->cdev = of_cpufreq_power_cooling_register(np, + policy->related_cpus, power_coefficient, NULL); if (IS_ERR(priv->cdev)) { dev_err(priv->cpu_dev, "running cpufreq without cooling device: %ld\n", @@ -436,9 +358,6 @@ static struct cpufreq_driver dt_cpufreq_driver = { static int dt_cpufreq_probe(struct platform_device *pdev) { - struct device *cpu_dev; - struct regulator *cpu_reg; - struct clk *cpu_clk; int ret; /* @@ -448,19 +367,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev) * * FIXME: Is checking this only for CPU0 sufficient ? */ - ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk); + ret = resources_available(); if (ret) return ret; - clk_put(cpu_clk); - if (!IS_ERR(cpu_reg)) - regulator_put(cpu_reg); - dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev); ret = cpufreq_register_driver(&dt_cpufreq_driver); if (ret) - dev_err(cpu_dev, "failed register driver: %d\n", ret); + dev_err(&pdev->dev, "failed register driver: %d\n", ret); return ret; } diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index cf478fe6b335..49a3a1185bb6 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -173,6 +173,9 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config HAVE_ARM_SMCCC + bool + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 3c0467d3688c..c4098748e1fe 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ - -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING + -mno-mmx -mno-sse cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ @@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt -KBUILD_CFLAGS := $(cflags-y) \ +KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) @@ -34,7 +34,8 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \ $(patsubst %.c,lib-%.o,$(arm-deps)) -lib-$(CONFIG_ARM64) += arm64-stub.o +lib-$(CONFIG_ARM) += arm32-stub.o +lib-$(CONFIG_ARM64) += arm64-stub.o random.o CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 950c87f5d279..d5aa1d16154f 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -18,6 +18,8 @@ #include "efistub.h" +bool __nokaslr; + static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg) { static efi_guid_t const var_guid = 
EFI_GLOBAL_VARIABLE_GUID; @@ -207,14 +209,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, pr_efi_err(sys_table, "Failed to find DRAM base\n"); goto fail; } - status = handle_kernel_image(sys_table, image_addr, &image_size, - &reserve_addr, - &reserve_size, - dram_base, image); - if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel\n"); - goto fail; - } /* * Get the command line from EFI, using the LOADED_IMAGE @@ -224,7 +218,28 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size); if (!cmdline_ptr) { pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); - goto fail_free_image; + goto fail; + } + + /* check whether 'nokaslr' was passed on the command line */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + static const u8 default_cmdline[] = CONFIG_CMDLINE; + const u8 *str, *cmdline = cmdline_ptr; + + if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) + cmdline = default_cmdline; + str = strstr(cmdline, "nokaslr"); + if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) + __nokaslr = true; + } + + status = handle_kernel_image(sys_table, image_addr, &image_size, + &reserve_addr, + &reserve_size, + dram_base, image); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to relocate kernel\n"); + goto fail_free_cmdline; } status = efi_parse_options(cmdline_ptr); @@ -244,7 +259,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (status != EFI_SUCCESS) { pr_efi_err(sys_table, "Failed to load device tree!\n"); - goto fail_free_cmdline; + goto fail_free_image; } } @@ -286,12 +301,11 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, efi_free(sys_table, initrd_size, initrd_addr); efi_free(sys_table, fdt_size, fdt_addr); -fail_free_cmdline: - efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); - fail_free_image: efi_free(sys_table, image_size, *image_addr); efi_free(sys_table, reserve_size, reserve_addr); +fail_free_cmdline: + efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); fail: return EFI_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 78dfbd34b6bf..377d935a3380 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -13,6 +13,10 @@ #include <asm/efi.h> #include <asm/sections.h> +#include "efistub.h" + +extern bool __nokaslr; + efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, unsigned long *image_addr, unsigned long *image_size, @@ -23,26 +27,61 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, { efi_status_t status; unsigned long kernel_size, kernel_memsize = 0; - unsigned long nr_pages; void *old_image_addr = (void *)*image_addr; unsigned long preferred_offset; + u64 phys_seed = 0; + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + if (!__nokaslr) { + status = efi_get_random_bytes(sys_table_arg, + sizeof(phys_seed), + (u8 *)&phys_seed); + if (status == EFI_NOT_FOUND) { + pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); + } else if (status != EFI_SUCCESS) { + pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n"); + return status; + } + } else { + pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n"); + } + } /* * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond * a 2 MB aligned base, which itself may be lower than dram_base, as * long as 
the resulting offset equals or exceeds it. */ - preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET; + preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET; if (preferred_offset < dram_base) - preferred_offset += SZ_2M; + preferred_offset += MIN_KIMG_ALIGN; - /* Relocate the image, if required. */ kernel_size = _edata - _text; - if (*image_addr != preferred_offset) { - kernel_memsize = kernel_size + (_end - _edata); + kernel_memsize = kernel_size + (_end - _edata); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { + /* + * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a + * displacement in the interval [0, MIN_KIMG_ALIGN) that + * is a multiple of the minimal segment alignment (SZ_64K) + */ + u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1); + u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ? + (phys_seed >> 32) & mask : TEXT_OFFSET; + + /* + * If KASLR is enabled, and we have some randomness available, + * locate the kernel at a randomized offset in physical memory. + */ + *reserve_size = kernel_memsize + offset; + status = efi_random_alloc(sys_table_arg, *reserve_size, + MIN_KIMG_ALIGN, reserve_addr, + (u32)phys_seed); + *image_addr = *reserve_addr + offset; + } else { /* - * First, try a straight allocation at the preferred offset. + * Else, try a straight allocation at the preferred offset. * This will work around the issue where, if dram_base == 0x0, * efi_low_alloc() refuses to allocate at 0x0 (to prevent the * address of the allocation to be mistaken for a FAIL return @@ -52,27 +91,31 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, * Mustang), we can still place the kernel at the address * 'dram_base + TEXT_OFFSET'. */ + if (*image_addr == preferred_offset) + return EFI_SUCCESS; + *image_addr = *reserve_addr = preferred_offset; - nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) / - EFI_PAGE_SIZE; + *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN); + status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, nr_pages, + EFI_LOADER_DATA, + *reserve_size / EFI_PAGE_SIZE, (efi_physical_addr_t *)reserve_addr); - if (status != EFI_SUCCESS) { - kernel_memsize += TEXT_OFFSET; - status = efi_low_alloc(sys_table_arg, kernel_memsize, - SZ_2M, reserve_addr); + } - if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); - return status; - } - *image_addr = *reserve_addr + TEXT_OFFSET; + if (status != EFI_SUCCESS) { + *reserve_size = kernel_memsize + TEXT_OFFSET; + status = efi_low_alloc(sys_table_arg, *reserve_size, + MIN_KIMG_ALIGN, reserve_addr); + + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); + *reserve_size = 0; + return status; } - memcpy((void *)*image_addr, old_image_addr, kernel_size); - *reserve_size = kernel_memsize; + *image_addr = *reserve_addr + TEXT_OFFSET; } - + memcpy((void *)*image_addr, old_image_addr, kernel_size); return EFI_SUCCESS; } diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index f07d4a67fa76..29ed2f9b218c 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -649,6 +649,10 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) return dst; } +#ifndef MAX_CMDLINE_ADDRESS +#define MAX_CMDLINE_ADDRESS ULONG_MAX +#endif + /* * Convert the unicode UEFI command line to ASCII to pass to kernel. * Size of memory allocated return in *cmd_line_len. 
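The displacement computed in handle_kernel_image() above is just the upper half of the RNG seed masked down to 64 KiB-aligned slots within the kernel's alignment window. A userspace illustration of that arithmetic, assuming MIN_KIMG_ALIGN is 2 MiB (as on arm64 at the time) and the !CONFIG_DEBUG_ALIGN_RODATA case:

/* Derive the physical KASLR displacement from a sample seed. */
#include <stdint.h>
#include <stdio.h>

#define SZ_64K		0x00010000U
#define MIN_KIMG_ALIGN	0x00200000U	/* 2 MiB, assumed */

int main(void)
{
	uint64_t phys_seed = 0x1234abcd5678ef01ULL;	/* example seed */
	unsigned int mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
	unsigned int offset = (unsigned int)(phys_seed >> 32) & mask;

	/* mask = 0x1f0000: 32 possible slots, each 64 KiB apart */
	printf("mask   = %#x\n", mask);
	printf("offset = %#x (%u KiB into the 2 MiB window)\n",
	       offset, offset / 1024);
	return 0;
}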
@@ -684,7 +688,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, options_bytes++; /* NUL termination */ - status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr); + status = efi_high_alloc(sys_table_arg, options_bytes, 0, + &cmdline_addr, MAX_CMDLINE_ADDRESS); if (status != EFI_SUCCESS) return NULL; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 6b6548fda089..5ed3d3f38166 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -43,4 +43,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, unsigned long desc_size, efi_memory_desc_t *runtime_map, int *count); +efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table, + unsigned long size, u8 *out); + +efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long random_seed); + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index b62e2f5dcab3..9ee9973b5c3e 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,6 +16,22 @@ #include "efistub.h" +#define EFI_DT_ADDR_CELLS_DEFAULT 2 +#define EFI_DT_SIZE_CELLS_DEFAULT 2 + +static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) +{ + int offset; + + offset = fdt_path_offset(fdt, "/"); + /* Set the #address-cells and #size-cells values for an empty tree */ + + fdt_setprop_u32(fdt, offset, "#address-cells", + EFI_DT_ADDR_CELLS_DEFAULT); + + fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); +} + efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, unsigned long orig_fdt_size, void *fdt, int new_fdt_size, char *cmdline_ptr, @@ -45,10 +61,18 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, } } - if (orig_fdt) + if (orig_fdt) { status = fdt_open_into(orig_fdt, fdt, new_fdt_size); - else + } else { status = fdt_create_empty_tree(fdt, new_fdt_size); + if (status == 0) { + /* + * Any failure from the following function is non + * critical + */ + fdt_update_cell_size(sys_table, fdt); + } + } if (status != 0) goto fdt_set_fail; @@ -147,6 +171,20 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (status) goto fdt_set_fail; + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + efi_status_t efi_status; + + efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), + (u8 *)&fdt_val64); + if (efi_status == EFI_SUCCESS) { + status = fdt_setprop(fdt, node, "kaslr-seed", + &fdt_val64, sizeof(fdt_val64)); + if (status) + goto fdt_set_fail; + } else if (efi_status != EFI_NOT_FOUND) { + return efi_status; + } + } return EFI_SUCCESS; fdt_set_fail: diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c new file mode 100644 index 000000000000..53f6d3fe6d86 --- /dev/null +++ b/drivers/firmware/efi/libstub/random.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
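The empty-tree path in fdt.c above seeds "#address-cells"/"#size-cells" with a default of 2 so that memory and initrd nodes added later are interpreted consistently. A minimal userspace sketch of the same sequence, assuming libfdt is available (link with -lfdt); the buffer size and property values mirror the EFI_DT_*_CELLS_DEFAULT constants only by analogy:

/* Create an empty tree and set the root cell sizes, as
 * fdt_update_cell_size() does above. */
#include <libfdt.h>
#include <stdio.h>

int main(void)
{
	static char fdt[4096];
	int offset;

	if (fdt_create_empty_tree(fdt, sizeof(fdt)))
		return 1;

	offset = fdt_path_offset(fdt, "/");	/* root node */
	fdt_setprop_u32(fdt, offset, "#address-cells", 2);
	fdt_setprop_u32(fdt, offset, "#size-cells", 2);

	printf("totalsize = %u bytes\n", (unsigned int)fdt_totalsize(fdt));
	return 0;
}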
+ * + */ + +#include <linux/efi.h> +#include <asm/efi.h> + +#include "efistub.h" + +struct efi_rng_protocol { + efi_status_t (*get_info)(struct efi_rng_protocol *, + unsigned long *, efi_guid_t *); + efi_status_t (*get_rng)(struct efi_rng_protocol *, + efi_guid_t *, unsigned long, u8 *out); +}; + +efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, + unsigned long size, u8 *out) +{ + efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; + efi_status_t status; + struct efi_rng_protocol *rng; + + status = efi_call_early(locate_protocol, &rng_proto, NULL, + (void **)&rng); + if (status != EFI_SUCCESS) + return status; + + return rng->get_rng(rng, NULL, size, out); +} + +/* + * Return the number of slots covered by this entry, i.e., the number of + * addresses it covers that are suitably aligned and supply enough room + * for the allocation. + */ +static unsigned long get_entry_num_slots(efi_memory_desc_t *md, + unsigned long size, + unsigned long align) +{ + u64 start, end; + + if (md->type != EFI_CONVENTIONAL_MEMORY) + return 0; + + start = round_up(md->phys_addr, align); + end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size, + align); + + if (start > end) + return 0; + + return (end - start + 1) / align; +} + +/* + * The UEFI memory descriptors have a virtual address field that is only used + * when installing the virtual mapping using SetVirtualAddressMap(). Since it + * is unused here, we can reuse it to keep track of each descriptor's slot + * count. + */ +#define MD_NUM_SLOTS(md) ((md)->virt_addr) + +efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, + unsigned long align, + unsigned long *addr, + unsigned long random_seed) +{ + unsigned long map_size, desc_size, total_slots = 0, target_slot; + efi_status_t status; + efi_memory_desc_t *memory_map; + int map_offset; + + status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size, + &desc_size, NULL, NULL); + if (status != EFI_SUCCESS) + return status; + + if (align < EFI_ALLOC_ALIGN) + align = EFI_ALLOC_ALIGN; + + /* count the suitable slots in each memory map entry */ + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + unsigned long slots; + + slots = get_entry_num_slots(md, size, align); + MD_NUM_SLOTS(md) = slots; + total_slots += slots; + } + + /* find a random number between 0 and total_slots */ + target_slot = (total_slots * (u16)random_seed) >> 16; + + /* + * target_slot is now a value in the range [0, total_slots), and so + * it corresponds with exactly one of the suitable slots we recorded + * when iterating over the memory map the first time around. + * + * So iterate over the memory map again, subtracting the number of + * slots of each entry at each iteration, until we have found the entry + * that covers our chosen slot. Use the residual value of target_slot + * to calculate the randomly chosen address, and allocate it directly + * using EFI_ALLOCATE_ADDRESS. 
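The two-pass slot selection described above can be modelled directly in userspace: count the aligned candidate addresses per region, turn the 16-bit seed into a slot index, then walk the map again until the residual index lands inside a region. The memory map below is made up for the example; num_slots() mirrors get_entry_num_slots() above:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SIZE	0x1000ULL

struct region { uint64_t base; uint64_t pages; };	/* toy memory map */

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Mirrors get_entry_num_slots() above. */
static uint64_t num_slots(const struct region *r, uint64_t size, uint64_t align)
{
	uint64_t start = ALIGN_UP(r->base, align);
	uint64_t end = ALIGN_DOWN(r->base + r->pages * EFI_PAGE_SIZE - size,
				  align);

	return start > end ? 0 : (end - start + 1) / align;
}

int main(void)
{
	struct region map[] = {			/* made-up conventional memory */
		{ 0x40000000ULL,  0x4000 },	/*  64 MiB @ 1 GiB */
		{ 0x80000000ULL, 0x10000 },	/* 256 MiB @ 2 GiB */
	};
	uint64_t size = 16 << 20, align = 2 << 20;	/* 16 MiB, 2 MiB aligned */
	uint64_t slots[2], total = 0, target;
	uint16_t seed = 0xbeef;		/* low 16 bits of the RNG output */
	int i;

	for (i = 0; i < 2; i++) {
		slots[i] = num_slots(&map[i], size, align);
		total += slots[i];
	}

	/* Same arithmetic as above: a slot index in [0, total). */
	target = (total * seed) >> 16;

	for (i = 0; i < 2; i++) {
		if (target >= slots[i]) {
			target -= slots[i];
			continue;
		}
		printf("allocate at %#llx\n", (unsigned long long)
		       (ALIGN_UP(map[i].base, align) + target * align));
		break;
	}
	return 0;
}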
+ */ + for (map_offset = 0; map_offset < map_size; map_offset += desc_size) { + efi_memory_desc_t *md = (void *)memory_map + map_offset; + efi_physical_addr_t target; + unsigned long pages; + + if (target_slot >= MD_NUM_SLOTS(md)) { + target_slot -= MD_NUM_SLOTS(md); + continue; + } + + target = round_up(md->phys_addr, align) + target_slot * align; + pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; + + status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, pages, &target); + if (status == EFI_SUCCESS) + *addr = target; + break; + } + + efi_call_early(free_pool, memory_map); + + return status; +} diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d24f35d74b27..11bfee8b79a9 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -13,6 +13,8 @@ #define pr_fmt(fmt) "psci: " fmt +#include <linux/arm-smccc.h> +#include <linux/cpuidle.h> #include <linux/errno.h> #include <linux/linkage.h> #include <linux/of.h> @@ -20,10 +22,12 @@ #include <linux/printk.h> #include <linux/psci.h> #include <linux/reboot.h> +#include <linux/slab.h> #include <linux/suspend.h> #include <uapi/linux/psci.h> +#include <asm/cpuidle.h> #include <asm/cputype.h> #include <asm/system_misc.h> #include <asm/smp_plat.h> @@ -58,8 +62,6 @@ struct psci_operations psci_ops; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); -asmlinkage psci_fn __invoke_psci_fn_hvc; -asmlinkage psci_fn __invoke_psci_fn_smc; static psci_fn *invoke_psci_fn; enum psci_function { @@ -107,6 +109,26 @@ bool psci_power_state_is_valid(u32 state) return !(state & ~valid_mask); } +static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + static int psci_to_linux_errno(int errno) { switch (errno) { @@ -225,6 +247,123 @@ static int __init psci_features(u32 psci_func_id) psci_func_id, 0, 0); } +#ifdef CONFIG_CPU_IDLE +static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); + +static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +{ + int i, ret, count = 0; + u32 *psci_states; + struct device_node *state_node; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + + ret = of_property_read_u32(state_node, + "arm,psci-suspend-param", + &state); + if (ret) { + pr_warn(" * %s missing arm,psci-suspend-param property\n", + state_node->full_name); + of_node_put(state_node); + goto free_mem; + } + + of_node_put(state_node); + pr_debug("psci-power-state %#x index %d\n", state, i); + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + ret = 
-EINVAL; + goto free_mem; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + +int psci_cpu_init_idle(unsigned int cpu) +{ + struct device_node *cpu_node; + int ret; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (!cpu_node) + return -ENODEV; + + ret = psci_dt_cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + + return ret; +} + +static int psci_suspend_finisher(unsigned long index) +{ + u32 *state = __this_cpu_read(psci_power_state); + + return psci_ops.cpu_suspend(state[index - 1], + virt_to_phys(cpu_resume)); +} + +int psci_cpu_suspend_enter(unsigned long index) +{ + int ret; + u32 *state = __this_cpu_read(psci_power_state); + /* + * idle state index 0 corresponds to wfi, should never be called + * from the cpu_suspend operations + */ + if (WARN_ON_ONCE(!index)) + return -EINVAL; + + if (!psci_power_state_loses_context(state[index - 1])) + ret = psci_ops.cpu_suspend(state[index - 1], 0); + else + ret = cpu_suspend(index, psci_suspend_finisher); + + return ret; +} + +/* ARM specific CPU idle operations */ +#ifdef CONFIG_ARM +static struct cpuidle_ops psci_cpuidle_ops __initdata = { + .suspend = psci_cpu_suspend_enter, + .init = psci_dt_cpu_init_idle, +}; + +CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); +#endif +#endif + static int psci_system_suspend(unsigned long unused) { return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5044f2257e89..6fca39e1c419 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3475,11 +3475,6 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) return VGACNTRL; } -static inline void __user *to_user_ptr(u64 address) -{ - return (void __user *)(uintptr_t)address; -} - static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) { unsigned long j = msecs_to_jiffies(m); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f56af0aaafde..659b90657f36 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); + char __user *user_data = u64_to_user_ptr(args->data_ptr); int ret = 0; /* We manually control the domain here and pretend that it @@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_WRITE, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; @@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if (ret) goto out_unpin; - user_data = to_user_ptr(args->data_ptr); + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; offset = i915_gem_obj_ggtt_offset(obj) + args->offset; @@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int needs_clflush_before = 0; struct sg_page_iter sg_iter; - user_data = to_user_ptr(args->data_ptr); 
+ user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); @@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return 0; if (!access_ok(VERIFY_READ, - to_user_ptr(args->data_ptr), + u64_to_user_ptr(args->data_ptr), args->size)) return -EFAULT; if (likely(!i915.prefault_disable)) { - ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr), args->size); if (ret) return -EFAULT; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6ed7d63a0688..8800f410b2d2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -492,7 +492,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int remain, ret; - user_relocs = to_user_ptr(entry->relocs_ptr); + user_relocs = u64_to_user_ptr(entry->relocs_ptr); remain = entry->relocation_count; while (remain) { @@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, return ret; if (r->presumed_offset != offset && - __copy_to_user_inatomic(&user_relocs->presumed_offset, - &r->presumed_offset, - sizeof(r->presumed_offset))) { + __put_user(r->presumed_offset, &user_relocs->presumed_offset)) { return -EFAULT; } @@ -833,7 +831,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, u64 invalid_offset = (u64)-1; int j; - user_relocs = to_user_ptr(exec[i].relocs_ptr); + user_relocs = u64_to_user_ptr(exec[i].relocs_ptr); if (copy_from_user(reloc+total, user_relocs, exec[i].relocation_count * sizeof(*reloc))) { @@ -977,7 +975,7 @@ validate_exec_list(struct drm_device *dev, invalid_flags |= EXEC_OBJECT_NEEDS_GTT; for (i = 0; i < count; i++) { - char __user *ptr = to_user_ptr(exec[i].relocs_ptr); + char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr); int length; /* limited by fault_in_pages_readable() */ if (exec[i].flags & invalid_flags) @@ -1635,7 +1633,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1671,7 +1669,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { struct drm_i915_gem_exec_object __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) { @@ -1723,7 +1721,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec2_list, - to_user_ptr(args->buffers_ptr), + u64_to_user_ptr(args->buffers_ptr), sizeof(*exec2_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1736,7 +1734,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ struct drm_i915_gem_exec_object2 __user *user_exec_list = - to_user_ptr(args->buffers_ptr); + u64_to_user_ptr(args->buffers_ptr); int i; for (i = 0; i < args->buffer_count; i++) { diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index fed44d4e5b72..ed1efae0c3f2 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -28,11 +28,6 @@ #define BO_LOCKED 0x4000 #define BO_PINNED 0x2000 -static inline void __user *to_user_ptr(u64 address) -{ - return (void __user *)(uintptr_t)address; -} - static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, int nr) { @@ -77,7 +72,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_gem_object *obj; struct msm_gem_object *msm_obj; void __user *userptr = - to_user_ptr(args->bos + (i * sizeof(submit_bo))); + u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); if (unlikely(ret)) { @@ -275,7 +270,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob for (i = 0; i < nr_relocs; i++) { struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = - to_user_ptr(relocs + (i * sizeof(submit_reloc))); + u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); uint32_t iova, off; bool valid; @@ -375,7 +370,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, for (i = 0; i < args->nr_cmds; i++) { struct drm_msm_gem_submit_cmd submit_cmd; void __user *userptr = - to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint32_t iova; diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 6c8921140f02..130cb2114059 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -4,11 +4,12 @@ menuconfig CORESIGHT bool "CoreSight Tracing Support" select ARM_AMBA + select PERF_EVENTS help This framework provides a kernel interface for the CoreSight debug and trace drivers to register themselves with. It's intended to build a topological view of the CoreSight components based on a DT - specification and configure the right serie of components when a + specification and configure the right series of components when a trace source gets enabled. if CORESIGHT @@ -77,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR programmable ATB replicator sends the ATB trace stream from the ETB/ETF to the TPIUi and ETR. +config CORESIGHT_STM + bool "CoreSight System Trace Macrocell driver" + depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64 + select CORESIGHT_LINKS_AND_SINKS + select STM + help + This driver provides support for hardware assisted software + instrumentation based tracing. This is primarily used for + logging useful software events or data coming from various entities + in the system, possibly running different OSs + endif diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index 99f8e5f6256e..af480d9c1441 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -1,13 +1,18 @@ # # Makefile for CoreSight drivers. 
# -obj-$(CONFIG_CORESIGHT) += coresight.o +obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o obj-$(CONFIG_OF) += of_coresight.o -obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o +obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \ + coresight-tmc-etf.o \ + coresight-tmc-etr.o obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ coresight-replicator.o -obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o -obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o +obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ + coresight-etm3x-sysfs.o +obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \ + coresight-etm4x-sysfs.o obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o +obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 77d0f9c1118d..4d20b0be0c0b 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -1,5 +1,7 @@ /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Embedded Trace Buffer driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -10,8 +12,8 @@ * GNU General Public License for more details. */ +#include <asm/local.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> @@ -27,6 +29,11 @@ #include <linux/coresight.h> #include <linux/amba/bus.h> #include <linux/clk.h> +#include <linux/circ_buf.h> +#include <linux/mm.h> +#include <linux/perf_event.h> + +#include <asm/local.h> #include "coresight-priv.h" @@ -71,10 +78,10 @@ * @csdev: component vitals needed by the framework. * @miscdev: specifics to handle "/dev/xyz.etb" entry. * @spinlock: only one at a time pls. - * @in_use: synchronise user space access to etb buffer. + * @reading: synchronise user space access to etb buffer. + * @mode: this ETB is being used. * @buf: area of memory where ETB buffer content gets sent. * @buffer_depth: size of @buf. - * @enable: this ETB is being used. * @trigger_cntr: amount of words to store after a trigger. */ struct etb_drvdata { @@ -84,10 +91,10 @@ struct etb_drvdata { struct coresight_device *csdev; struct miscdevice miscdev; spinlock_t spinlock; - atomic_t in_use; + local_t reading; + local_t mode; u8 *buf; u32 buffer_depth; - bool enable; u32 trigger_cntr; }; @@ -132,18 +139,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata) CS_LOCK(drvdata->base); } -static int etb_enable(struct coresight_device *csdev) +static int etb_enable(struct coresight_device *csdev, u32 mode) { - struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + u32 val; unsigned long flags; + struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_get_sync(drvdata->dev); + val = local_cmpxchg(&drvdata->mode, + CS_MODE_DISABLED, mode); + /* + * When accessing from Perf, a HW buffer can be handled + * by a single trace entity. In sysFS mode many tracers + * can be logging to the same HW buffer. + */ + if (val == CS_MODE_PERF) + return -EBUSY; + + /* Nothing to do, the tracer is already enabled. 
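The etb_enable() rework above replaces the old 'enable' boolean with a local_t mode word: a perf session must own the ETB exclusively, while any number of sysFS users may share it, and local_cmpxchg() arbitrates the claim without having to take the spinlock first. A minimal, self-contained sketch of that claim/release scheme (the EXAMPLE_MODE_* constants below are stand-ins for the driver's CS_MODE_* values):

#include <asm/local.h>
#include <linux/errno.h>
#include <linux/types.h>

enum { EXAMPLE_MODE_DISABLED, EXAMPLE_MODE_SYSFS, EXAMPLE_MODE_PERF };

static local_t example_mode;

static int example_claim(long new_mode)
{
	long old = local_cmpxchg(&example_mode, EXAMPLE_MODE_DISABLED,
				 new_mode);

	if (old == EXAMPLE_MODE_PERF)
		return -EBUSY;		/* perf owns the buffer exclusively */
	if (old == EXAMPLE_MODE_SYSFS)
		return 0;		/* sysFS users may pile on */
	/* old == EXAMPLE_MODE_DISABLED: we won the claim, program the HW */
	return 0;
}

static void example_release(void)
{
	local_set(&example_mode, EXAMPLE_MODE_DISABLED);
}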
*/ + if (val == CS_MODE_SYSFS) + goto out; spin_lock_irqsave(&drvdata->spinlock, flags); etb_enable_hw(drvdata); - drvdata->enable = true; spin_unlock_irqrestore(&drvdata->spinlock, flags); +out: dev_info(drvdata->dev, "ETB enabled\n"); return 0; } @@ -244,17 +264,226 @@ static void etb_disable(struct coresight_device *csdev) spin_lock_irqsave(&drvdata->spinlock, flags); etb_disable_hw(drvdata); etb_dump_hw(drvdata); - drvdata->enable = false; spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); + local_set(&drvdata->mode, CS_MODE_DISABLED); dev_info(drvdata->dev, "ETB disabled\n"); } +static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite) +{ + int node; + struct cs_buffers *buf; + + if (cpu == -1) + cpu = smp_processor_id(); + node = cpu_to_node(cpu); + + buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); + if (!buf) + return NULL; + + buf->snapshot = overwrite; + buf->nr_pages = nr_pages; + buf->data_pages = pages; + + return buf; +} + +static void etb_free_buffer(void *config) +{ + struct cs_buffers *buf = config; + + kfree(buf); +} + +static int etb_set_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int ret = 0; + unsigned long head; + struct cs_buffers *buf = sink_config; + + /* wrap head around to the amount of space we have */ + head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); + + /* find the page to write to */ + buf->cur = head / PAGE_SIZE; + + /* and offset within that page */ + buf->offset = head % PAGE_SIZE; + + local_set(&buf->data_size, 0); + + return ret; +} + +static unsigned long etb_reset_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost) +{ + unsigned long size = 0; + struct cs_buffers *buf = sink_config; + + if (buf) { + /* + * In snapshot mode ->data_size holds the new address of the + * ring buffer's head. The size itself is the whole address + * range since we want the latest information. + */ + if (buf->snapshot) + handle->head = local_xchg(&buf->data_size, + buf->nr_pages << PAGE_SHIFT); + + /* + * Tell the tracer PMU how much we got in this run and if + * something went wrong along the way. Nobody else can use + * this cs_buffers instance until we are done. As such + * resetting parameters here and squaring off with the ring + * buffer API in the tracer PMU is fine. + */ + *lost = !!local_xchg(&buf->lost, 0); + size = local_xchg(&buf->data_size, 0); + } + + return size; +} + +static void etb_update_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int i, cur; + u8 *buf_ptr; + u32 read_ptr, write_ptr, capacity; + u32 status, read_data, to_read; + unsigned long offset; + struct cs_buffers *buf = sink_config; + struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (!buf) + return; + + capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; + + CS_UNLOCK(drvdata->base); + etb_disable_hw(drvdata); + + /* unit is in words, not bytes */ + read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); + write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); + + /* + * Entries should be aligned to the frame size. If they are not + * go back to the last alignement point to give decoding tools a + * chance to fix things. 
+ */ + if (write_ptr % ETB_FRAME_SIZE_WORDS) { + dev_err(drvdata->dev, + "write_ptr: %lu not aligned to formatter frame size\n", + (unsigned long)write_ptr); + + write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1); + local_inc(&buf->lost); + } + + /* + * Get a hold of the status register and see if a wrap around + * has occurred. If so adjust things accordingly. Otherwise + * start at the beginning and go until the write pointer has + * been reached. + */ + status = readl_relaxed(drvdata->base + ETB_STATUS_REG); + if (status & ETB_STATUS_RAM_FULL) { + local_inc(&buf->lost); + to_read = capacity; + read_ptr = write_ptr; + } else { + to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth); + to_read *= ETB_FRAME_SIZE_WORDS; + } + + /* + * Make sure we don't overwrite data that hasn't been consumed yet. + * It is entirely possible that the HW buffer has more data than the + * ring buffer can currently handle. If so adjust the start address + * to take only the last traces. + * + * In snapshot mode we are looking to get the latest traces only and as + * such, we don't care about not overwriting data that hasn't been + * processed by user space. + */ + if (!buf->snapshot && to_read > handle->size) { + u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); + + /* The new read pointer must be frame size aligned */ + to_read = handle->size & mask; + /* + * Move the RAM read pointer up, keeping in mind that + * everything is in frame size units. + */ + read_ptr = (write_ptr + drvdata->buffer_depth) - + to_read / ETB_FRAME_SIZE_WORDS; + /* Wrap around if need be*/ + if (read_ptr > (drvdata->buffer_depth - 1)) + read_ptr -= drvdata->buffer_depth; + /* let the decoder know we've skipped ahead */ + local_inc(&buf->lost); + } + + /* finally tell HW where we want to start reading from */ + writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); + + cur = buf->cur; + offset = buf->offset; + for (i = 0; i < to_read; i += 4) { + buf_ptr = buf->data_pages[cur] + offset; + read_data = readl_relaxed(drvdata->base + + ETB_RAM_READ_DATA_REG); + *buf_ptr++ = read_data >> 0; + *buf_ptr++ = read_data >> 8; + *buf_ptr++ = read_data >> 16; + *buf_ptr++ = read_data >> 24; + + offset += 4; + if (offset >= PAGE_SIZE) { + offset = 0; + cur++; + /* wrap around at the end of the buffer */ + cur &= buf->nr_pages - 1; + } + } + + /* reset ETB buffer for next run */ + writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); + writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); + + /* + * In snapshot mode all we have to do is communicate to + * perf_aux_output_end() the address of the current head. In full + * trace mode the same function expects a size to move rb->aux_head + * forward. 
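etb_update_buffer() above drains the ETB RAM into the pages of the perf AUX area one 32-bit word at a time: CIRC_CNT() gives the number of words sitting between the read and write pointers, the read pointer is pushed forward when more data is available than the ring buffer can absorb, and the destination page index wraps with a mask because the AUX area always hands out a power-of-two number of pages. A small standalone worked example of that arithmetic (plain user-space C, values picked only for illustration):

#include <stdio.h>

/* Same definition as include/linux/circ_buf.h; 'size' must be a power of 2 */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
	/* ETB depth of 1024 words, write pointer has wrapped past the end */
	unsigned int buffer_depth = 1024;
	unsigned int read_ptr = 1000, write_ptr = 40;
	unsigned int words = CIRC_CNT(write_ptr, read_ptr, buffer_depth);

	printf("words to drain: %u\n", words);		/* prints 64 */

	/* Destination page/offset advance, as done per word in the copy loop */
	unsigned int nr_pages = 2, cur = 1, offset = 4092, page_size = 4096;

	offset += 4;
	if (offset >= page_size) {
		offset = 0;
		cur++;
		cur &= nr_pages - 1;	/* end of last page wraps back to 0 */
	}
	printf("next word lands in page %u at offset %u\n", cur, offset);
	return 0;
}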
+ */ + if (buf->snapshot) + local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); + else + local_add(to_read, &buf->data_size); + + etb_enable_hw(drvdata); + CS_LOCK(drvdata->base); +} + static const struct coresight_ops_sink etb_sink_ops = { .enable = etb_enable, .disable = etb_disable, + .alloc_buffer = etb_alloc_buffer, + .free_buffer = etb_free_buffer, + .set_buffer = etb_set_buffer, + .reset_buffer = etb_reset_buffer, + .update_buffer = etb_update_buffer, }; static const struct coresight_ops etb_cs_ops = { @@ -266,7 +495,7 @@ static void etb_dump(struct etb_drvdata *drvdata) unsigned long flags; spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->enable) { + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { etb_disable_hw(drvdata); etb_dump_hw(drvdata); etb_enable_hw(drvdata); @@ -281,7 +510,7 @@ static int etb_open(struct inode *inode, struct file *file) struct etb_drvdata *drvdata = container_of(file->private_data, struct etb_drvdata, miscdev); - if (atomic_cmpxchg(&drvdata->in_use, 0, 1)) + if (local_cmpxchg(&drvdata->reading, 0, 1)) return -EBUSY; dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); @@ -317,7 +546,7 @@ static int etb_release(struct inode *inode, struct file *file) { struct etb_drvdata *drvdata = container_of(file->private_data, struct etb_drvdata, miscdev); - atomic_set(&drvdata->in_use, 0); + local_set(&drvdata->reading, 0); dev_dbg(drvdata->dev, "%s: released\n", __func__); return 0; @@ -331,47 +560,29 @@ static const struct file_operations etb_fops = { .llseek = no_llseek, }; -static ssize_t status_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long flags; - u32 etb_rdr, etb_sr, etb_rrp, etb_rwp; - u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr; - struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); - - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - CS_UNLOCK(drvdata->base); - - etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); - etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG); - etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); - etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); - etb_trg = readl_relaxed(drvdata->base + ETB_TRG); - etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG); - etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR); - etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR); - - CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - pm_runtime_put(drvdata->dev); - - return sprintf(buf, - "Depth:\t\t0x%x\n" - "Status:\t\t0x%x\n" - "RAM read ptr:\t0x%x\n" - "RAM wrt ptr:\t0x%x\n" - "Trigger cnt:\t0x%x\n" - "Control:\t0x%x\n" - "Flush status:\t0x%x\n" - "Flush ctrl:\t0x%x\n", - etb_rdr, etb_sr, etb_rrp, etb_rwp, - etb_trg, etb_cr, etb_ffsr, etb_ffcr); - - return -EINVAL; -} -static DEVICE_ATTR_RO(status); +#define coresight_etb10_simple_func(name, offset) \ + coresight_simple_func(struct etb_drvdata, name, offset) + +coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG); +coresight_etb10_simple_func(sts, ETB_STATUS_REG); +coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER); +coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER); +coresight_etb10_simple_func(trg, ETB_TRG); +coresight_etb10_simple_func(ctl, ETB_CTL_REG); +coresight_etb10_simple_func(ffsr, ETB_FFSR); +coresight_etb10_simple_func(ffcr, ETB_FFCR); + +static struct attribute *coresight_etb_mgmt_attrs[] = { + &dev_attr_rdp.attr, + &dev_attr_sts.attr, + &dev_attr_rrp.attr, + &dev_attr_rwp.attr, + &dev_attr_trg.attr, + 
&dev_attr_ctl.attr, + &dev_attr_ffsr.attr, + &dev_attr_ffcr.attr, + NULL, +}; static ssize_t trigger_cntr_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -401,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr); static struct attribute *coresight_etb_attrs[] = { &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, NULL, }; -ATTRIBUTE_GROUPS(coresight_etb); + +static const struct attribute_group coresight_etb_group = { + .attrs = coresight_etb_attrs, +}; + +static const struct attribute_group coresight_etb_mgmt_group = { + .attrs = coresight_etb_mgmt_attrs, + .name = "mgmt", +}; + +const struct attribute_group *coresight_etb_groups[] = { + &coresight_etb_group, + &coresight_etb_mgmt_group, + NULL, +}; static int etb_probe(struct amba_device *adev, const struct amba_id *id) { @@ -481,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) if (ret) goto err_misc_register; - dev_info(dev, "ETB initialized\n"); return 0; err_misc_register: @@ -489,15 +712,6 @@ err_misc_register: return ret; } -static int etb_remove(struct amba_device *adev) -{ - struct etb_drvdata *drvdata = amba_get_drvdata(adev); - - misc_deregister(&drvdata->miscdev); - coresight_unregister(drvdata->csdev); - return 0; -} - #ifdef CONFIG_PM static int etb_runtime_suspend(struct device *dev) { @@ -537,14 +751,10 @@ static struct amba_driver etb_driver = { .name = "coresight-etb10", .owner = THIS_MODULE, .pm = &etb_dev_pm_ops, + .suppress_bind_attrs = true, }, .probe = etb_probe, - .remove = etb_remove, .id_table = etb_ids, }; - -module_amba_driver(etb_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver"); +builtin_amba_driver(etb_driver); diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c new file mode 100644 index 000000000000..8fbb1dd9e243 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -0,0 +1,510 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/coresight.h> +#include <linux/coresight-pmu.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/parser.h> +#include <linux/perf_event.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "coresight-priv.h" + +static struct pmu etm_pmu; +static bool etm_perf_up; + +/** + * struct etm_event_data - Coresight specifics associated to an event + * @work: Handle to free allocated memory outside IRQ context. + * @mask: Hold the CPU(s) this event was set for. + * @snk_config: The sink configuration. + * @path: An array of path, each slot for one CPU. 
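The etb10 sysfs rework above swaps the single hand-rolled status_show() for one read-only attribute per management register, generated with coresight_simple_func(), and publishes those in a second attribute group whose .name is "mgmt"; the register dumps therefore appear under a mgmt/ subdirectory of the device while trigger_cntr stays at the top level. A minimal sketch of that two-group layout (attribute names are illustrative only):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(foo);

static ssize_t bar_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(bar);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,		/* shows up as <device>/foo */
	NULL,
};

static struct attribute *example_mgmt_attrs[] = {
	&dev_attr_bar.attr,		/* shows up as <device>/mgmt/bar */
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static const struct attribute_group example_mgmt_group = {
	.attrs = example_mgmt_attrs,
	.name  = "mgmt",		/* a named group becomes a subdirectory */
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	&example_mgmt_group,
	NULL,
};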
+ */ +struct etm_event_data { + struct work_struct work; + cpumask_t mask; + void *snk_config; + struct list_head **path; +}; + +/** + * struct perf_pmu_drv_config - Driver specific configuration needed + * before a session can start. + * @sink: The name of the sink this session should use. + * @entry: Hook to the event->drv_configs list. + */ +struct perf_pmu_drv_config { + char *sink; + struct list_head entry; +}; + +static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); +static DEFINE_PER_CPU(struct coresight_device *, csdev_src); + +/* ETMv3.5/PTM's ETMCR is 'config' */ +PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC)); +PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); + +static struct attribute *etm_config_formats_attr[] = { + &format_attr_cycacc.attr, + &format_attr_timestamp.attr, + NULL, +}; + +static struct attribute_group etm_pmu_format_group = { + .name = "format", + .attrs = etm_config_formats_attr, +}; + +static const struct attribute_group *etm_pmu_attr_groups[] = { + &etm_pmu_format_group, + NULL, +}; + +static void etm_event_read(struct perf_event *event) {} + +static int etm_event_init(struct perf_event *event) +{ + if (event->attr.type != etm_pmu.type) + return -ENOENT; + + return 0; +} + +static void free_event_data(struct work_struct *work) +{ + int cpu; + cpumask_t *mask; + struct etm_event_data *event_data; + struct coresight_device *sink; + + event_data = container_of(work, struct etm_event_data, work); + mask = &event_data->mask; + /* + * First deal with the sink configuration. See comment in + * etm_setup_aux() about why we take the first available path. + */ + if (event_data->snk_config) { + cpu = cpumask_first(mask); + sink = coresight_get_sink(event_data->path[cpu]); + if (sink_ops(sink)->free_buffer) + sink_ops(sink)->free_buffer(event_data->snk_config); + } + + for_each_cpu(cpu, mask) { + if (event_data->path[cpu]) + coresight_release_path(event_data->path[cpu]); + } + + kfree(event_data->path); + kfree(event_data); +} + +static void *alloc_event_data(int cpu) +{ + int size; + cpumask_t *mask; + struct etm_event_data *event_data; + + /* First get memory for the session's data */ + event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL); + if (!event_data) + return NULL; + + /* Make sure nothing disappears under us */ + get_online_cpus(); + size = num_online_cpus(); + + mask = &event_data->mask; + if (cpu != -1) + cpumask_set_cpu(cpu, mask); + else + cpumask_copy(mask, cpu_online_mask); + put_online_cpus(); + + /* + * Each CPU has a single path between source and destination. As such + * allocate an array using CPU numbers as indexes. That way a path + * for any CPU can easily be accessed at any given time. We proceed + * the same way for sessions involving a single CPU. The cost of + * unused memory when dealing with single CPU trace scenarios is small + * compared to the cost of searching through an optimized array. 
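The new PMU above also registers a "format" attribute group, which is how perf tooling learns that named options map onto bit positions of perf_event_attr::config; with cycacc and timestamp wired to ETM_OPT_CYCACC and ETM_OPT_TS, a session can then be requested from the command line roughly as 'perf record -e cs_etm/cycacc,timestamp/ ...' (assuming the PMU name carried by CORESIGHT_ETM_PMU_NAME is "cs_etm"). A minimal sketch of the mechanism with a hypothetical option bit:

#include <linux/perf_event.h>
#include <linux/stringify.h>

/* Hypothetical option occupying bit 3 of perf_event_attr::config.  The
 * string "config:3" is what ends up in
 * /sys/bus/event_source/devices/<pmu>/format/example_opt. */
#define EXAMPLE_OPT_BIT	3
PMU_FORMAT_ATTR(example_opt, "config:" __stringify(EXAMPLE_OPT_BIT));

static struct attribute *example_format_attrs[] = {
	&format_attr_example_opt.attr,
	NULL,
};

static const struct attribute_group example_format_group = {
	.name  = "format",
	.attrs = example_format_attrs,
};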
+ */ + event_data->path = kcalloc(size, + sizeof(struct list_head *), GFP_KERNEL); + if (!event_data->path) { + kfree(event_data); + return NULL; + } + + return event_data; +} + +static void etm_free_aux(void *data) +{ + struct etm_event_data *event_data = data; + + schedule_work(&event_data->work); +} + +static void *etm_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool overwrite) +{ + int cpu; + char *sink_def = NULL; + cpumask_t *mask; + struct coresight_device *sink; + struct etm_event_data *event_data = NULL; + struct perf_pmu_drv_config *drv_config; + + /* + * Search the driver configurables looking for a sink. If more than + * one sink was specified the last one is taken. + */ + list_for_each_entry(drv_config, &event->drv_configs, entry) { + if (drv_config && drv_config->sink) { + sink_def = drv_config->sink; + break; + } + } + + event_data = alloc_event_data(event->cpu); + if (!event_data) + return NULL; + + INIT_WORK(&event_data->work, free_event_data); + + mask = &event_data->mask; + + /* Setup the path for each CPU in a trace session */ + for_each_cpu(cpu, mask) { + struct coresight_device *csdev; + + csdev = per_cpu(csdev_src, cpu); + if (!csdev) + goto err; + + /* + * Building a path doesn't enable it, it simply builds a + * list of devices from source to sink that can be + * referenced later when the path is actually needed. + */ + event_data->path[cpu] = coresight_build_path(csdev, sink_def); + if (!event_data->path[cpu]) + goto err; + } + + /* + * In theory nothing prevent tracers in a trace session from being + * associated with different sinks, nor having a sink per tracer. But + * until we have HW with this kind of topology and a way to convey + * sink assignement from the perf cmd line we need to assume tracers + * in a trace session are using the same sink. Therefore pick the sink + * found at the end of the first available path. + */ + cpu = cpumask_first(mask); + /* Grab the sink at the end of the path */ + sink = coresight_get_sink(event_data->path[cpu]); + if (!sink) + goto err; + + if (!sink_ops(sink)->alloc_buffer) + goto err; + + /* Get the AUX specific data from the sink buffer */ + event_data->snk_config = + sink_ops(sink)->alloc_buffer(sink, cpu, pages, + nr_pages, overwrite); + if (!event_data->snk_config) + goto err; + +out: + return event_data; + +err: + etm_free_aux(event_data); + event_data = NULL; + goto out; +} + +static void etm_event_start(struct perf_event *event, int flags) +{ + int cpu = smp_processor_id(); + struct etm_event_data *event_data; + struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); + struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); + + if (!csdev) + goto fail; + + /* + * Deal with the ring buffer API and get a handle on the + * session's information. 
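etm_event_start() and etm_event_stop() in this file carry the whole perf AUX hand-off: begin an AUX transaction to pin the ring buffer and retrieve the per-session data allocated in etm_setup_aux(), program the sink and the source, and on stop drain the hardware buffer and close the transaction so rb->aux_head advances. A stripped-down skeleton of that contract (my_hw_start(), my_hw_stop() and my_hw_drain() are hypothetical stand-ins for the sink and source operations used above):

#include <linux/perf_event.h>

static int my_hw_start(void *buf) { return 0; }
static void my_hw_stop(void) { }
static unsigned long my_hw_drain(struct perf_output_handle *handle, bool *lost)
{
	*lost = false;
	return 0;
}

static void example_start(struct perf_event *event,
			  struct perf_output_handle *handle)
{
	/* Pins the AUX area and returns what setup_aux() handed out */
	void *buf = perf_aux_output_begin(handle, event);

	if (!buf)
		goto fail;

	if (my_hw_start(buf)) {
		/* zero bytes, truncated: this run produced nothing */
		perf_aux_output_end(handle, 0, true);
		goto fail;
	}

	event->hw.state = 0;
	return;
fail:
	event->hw.state = PERF_HES_STOPPED;
}

static void example_stop(struct perf_event *event,
			 struct perf_output_handle *handle)
{
	unsigned long size;
	bool lost;

	my_hw_stop();
	size = my_hw_drain(handle, &lost);
	/* Moves rb->aux_head forward by 'size' and wakes up the reader */
	perf_aux_output_end(handle, size, lost);
	event->hw.state = PERF_HES_STOPPED;
}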
+ */ + event_data = perf_aux_output_begin(handle, event); + if (!event_data) + goto fail; + + /* We need a sink, no need to continue without one */ + sink = coresight_get_sink(event_data->path[cpu]); + if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer)) + goto fail_end_stop; + + /* Configure the sink */ + if (sink_ops(sink)->set_buffer(sink, handle, + event_data->snk_config)) + goto fail_end_stop; + + /* Nothing will happen without a path */ + if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF)) + goto fail_end_stop; + + /* Tell the perf core the event is alive */ + event->hw.state = 0; + + /* Finally enable the tracer */ + if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF)) + goto fail_end_stop; + +out: + return; + +fail_end_stop: + perf_aux_output_end(handle, 0, true); +fail: + event->hw.state = PERF_HES_STOPPED; + goto out; +} + +static void etm_event_stop(struct perf_event *event, int mode) +{ + bool lost; + int cpu = smp_processor_id(); + unsigned long size; + struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); + struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); + struct etm_event_data *event_data = perf_get_aux(handle); + + if (event->hw.state == PERF_HES_STOPPED) + return; + + if (!csdev) + return; + + sink = coresight_get_sink(event_data->path[cpu]); + if (!sink) + return; + + /* stop tracer */ + source_ops(csdev)->disable(csdev); + + /* tell the core */ + event->hw.state = PERF_HES_STOPPED; + + if (mode & PERF_EF_UPDATE) { + if (WARN_ON_ONCE(handle->event != event)) + return; + + /* update trace information */ + if (!sink_ops(sink)->update_buffer) + return; + + sink_ops(sink)->update_buffer(sink, handle, + event_data->snk_config); + + if (!sink_ops(sink)->reset_buffer) + return; + + size = sink_ops(sink)->reset_buffer(sink, handle, + event_data->snk_config, + &lost); + + perf_aux_output_end(handle, size, lost); + } + + /* Disabling the path make its elements available to other sessions */ + coresight_disable_path(event_data->path[cpu]); +} + +static int etm_event_add(struct perf_event *event, int mode) +{ + int ret = 0; + struct hw_perf_event *hwc = &event->hw; + + if (mode & PERF_EF_START) { + etm_event_start(event, 0); + if (hwc->state & PERF_HES_STOPPED) + ret = -EINVAL; + } else { + hwc->state = PERF_HES_STOPPED; + } + + return ret; +} + +static void etm_event_del(struct perf_event *event, int mode) +{ + etm_event_stop(event, PERF_EF_UPDATE); +} + +enum { + ETM_TOKEN_SINK_CPU, + ETM_TOKEN_SINK, + ETM_TOKEN_ERR, +}; + +static const match_table_t drv_cfg_tokens = { + {ETM_TOKEN_SINK_CPU, "sink=cpu%d:%s"}, + {ETM_TOKEN_SINK, "sink=%s"}, + {ETM_TOKEN_ERR, NULL}, +}; + +static int etm_get_drv_configs(struct perf_event *event, void __user *arg) +{ + char *config, *sink = NULL; + int cpu = -1, token, ret = 0; + substring_t args[MAX_OPT_ARGS]; + struct perf_pmu_drv_config *drv_config = NULL; + + /* Make user supplied input usable */ + config = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(config)) + return PTR_ERR(config); + + /* See above declared @drv_cfg_tokens for the usable formats */ + token = match_token(config, drv_cfg_tokens, args); + switch (token) { + case ETM_TOKEN_SINK: + /* Just a sink has been specified */ + sink = match_strdup(&args[0]); + if (IS_ERR(sink)) { + ret = PTR_ERR(sink); + goto err; + } + break; + case ETM_TOKEN_SINK_CPU: + /* We have a sink and a CPU */ + if (match_int(&args[0], &cpu)) { + ret = -EINVAL; + goto err; + } + sink = match_strdup(&args[1]); + if (IS_ERR(sink)) { + ret = PTR_ERR(sink); + goto 
err; + } + break; + default: + ret = -EINVAL; + goto err; + } + + /* If the CPUs don't match the sink is destined to another path */ + if (event->cpu != cpu) + goto err; + + /* + * We have a valid configuration, allocate memory and add to the list + * of driver configurables. + */ + drv_config = kzalloc(sizeof(*drv_config), GFP_KERNEL); + if (IS_ERR(drv_config)) { + ret = PTR_ERR(drv_config); + goto err; + } + + drv_config->sink = sink; + list_add(&drv_config->entry, &event->drv_configs); + +out: + kfree(config); + return ret; + +err: + kfree(sink); + goto out; +} + +static void etm_free_drv_configs(struct perf_event *event) +{ + struct perf_pmu_drv_config *config, *itr; + + list_for_each_entry_safe(config, itr, &event->drv_configs, entry) { + list_del(&config->entry); + kfree(config->sink); + kfree(config); + } +} + +int etm_perf_symlink(struct coresight_device *csdev, bool link) +{ + char entry[sizeof("cpu9999999")]; + int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev); + struct device *pmu_dev = etm_pmu.dev; + struct device *cs_dev = &csdev->dev; + + sprintf(entry, "cpu%d", cpu); + + if (!etm_perf_up) + return -EPROBE_DEFER; + + if (link) { + ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry); + if (ret) + return ret; + per_cpu(csdev_src, cpu) = csdev; + } else { + sysfs_remove_link(&pmu_dev->kobj, entry); + per_cpu(csdev_src, cpu) = NULL; + } + + return 0; +} + +static int __init etm_perf_init(void) +{ + int ret; + + etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE; + + etm_pmu.attr_groups = etm_pmu_attr_groups; + etm_pmu.task_ctx_nr = perf_sw_context; + etm_pmu.read = etm_event_read; + etm_pmu.event_init = etm_event_init; + etm_pmu.setup_aux = etm_setup_aux; + etm_pmu.free_aux = etm_free_aux; + etm_pmu.start = etm_event_start; + etm_pmu.stop = etm_event_stop; + etm_pmu.add = etm_event_add; + etm_pmu.del = etm_event_del; + etm_pmu.get_drv_configs = etm_get_drv_configs; + etm_pmu.free_drv_configs + = etm_free_drv_configs; + + ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1); + if (ret == 0) + etm_perf_up = true; + + return ret; +} +device_initcall(etm_perf_init); diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h new file mode 100644 index 000000000000..87f5a134eb6f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -0,0 +1,32 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
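etm_get_drv_configs() above leans on the generic token parser from <linux/parser.h> to accept either "sink=<name>" or "sink=cpu<n>:<name>" from userspace and remember the result on the event's drv_configs list. A small self-contained sketch of that parsing pattern (same token strings as above, error handling trimmed; the caller is expected to kfree() the returned name):

#include <linux/errno.h>
#include <linux/parser.h>
#include <linux/slab.h>

enum { EXAMPLE_TOKEN_SINK_CPU, EXAMPLE_TOKEN_SINK, EXAMPLE_TOKEN_ERR };

static const match_table_t example_tokens = {
	{ EXAMPLE_TOKEN_SINK_CPU, "sink=cpu%d:%s" },
	{ EXAMPLE_TOKEN_SINK,     "sink=%s"       },
	{ EXAMPLE_TOKEN_ERR,      NULL            },
};

static int example_parse_sink(char *config, int *cpup, char **sinkp)
{
	substring_t args[MAX_OPT_ARGS];

	*cpup = -1;
	*sinkp = NULL;

	switch (match_token(config, example_tokens, args)) {
	case EXAMPLE_TOKEN_SINK:
		*sinkp = match_strdup(&args[0]);
		break;
	case EXAMPLE_TOKEN_SINK_CPU:
		if (match_int(&args[0], cpup))
			return -EINVAL;
		*sinkp = match_strdup(&args[1]);
		break;
	default:
		return -EINVAL;
	}

	return *sinkp ? 0 : -ENOMEM;
}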
+ */ + +#ifndef _CORESIGHT_ETM_PERF_H +#define _CORESIGHT_ETM_PERF_H + +struct coresight_device; + +#ifdef CONFIG_CORESIGHT +int etm_perf_symlink(struct coresight_device *csdev, bool link); + +#else +static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) +{ return -EINVAL; } + +#endif /* CONFIG_CORESIGHT */ + +#endif diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index b4481eb29304..51597cb2c08a 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h @@ -13,6 +13,7 @@ #ifndef _CORESIGHT_CORESIGHT_ETM_H #define _CORESIGHT_CORESIGHT_ETM_H +#include <asm/local.h> #include <linux/spinlock.h> #include "coresight-priv.h" @@ -109,7 +110,10 @@ #define ETM_MODE_STALL BIT(2) #define ETM_MODE_TIMESTAMP BIT(3) #define ETM_MODE_CTXID BIT(4) -#define ETM_MODE_ALL 0x1f +#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \ + ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \ + ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \ + ETM_MODE_EXCL_USER) #define ETM_SQR_MASK 0x3 #define ETM_TRACEID_MASK 0x3f @@ -136,35 +140,16 @@ #define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \ ETM_ADD_COMP_0 | \ ETM_EVENT_NOT_A) + /** - * struct etm_drvdata - specifics associated to an ETM component - * @base: memory mapped base address for this component. - * @dev: the device entity associated to this component. - * @atclk: optional clock for the core parts of the ETM. - * @csdev: component vitals needed by the framework. - * @spinlock: only one at a time pls. - * @cpu: the cpu this component is affined to. - * @port_size: port size as reported by ETMCR bit 4-6 and 21. - * @arch: ETM/PTM version number. - * @use_cpu14: true if management registers need to be accessed via CP14. - * @enable: is this ETM/PTM currently tracing. - * @sticky_enable: true if ETM base configuration has been done. - * @boot_enable:true if we should start tracing at boot time. - * @os_unlock: true if access to management registers is allowed. - * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR. - * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. - * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. - * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. - * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. - * @etmccr: value of register ETMCCR. - * @etmccer: value of register ETMCCER. - * @traceid: value of the current ID for this component. + * struct etm_config - configuration information related to an ETM * @mode: controls various modes supported by this ETM/PTM. * @ctrl: used in conjunction with @mode. * @trigger_event: setting for register ETMTRIGGER. * @startstop_ctrl: setting for register ETMTSSCR. * @enable_event: setting for register ETMTEEVR. * @enable_ctrl1: setting for register ETMTECR1. + * @enable_ctrl2: setting for register ETMTECR2. * @fifofull_level: setting for register ETMFFLR. * @addr_idx: index for the address comparator selection. * @addr_val: value for address comparator register. @@ -189,36 +174,16 @@ * @ctxid_mask: mask applicable to all the context IDs. * @sync_freq: Synchronisation frequency. * @timestamp_event: Defines an event that requests the insertion - of a timestamp into the trace stream. + * of a timestamp into the trace stream. 
*/ -struct etm_drvdata { - void __iomem *base; - struct device *dev; - struct clk *atclk; - struct coresight_device *csdev; - spinlock_t spinlock; - int cpu; - int port_size; - u8 arch; - bool use_cp14; - bool enable; - bool sticky_enable; - bool boot_enable; - bool os_unlock; - u8 nr_addr_cmp; - u8 nr_cntr; - u8 nr_ext_inp; - u8 nr_ext_out; - u8 nr_ctxid_cmp; - u32 etmccr; - u32 etmccer; - u32 traceid; +struct etm_config { u32 mode; u32 ctrl; u32 trigger_event; u32 startstop_ctrl; u32 enable_event; u32 enable_ctrl1; + u32 enable_ctrl2; u32 fifofull_level; u8 addr_idx; u32 addr_val[ETM_MAX_ADDR_CMP]; @@ -244,6 +209,56 @@ struct etm_drvdata { u32 timestamp_event; }; +/** + * struct etm_drvdata - specifics associated to an ETM component + * @base: memory mapped base address for this component. + * @dev: the device entity associated to this component. + * @atclk: optional clock for the core parts of the ETM. + * @csdev: component vitals needed by the framework. + * @spinlock: only one at a time pls. + * @cpu: the cpu this component is affined to. + * @port_size: port size as reported by ETMCR bit 4-6 and 21. + * @arch: ETM/PTM version number. + * @use_cpu14: true if management registers need to be accessed via CP14. + * @mode: this tracer's mode, i.e sysFS, Perf or disabled. + * @sticky_enable: true if ETM base configuration has been done. + * @boot_enable:true if we should start tracing at boot time. + * @os_unlock: true if access to management registers is allowed. + * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR. + * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. + * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. + * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. + * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. + * @etmccr: value of register ETMCCR. + * @etmccer: value of register ETMCCER. + * @traceid: value of the current ID for this component. + * @config: structure holding configuration parameters. 
+ */ +struct etm_drvdata { + void __iomem *base; + struct device *dev; + struct clk *atclk; + struct coresight_device *csdev; + spinlock_t spinlock; + int cpu; + int port_size; + u8 arch; + bool use_cp14; + local_t mode; + bool sticky_enable; + bool boot_enable; + bool os_unlock; + u8 nr_addr_cmp; + u8 nr_cntr; + u8 nr_ext_inp; + u8 nr_ext_out; + u8 nr_ctxid_cmp; + u32 etmccr; + u32 etmccer; + u32 traceid; + struct etm_config config; +}; + enum etm_addr_type { ETM_ADDR_TYPE_NONE, ETM_ADDR_TYPE_SINGLE, @@ -251,4 +266,39 @@ enum etm_addr_type { ETM_ADDR_TYPE_START, ETM_ADDR_TYPE_STOP, }; + +static inline void etm_writel(struct etm_drvdata *drvdata, + u32 val, u32 off) +{ + if (drvdata->use_cp14) { + if (etm_writel_cp14(off, val)) { + dev_err(drvdata->dev, + "invalid CP14 access to ETM reg: %#x", off); + } + } else { + writel_relaxed(val, drvdata->base + off); + } +} + +static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) +{ + u32 val; + + if (drvdata->use_cp14) { + if (etm_readl_cp14(off, &val)) { + dev_err(drvdata->dev, + "invalid CP14 access to ETM reg: %#x", off); + } + } else { + val = readl_relaxed(drvdata->base + off); + } + + return val; +} + +extern const struct attribute_group *coresight_etm_groups[]; +int etm_get_trace_id(struct etm_drvdata *drvdata); +void etm_set_default(struct etm_config *config); +void etm_config_trace_mode(struct etm_config *config); +struct etm_config *get_etm_config(struct etm_drvdata *drvdata); #endif diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c new file mode 100644 index 000000000000..02d4b629891f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c @@ -0,0 +1,1265 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
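The new coresight-etm-perf.h above follows the usual header-stub convention: when CONFIG_CORESIGHT is enabled, etm_perf_symlink() is a real symbol; otherwise it collapses into a static inline returning -EINVAL, so callers never need an #ifdef of their own. The same pattern in isolation (CONFIG_EXAMPLE_FEATURE and example_hook() are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

struct device;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_hook(struct device *dev, bool link);
#else
static inline int example_hook(struct device *dev, bool link)
{ return -EINVAL; }
#endif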
+ */ + +#include <linux/pm_runtime.h> +#include <linux/sysfs.h> +#include "coresight-etm.h" + +static ssize_t nr_addr_cmp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_addr_cmp; + return sprintf(buf, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_addr_cmp); + +static ssize_t nr_cntr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_cntr; + return sprintf(buf, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_cntr); + +static ssize_t nr_ctxid_cmp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_ctxid_cmp; + return sprintf(buf, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_ctxid_cmp); + +static ssize_t etmsr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags, val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + pm_runtime_get_sync(drvdata->dev); + spin_lock_irqsave(&drvdata->spinlock, flags); + CS_UNLOCK(drvdata->base); + + val = etm_readl(drvdata, ETMSR); + + CS_LOCK(drvdata->base); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + pm_runtime_put(drvdata->dev); + + return sprintf(buf, "%#lx\n", val); +} +static DEVICE_ATTR_RO(etmsr); + +static ssize_t reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int i, ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val) { + spin_lock(&drvdata->spinlock); + memset(config, 0, sizeof(struct etm_config)); + config->mode = ETM_MODE_EXCLUDE; + config->trigger_event = ETM_DEFAULT_EVENT_VAL; + for (i = 0; i < drvdata->nr_addr_cmp; i++) { + config->addr_type[i] = ETM_ADDR_TYPE_NONE; + } + + etm_set_default(config); + spin_unlock(&drvdata->spinlock); + } + + return size; +} +static DEVICE_ATTR_WO(reset); + +static ssize_t mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->mode; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + config->mode = val & ETM_MODE_ALL; + + if (config->mode & ETM_MODE_EXCLUDE) + config->enable_ctrl1 |= ETMTECR1_INC_EXC; + else + config->enable_ctrl1 &= ~ETMTECR1_INC_EXC; + + if (config->mode & ETM_MODE_CYCACC) + config->ctrl |= ETMCR_CYC_ACC; + else + config->ctrl &= ~ETMCR_CYC_ACC; + + if (config->mode & ETM_MODE_STALL) { + if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { + dev_warn(drvdata->dev, "stall mode not supported\n"); + ret = -EINVAL; + goto err_unlock; + } + config->ctrl |= ETMCR_STALL_MODE; + } else + config->ctrl &= ~ETMCR_STALL_MODE; + + if (config->mode & ETM_MODE_TIMESTAMP) { + if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { + dev_warn(drvdata->dev, "timestamp not 
supported\n"); + ret = -EINVAL; + goto err_unlock; + } + config->ctrl |= ETMCR_TIMESTAMP_EN; + } else + config->ctrl &= ~ETMCR_TIMESTAMP_EN; + + if (config->mode & ETM_MODE_CTXID) + config->ctrl |= ETMCR_CTXID_SIZE; + else + config->ctrl &= ~ETMCR_CTXID_SIZE; + + if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) + etm_config_trace_mode(config); + + spin_unlock(&drvdata->spinlock); + + return size; + +err_unlock: + spin_unlock(&drvdata->spinlock); + return ret; +} +static DEVICE_ATTR_RW(mode); + +static ssize_t trigger_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->trigger_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t trigger_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->trigger_event = val & ETM_EVENT_MASK; + + return size; +} +static DEVICE_ATTR_RW(trigger_event); + +static ssize_t enable_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->enable_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t enable_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->enable_event = val & ETM_EVENT_MASK; + + return size; +} +static DEVICE_ATTR_RW(enable_event); + +static ssize_t fifofull_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->fifofull_level; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t fifofull_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->fifofull_level = val; + + return size; +} +static DEVICE_ATTR_RW(fifofull_level); + +static ssize_t addr_idx_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->addr_idx; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t addr_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val >= drvdata->nr_addr_cmp) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. 
+ */ + spin_lock(&drvdata->spinlock); + config->addr_idx = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(addr_idx); + +static ssize_t addr_single_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 idx; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + + val = config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t addr_single_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + + config->addr_val[idx] = val; + config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(addr_single); + +static ssize_t addr_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 idx; + unsigned long val1, val2; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val1 = config->addr_val[idx]; + val2 = config->addr_val[idx + 1]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx %#lx\n", val1, val2); +} + +static ssize_t addr_range_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val1, val2; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + /* Lower address comparator cannot have a higher address value */ + if (val1 > val2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = val1; + config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; + config->addr_val[idx + 1] = val2; + config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; + config->enable_ctrl1 |= (1 << (idx/2)); + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(addr_range); + +static ssize_t addr_start_show(struct device *dev, + struct device_attribute *attr, char 
*buf) +{ + u8 idx; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t addr_start_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = val; + config->addr_type[idx] = ETM_ADDR_TYPE_START; + config->startstop_ctrl |= (1 << idx); + config->enable_ctrl1 |= BIT(25); + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(addr_start); + +static ssize_t addr_stop_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 idx; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t addr_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = val; + config->addr_type[idx] = ETM_ADDR_TYPE_STOP; + config->startstop_ctrl |= (1 << (idx + 16)); + config->enable_ctrl1 |= ETMTECR1_START_STOP; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(addr_stop); + +static ssize_t addr_acctype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->addr_acctype[config->addr_idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t addr_acctype_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + config->addr_acctype[config->addr_idx] = val; + spin_unlock(&drvdata->spinlock); + + return size; +} 
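Nearly all of the sysfs attributes in this file follow one shape: show() prints the cached value from etm_config with "%#lx", store() parses the input as hexadecimal with kstrtoul(buf, 16, ...), and the shared configuration is touched under drvdata->spinlock whenever an index selected through another attribute could be dereferenced concurrently. A stripped-down, self-contained version of that pattern ('example' and its backing variable are illustrative only); from the shell such a file is exercised with a plain redirect of a hex value before the tracer is enabled:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_val;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;

	spin_lock(&example_lock);
	val = example_val;
	spin_unlock(&example_lock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);	/* values here are hexadecimal */
	if (ret)
		return ret;

	spin_lock(&example_lock);
	example_val = val;
	spin_unlock(&example_lock);

	return size;
}
static DEVICE_ATTR_RW(example);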
+static DEVICE_ATTR_RW(addr_acctype); + +static ssize_t cntr_idx_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->cntr_idx; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t cntr_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val >= drvdata->nr_cntr) + return -EINVAL; + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->cntr_idx = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(cntr_idx); + +static ssize_t cntr_rld_val_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->cntr_rld_val[config->cntr_idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t cntr_rld_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + config->cntr_rld_val[config->cntr_idx] = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(cntr_rld_val); + +static ssize_t cntr_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->cntr_event[config->cntr_idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t cntr_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(cntr_event); + +static ssize_t cntr_rld_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->cntr_rld_event[config->cntr_idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t cntr_rld_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + 
spin_lock(&drvdata->spinlock); + config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(cntr_rld_event); + +static ssize_t cntr_val_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i, ret = 0; + u32 val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + if (!local_read(&drvdata->mode)) { + spin_lock(&drvdata->spinlock); + for (i = 0; i < drvdata->nr_cntr; i++) + ret += sprintf(buf, "counter %d: %x\n", + i, config->cntr_val[i]); + spin_unlock(&drvdata->spinlock); + return ret; + } + + for (i = 0; i < drvdata->nr_cntr; i++) { + val = etm_readl(drvdata, ETMCNTVRn(i)); + ret += sprintf(buf, "counter %d: %x\n", i, val); + } + + return ret; +} + +static ssize_t cntr_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + config->cntr_val[config->cntr_idx] = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(cntr_val); + +static ssize_t seq_12_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_12_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_12_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_12_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_12_event); + +static ssize_t seq_21_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_21_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_21_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_21_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_21_event); + +static ssize_t seq_23_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_23_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_23_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_23_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_23_event); + +static ssize_t 
seq_31_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_31_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_31_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_31_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_31_event); + +static ssize_t seq_32_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_32_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_32_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_32_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_32_event); + +static ssize_t seq_13_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->seq_13_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_13_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->seq_13_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_13_event); + +static ssize_t seq_curr_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val, flags; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + if (!local_read(&drvdata->mode)) { + val = config->seq_curr_state; + goto out; + } + + pm_runtime_get_sync(drvdata->dev); + spin_lock_irqsave(&drvdata->spinlock, flags); + + CS_UNLOCK(drvdata->base); + val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); + CS_LOCK(drvdata->base); + + spin_unlock_irqrestore(&drvdata->spinlock, flags); + pm_runtime_put(drvdata->dev); +out: + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t seq_curr_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val > ETM_SEQ_STATE_MAX_VAL) + return -EINVAL; + + config->seq_curr_state = val; + + return size; +} +static DEVICE_ATTR_RW(seq_curr_state); + +static ssize_t ctxid_idx_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = 
&drvdata->config; + + val = config->ctxid_idx; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t ctxid_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + if (val >= drvdata->nr_ctxid_cmp) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->ctxid_idx = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(ctxid_idx); + +static ssize_t ctxid_pid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val = config->ctxid_vpid[config->ctxid_idx]; + spin_unlock(&drvdata->spinlock); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t ctxid_pid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long vpid, pid; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &vpid); + if (ret) + return ret; + + pid = coresight_vpid_to_pid(vpid); + + spin_lock(&drvdata->spinlock); + config->ctxid_pid[config->ctxid_idx] = pid; + config->ctxid_vpid[config->ctxid_idx] = vpid; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(ctxid_pid); + +static ssize_t ctxid_mask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->ctxid_mask; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t ctxid_mask_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->ctxid_mask = val; + return size; +} +static DEVICE_ATTR_RW(ctxid_mask); + +static ssize_t sync_freq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->sync_freq; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t sync_freq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->sync_freq = val & ETM_SYNC_MASK; + return size; +} +static DEVICE_ATTR_RW(sync_freq); + +static ssize_t timestamp_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + val = config->timestamp_event; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t timestamp_event_store(struct device *dev, + 
struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etm_config *config = &drvdata->config; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + config->timestamp_event = val & ETM_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(timestamp_event); + +static ssize_t cpu_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->cpu; + return scnprintf(buf, PAGE_SIZE, "%d\n", val); + +} +static DEVICE_ATTR_RO(cpu); + +static ssize_t traceid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = etm_get_trace_id(drvdata); + + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t traceid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + drvdata->traceid = val & ETM_TRACEID_MASK; + return size; +} +static DEVICE_ATTR_RW(traceid); + +static struct attribute *coresight_etm_attrs[] = { + &dev_attr_nr_addr_cmp.attr, + &dev_attr_nr_cntr.attr, + &dev_attr_nr_ctxid_cmp.attr, + &dev_attr_etmsr.attr, + &dev_attr_reset.attr, + &dev_attr_mode.attr, + &dev_attr_trigger_event.attr, + &dev_attr_enable_event.attr, + &dev_attr_fifofull_level.attr, + &dev_attr_addr_idx.attr, + &dev_attr_addr_single.attr, + &dev_attr_addr_range.attr, + &dev_attr_addr_start.attr, + &dev_attr_addr_stop.attr, + &dev_attr_addr_acctype.attr, + &dev_attr_cntr_idx.attr, + &dev_attr_cntr_rld_val.attr, + &dev_attr_cntr_event.attr, + &dev_attr_cntr_rld_event.attr, + &dev_attr_cntr_val.attr, + &dev_attr_seq_12_event.attr, + &dev_attr_seq_21_event.attr, + &dev_attr_seq_23_event.attr, + &dev_attr_seq_31_event.attr, + &dev_attr_seq_32_event.attr, + &dev_attr_seq_13_event.attr, + &dev_attr_seq_curr_state.attr, + &dev_attr_ctxid_idx.attr, + &dev_attr_ctxid_pid.attr, + &dev_attr_ctxid_mask.attr, + &dev_attr_sync_freq.attr, + &dev_attr_timestamp_event.attr, + &dev_attr_traceid.attr, + &dev_attr_cpu.attr, + NULL, +}; + +#define coresight_etm3x_simple_func(name, offset) \ + coresight_simple_func(struct etm_drvdata, name, offset) + +coresight_etm3x_simple_func(etmccr, ETMCCR); +coresight_etm3x_simple_func(etmccer, ETMCCER); +coresight_etm3x_simple_func(etmscr, ETMSCR); +coresight_etm3x_simple_func(etmidr, ETMIDR); +coresight_etm3x_simple_func(etmcr, ETMCR); +coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR); +coresight_etm3x_simple_func(etmteevr, ETMTEEVR); +coresight_etm3x_simple_func(etmtssvr, ETMTSSCR); +coresight_etm3x_simple_func(etmtecr1, ETMTECR1); +coresight_etm3x_simple_func(etmtecr2, ETMTECR2); + +static struct attribute *coresight_etm_mgmt_attrs[] = { + &dev_attr_etmccr.attr, + &dev_attr_etmccer.attr, + &dev_attr_etmscr.attr, + &dev_attr_etmidr.attr, + &dev_attr_etmcr.attr, + &dev_attr_etmtraceidr.attr, + &dev_attr_etmteevr.attr, + &dev_attr_etmtssvr.attr, + &dev_attr_etmtecr1.attr, + &dev_attr_etmtecr2.attr, + NULL, +}; + +static const struct attribute_group coresight_etm_group = { + .attrs = coresight_etm_attrs, +}; + +static const struct attribute_group coresight_etm_mgmt_group = { + .attrs = coresight_etm_mgmt_attrs, + .name = "mgmt", +}; + +const struct attribute_group 
*coresight_etm_groups[] = { + &coresight_etm_group, + &coresight_etm_mgmt_group, + NULL, +}; diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d630b7ece735..d83ab82672e4 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -1,5 +1,7 @@ /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Program Flow Trace driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -11,7 +13,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> @@ -27,14 +29,21 @@ #include <linux/cpu.h> #include <linux/of.h> #include <linux/coresight.h> +#include <linux/coresight-pmu.h> #include <linux/amba/bus.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/clk.h> +#include <linux/perf_event.h> #include <asm/sections.h> #include "coresight-etm.h" +#include "coresight-etm-perf.h" +/* + * Not really modular but using module_param is the easiest way to + * remain consistent with existing use cases for now. + */ static int boot_enable; module_param_named(boot_enable, boot_enable, int, S_IRUGO); @@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); static int etm_count; static struct etm_drvdata *etmdrvdata[NR_CPUS]; -static inline void etm_writel(struct etm_drvdata *drvdata, - u32 val, u32 off) -{ - if (drvdata->use_cp14) { - if (etm_writel_cp14(off, val)) { - dev_err(drvdata->dev, - "invalid CP14 access to ETM reg: %#x", off); - } - } else { - writel_relaxed(val, drvdata->base + off); - } -} - -static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) -{ - u32 val; - - if (drvdata->use_cp14) { - if (etm_readl_cp14(off, &val)) { - dev_err(drvdata->dev, - "invalid CP14 access to ETM reg: %#x", off); - } - } else { - val = readl_relaxed(drvdata->base + off); - } - - return val; -} - /* * Memory mapped writes to clear os lock are not supported on some processors * and OS lock must be unlocked before any memory mapped access on such * processors, otherwise memory mapped reads/writes will be invalid. 
*/ -static void etm_os_unlock(void *info) +static void etm_os_unlock(struct etm_drvdata *drvdata) { - struct etm_drvdata *drvdata = (struct etm_drvdata *)info; /* Writing any value to ETMOSLAR unlocks the trace registers */ etm_writel(drvdata, 0x0, ETMOSLAR); + drvdata->os_unlock = true; isb(); } @@ -215,36 +195,156 @@ static void etm_clr_prog(struct etm_drvdata *drvdata) } } -static void etm_set_default(struct etm_drvdata *drvdata) +void etm_set_default(struct etm_config *config) { int i; - drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; - drvdata->enable_event = ETM_HARD_WIRE_RES_A; + if (WARN_ON_ONCE(!config)) + return; - drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL; - drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL; - drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL; - drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL; - drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL; - drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL; - drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL; + /* + * Taken verbatim from the TRM: + * + * To trace all memory: + * set bit [24] in register 0x009, the ETMTECR1, to 1 + * set all other bits in register 0x009, the ETMTECR1, to 0 + * set all bits in register 0x007, the ETMTECR2, to 0 + * set register 0x008, the ETMTEEVR, to 0x6F (TRUE). + */ + config->enable_ctrl1 = BIT(24); + config->enable_ctrl2 = 0x0; + config->enable_event = ETM_HARD_WIRE_RES_A; - for (i = 0; i < drvdata->nr_cntr; i++) { - drvdata->cntr_rld_val[i] = 0x0; - drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; - drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; - drvdata->cntr_val[i] = 0x0; + config->trigger_event = ETM_DEFAULT_EVENT_VAL; + config->enable_event = ETM_HARD_WIRE_RES_A; + + config->seq_12_event = ETM_DEFAULT_EVENT_VAL; + config->seq_21_event = ETM_DEFAULT_EVENT_VAL; + config->seq_23_event = ETM_DEFAULT_EVENT_VAL; + config->seq_31_event = ETM_DEFAULT_EVENT_VAL; + config->seq_32_event = ETM_DEFAULT_EVENT_VAL; + config->seq_13_event = ETM_DEFAULT_EVENT_VAL; + config->timestamp_event = ETM_DEFAULT_EVENT_VAL; + + for (i = 0; i < ETM_MAX_CNTR; i++) { + config->cntr_rld_val[i] = 0x0; + config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; + config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; + config->cntr_val[i] = 0x0; } - drvdata->seq_curr_state = 0x0; - drvdata->ctxid_idx = 0x0; - for (i = 0; i < drvdata->nr_ctxid_cmp; i++) { - drvdata->ctxid_pid[i] = 0x0; - drvdata->ctxid_vpid[i] = 0x0; + config->seq_curr_state = 0x0; + config->ctxid_idx = 0x0; + for (i = 0; i < ETM_MAX_CTXID_CMP; i++) { + config->ctxid_pid[i] = 0x0; + config->ctxid_vpid[i] = 0x0; } - drvdata->ctxid_mask = 0x0; + config->ctxid_mask = 0x0; +} + +void etm_config_trace_mode(struct etm_config *config) +{ + u32 flags, mode; + + mode = config->mode; + + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); + + /* excluding kernel AND user space doesn't make sense */ + if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) + return; + + /* nothing to do if neither flag is set */ + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) + return; + + flags = (1 << 0 | /* instruction execute */ + 3 << 3 | /* ARM instruction */ + 0 << 5 | /* No data value comparison */ + 0 << 7 | /* No exact match */ + 0 << 8); /* Ignore context ID */ + + /* No need to worry about single address comparators. 
*/ + config->enable_ctrl2 = 0x0; + + /* Bit 0 is address range comparator 1 */ + config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; + + /* + * On ETMv3.5: + * ETMACTRn[13,11] == Non-secure state comparison control + * ETMACTRn[12,10] == Secure state comparison control + * + * b00 == Match in all modes in this state + * b01 == Do not match in any mode in this state + * b10 == Match in all modes except user mode in this state + * b11 == Match only in user mode in this state + */ + + /* Tracing in secure mode is not supported at this time */ + flags |= (0 << 12 | 1 << 10); + + if (mode & ETM_MODE_EXCL_USER) { + /* exclude user, match all modes except user mode */ + flags |= (1 << 13 | 0 << 11); + } else { + /* exclude kernel, match only in user mode */ + flags |= (1 << 13 | 1 << 11); + } + + /* + * The ETMTEEVR register is already set to "hard wire A". As such + * all there is to do is setup an address comparator that spans + * the entire address range and configure the state and mode bits. + */ + config->addr_val[0] = (u32) 0x0; + config->addr_val[1] = (u32) ~0x0; + config->addr_acctype[0] = flags; + config->addr_acctype[1] = flags; + config->addr_type[0] = ETM_ADDR_TYPE_RANGE; + config->addr_type[1] = ETM_ADDR_TYPE_RANGE; +} + +#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN) + +static int etm_parse_event_config(struct etm_drvdata *drvdata, + struct perf_event_attr *attr) +{ + struct etm_config *config = &drvdata->config; + + if (!attr) + return -EINVAL; + + /* Clear configuration from previous run */ + memset(config, 0, sizeof(struct etm_config)); + + if (attr->exclude_kernel) + config->mode = ETM_MODE_EXCL_KERN; + + if (attr->exclude_user) + config->mode = ETM_MODE_EXCL_USER; + + /* Always start from the default config */ + etm_set_default(config); + + /* + * By default the tracers are configured to trace the whole address + * range. Narrow the field only if requested by user space. + */ + if (config->mode) + etm_config_trace_mode(config); + + /* + * At this time only cycle accurate and timestamp options are + * available. 
+ */ + if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) + return -EINVAL; + + config->ctrl = attr->config; + + return 0; } static void etm_enable_hw(void *info) @@ -252,6 +352,7 @@ static void etm_enable_hw(void *info) int i; u32 etmcr; struct etm_drvdata *drvdata = info; + struct etm_config *config = &drvdata->config; CS_UNLOCK(drvdata->base); @@ -265,65 +366,74 @@ static void etm_enable_hw(void *info) etm_set_prog(drvdata); etmcr = etm_readl(drvdata, ETMCR); - etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG); + /* Clear setting from a previous run if need be */ + etmcr &= ~ETM3X_SUPPORTED_OPTIONS; etmcr |= drvdata->port_size; - etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR); - etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER); - etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR); - etm_writel(drvdata, drvdata->enable_event, ETMTEEVR); - etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1); - etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR); + etmcr |= ETMCR_ETM_EN; + etm_writel(drvdata, config->ctrl | etmcr, ETMCR); + etm_writel(drvdata, config->trigger_event, ETMTRIGGER); + etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR); + etm_writel(drvdata, config->enable_event, ETMTEEVR); + etm_writel(drvdata, config->enable_ctrl1, ETMTECR1); + etm_writel(drvdata, config->fifofull_level, ETMFFLR); for (i = 0; i < drvdata->nr_addr_cmp; i++) { - etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i)); - etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i)); + etm_writel(drvdata, config->addr_val[i], ETMACVRn(i)); + etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i)); } for (i = 0; i < drvdata->nr_cntr; i++) { - etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i)); - etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i)); - etm_writel(drvdata, drvdata->cntr_rld_event[i], + etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i)); + etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i)); + etm_writel(drvdata, config->cntr_rld_event[i], ETMCNTRLDEVRn(i)); - etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i)); - } - etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR); - etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR); - etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR); - etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR); - etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR); - etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR); - etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR); + etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i)); + } + etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR); + etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR); + etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR); + etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR); + etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR); + etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR); + etm_writel(drvdata, config->seq_curr_state, ETMSQR); for (i = 0; i < drvdata->nr_ext_out; i++) etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); for (i = 0; i < drvdata->nr_ctxid_cmp; i++) - etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i)); - etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR); - etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR); + etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i)); + etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR); + etm_writel(drvdata, config->sync_freq, ETMSYNCFR); /* No external input selected */ etm_writel(drvdata, 0x0, ETMEXTINSELR); - etm_writel(drvdata, drvdata->timestamp_event, 
ETMTSEVR); + etm_writel(drvdata, config->timestamp_event, ETMTSEVR); /* No auxiliary control selected */ etm_writel(drvdata, 0x0, ETMAUXCR); etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); /* No VMID comparator value selected */ etm_writel(drvdata, 0x0, ETMVMIDCVR); - /* Ensures trace output is enabled from this ETM */ - etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR); - etm_clr_prog(drvdata); CS_LOCK(drvdata->base); dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); } -static int etm_trace_id(struct coresight_device *csdev) +static int etm_cpu_id(struct coresight_device *csdev) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->cpu; +} + +int etm_get_trace_id(struct etm_drvdata *drvdata) +{ unsigned long flags; int trace_id = -1; - if (!drvdata->enable) + if (!drvdata) + goto out; + + if (!local_read(&drvdata->mode)) return drvdata->traceid; - pm_runtime_get_sync(csdev->dev.parent); + + pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); @@ -332,17 +442,41 @@ static int etm_trace_id(struct coresight_device *csdev) CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(csdev->dev.parent); + pm_runtime_put(drvdata->dev); +out: return trace_id; + +} + +static int etm_trace_id(struct coresight_device *csdev) +{ + struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + return etm_get_trace_id(drvdata); } -static int etm_enable(struct coresight_device *csdev) +static int etm_enable_perf(struct coresight_device *csdev, + struct perf_event_attr *attr) +{ + struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) + return -EINVAL; + + /* Configure the tracer based on the session's specifics */ + etm_parse_event_config(drvdata, attr); + /* And enable it */ + etm_enable_hw(drvdata); + + return 0; +} + +static int etm_enable_sysfs(struct coresight_device *csdev) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); int ret; - pm_runtime_get_sync(csdev->dev.parent); spin_lock(&drvdata->spinlock); /* @@ -357,16 +491,45 @@ static int etm_enable(struct coresight_device *csdev) goto err; } - drvdata->enable = true; drvdata->sticky_enable = true; - spin_unlock(&drvdata->spinlock); dev_info(drvdata->dev, "ETM tracing enabled\n"); return 0; + err: spin_unlock(&drvdata->spinlock); - pm_runtime_put(csdev->dev.parent); + return ret; +} + +static int etm_enable(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode) +{ + int ret; + u32 val; + struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); + + /* Someone is already using the tracer */ + if (val) + return -EBUSY; + + switch (mode) { + case CS_MODE_SYSFS: + ret = etm_enable_sysfs(csdev); + break; + case CS_MODE_PERF: + ret = etm_enable_perf(csdev, attr); + break; + default: + ret = -EINVAL; + } + + /* The tracer didn't start */ + if (ret) + local_set(&drvdata->mode, CS_MODE_DISABLED); + return ret; } @@ -374,18 +537,16 @@ static void etm_disable_hw(void *info) { int i; struct etm_drvdata *drvdata = info; + struct etm_config *config = &drvdata->config; CS_UNLOCK(drvdata->base); etm_set_prog(drvdata); - /* Program trace enable to low by using always false event */ - etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR); - /* Read back sequencer and counters for post trace analysis */ - drvdata->seq_curr_state = 
(etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); + config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); for (i = 0; i < drvdata->nr_cntr; i++) - drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); + config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); etm_set_pwrdwn(drvdata); CS_LOCK(drvdata->base); @@ -393,7 +554,28 @@ static void etm_disable_hw(void *info) dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); } -static void etm_disable(struct coresight_device *csdev) +static void etm_disable_perf(struct coresight_device *csdev) +{ + struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) + return; + + CS_UNLOCK(drvdata->base); + + /* Setting the prog bit disables tracing immediately */ + etm_set_prog(drvdata); + + /* + * There is no way to know when the tracer will be used again so + * power down the tracer. + */ + etm_set_pwrdwn(drvdata); + + CS_LOCK(drvdata->base); +} + +static void etm_disable_sysfs(struct coresight_device *csdev) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -411,1235 +593,52 @@ static void etm_disable(struct coresight_device *csdev) * ensures that register writes occur when cpu is powered. */ smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); - drvdata->enable = false; spin_unlock(&drvdata->spinlock); put_online_cpus(); - pm_runtime_put(csdev->dev.parent); dev_info(drvdata->dev, "ETM tracing disabled\n"); } -static const struct coresight_ops_source etm_source_ops = { - .trace_id = etm_trace_id, - .enable = etm_enable, - .disable = etm_disable, -}; - -static const struct coresight_ops etm_cs_ops = { - .source_ops = &etm_source_ops, -}; - -static ssize_t nr_addr_cmp_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_addr_cmp; - return sprintf(buf, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_addr_cmp); - -static ssize_t nr_cntr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_cntr; - return sprintf(buf, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_cntr); - -static ssize_t nr_ctxid_cmp_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_ctxid_cmp; - return sprintf(buf, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_ctxid_cmp); - -static ssize_t etmsr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long flags, val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - CS_UNLOCK(drvdata->base); - - val = etm_readl(drvdata, ETMSR); - - CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); - - return sprintf(buf, "%#lx\n", val); -} -static DEVICE_ATTR_RO(etmsr); - -static ssize_t reset_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int i, ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - if (val) { - spin_lock(&drvdata->spinlock); - drvdata->mode = ETM_MODE_EXCLUDE; - drvdata->ctrl = 0x0; - drvdata->trigger_event = 
ETM_DEFAULT_EVENT_VAL; - drvdata->startstop_ctrl = 0x0; - drvdata->addr_idx = 0x0; - for (i = 0; i < drvdata->nr_addr_cmp; i++) { - drvdata->addr_val[i] = 0x0; - drvdata->addr_acctype[i] = 0x0; - drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; - } - drvdata->cntr_idx = 0x0; - - etm_set_default(drvdata); - spin_unlock(&drvdata->spinlock); - } - - return size; -} -static DEVICE_ATTR_WO(reset); - -static ssize_t mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->mode; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->mode = val & ETM_MODE_ALL; - - if (drvdata->mode & ETM_MODE_EXCLUDE) - drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC; - else - drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC; - - if (drvdata->mode & ETM_MODE_CYCACC) - drvdata->ctrl |= ETMCR_CYC_ACC; - else - drvdata->ctrl &= ~ETMCR_CYC_ACC; - - if (drvdata->mode & ETM_MODE_STALL) { - if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { - dev_warn(drvdata->dev, "stall mode not supported\n"); - ret = -EINVAL; - goto err_unlock; - } - drvdata->ctrl |= ETMCR_STALL_MODE; - } else - drvdata->ctrl &= ~ETMCR_STALL_MODE; - - if (drvdata->mode & ETM_MODE_TIMESTAMP) { - if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { - dev_warn(drvdata->dev, "timestamp not supported\n"); - ret = -EINVAL; - goto err_unlock; - } - drvdata->ctrl |= ETMCR_TIMESTAMP_EN; - } else - drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN; - - if (drvdata->mode & ETM_MODE_CTXID) - drvdata->ctrl |= ETMCR_CTXID_SIZE; - else - drvdata->ctrl &= ~ETMCR_CTXID_SIZE; - spin_unlock(&drvdata->spinlock); - - return size; - -err_unlock: - spin_unlock(&drvdata->spinlock); - return ret; -} -static DEVICE_ATTR_RW(mode); - -static ssize_t trigger_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->trigger_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t trigger_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->trigger_event = val & ETM_EVENT_MASK; - - return size; -} -static DEVICE_ATTR_RW(trigger_event); - -static ssize_t enable_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->enable_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t enable_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->enable_event = val & ETM_EVENT_MASK; - - return size; -} -static DEVICE_ATTR_RW(enable_event); - -static ssize_t fifofull_level_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = 
drvdata->fifofull_level; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t fifofull_level_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->fifofull_level = val; - - return size; -} -static DEVICE_ATTR_RW(fifofull_level); - -static ssize_t addr_idx_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->addr_idx; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t addr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - if (val >= drvdata->nr_addr_cmp) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->addr_idx = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_idx); - -static ssize_t addr_single_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u8 idx; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - - val = drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t addr_single_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - - drvdata->addr_val[idx] = val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_single); - -static ssize_t addr_range_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u8 idx; - unsigned long val1, val2; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val1 = drvdata->addr_val[idx]; - val2 = drvdata->addr_val[idx + 1]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx %#lx\n", val1, val2); -} - -static ssize_t addr_range_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val1, val2; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - 
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - /* Lower address comparator cannot have a higher address value */ - if (val1 > val2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = val1; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_val[idx + 1] = val2; - drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; - drvdata->enable_ctrl1 |= (1 << (idx/2)); - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_range); - -static ssize_t addr_start_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u8 idx; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t addr_start_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; - drvdata->startstop_ctrl |= (1 << idx); - drvdata->enable_ctrl1 |= BIT(25); - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_start); - -static ssize_t addr_stop_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u8 idx; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t addr_stop_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; - drvdata->startstop_ctrl |= (1 << (idx + 16)); - drvdata->enable_ctrl1 |= ETMTECR1_START_STOP; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_stop); - -static ssize_t 
addr_acctype_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val = drvdata->addr_acctype[drvdata->addr_idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t addr_acctype_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->addr_acctype[drvdata->addr_idx] = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(addr_acctype); - -static ssize_t cntr_idx_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->cntr_idx; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t cntr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - if (val >= drvdata->nr_cntr) - return -EINVAL; - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->cntr_idx = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(cntr_idx); - -static ssize_t cntr_rld_val_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val = drvdata->cntr_rld_val[drvdata->cntr_idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t cntr_rld_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->cntr_rld_val[drvdata->cntr_idx] = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(cntr_rld_val); - -static ssize_t cntr_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val = drvdata->cntr_event[drvdata->cntr_idx]; - spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t cntr_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(cntr_event); - -static ssize_t cntr_rld_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val = drvdata->cntr_rld_event[drvdata->cntr_idx]; - 
spin_unlock(&drvdata->spinlock); - - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t cntr_rld_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(cntr_rld_event); - -static ssize_t cntr_val_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int i, ret = 0; - u32 val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (!drvdata->enable) { - spin_lock(&drvdata->spinlock); - for (i = 0; i < drvdata->nr_cntr; i++) - ret += sprintf(buf, "counter %d: %x\n", - i, drvdata->cntr_val[i]); - spin_unlock(&drvdata->spinlock); - return ret; - } - - for (i = 0; i < drvdata->nr_cntr; i++) { - val = etm_readl(drvdata, ETMCNTVRn(i)); - ret += sprintf(buf, "counter %d: %x\n", i, val); - } - - return ret; -} - -static ssize_t cntr_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - spin_lock(&drvdata->spinlock); - drvdata->cntr_val[drvdata->cntr_idx] = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(cntr_val); - -static ssize_t seq_12_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_12_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_12_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_12_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_12_event); - -static ssize_t seq_21_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_21_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_21_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_21_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_21_event); - -static ssize_t seq_23_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_23_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_23_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_23_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_23_event); - -static ssize_t seq_31_event_show(struct 
device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_31_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_31_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_31_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_31_event); - -static ssize_t seq_32_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_32_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_32_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_32_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_32_event); - -static ssize_t seq_13_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_13_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_13_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->seq_13_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_13_event); - -static ssize_t seq_curr_state_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val, flags; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (!drvdata->enable) { - val = drvdata->seq_curr_state; - goto out; - } - - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - - CS_UNLOCK(drvdata->base); - val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); - CS_LOCK(drvdata->base); - - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); -out: - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t seq_curr_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - if (val > ETM_SEQ_STATE_MAX_VAL) - return -EINVAL; - - drvdata->seq_curr_state = val; - - return size; -} -static DEVICE_ATTR_RW(seq_curr_state); - -static ssize_t ctxid_idx_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ctxid_idx; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t ctxid_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static void etm_disable(struct coresight_device *csdev) { - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - if (val >= drvdata->nr_ctxid_cmp) - 
return -EINVAL; + u32 mode; + struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. + * For as long as the tracer isn't disabled another entity can't + * change its status. As such we can read the status here without + * fearing it will change under us. */ - spin_lock(&drvdata->spinlock); - drvdata->ctxid_idx = val; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(ctxid_idx); - -static ssize_t ctxid_pid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val = drvdata->ctxid_vpid[drvdata->ctxid_idx]; - spin_unlock(&drvdata->spinlock); + mode = local_read(&drvdata->mode); - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t ctxid_pid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long vpid, pid; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &vpid); - if (ret) - return ret; - - pid = coresight_vpid_to_pid(vpid); - - spin_lock(&drvdata->spinlock); - drvdata->ctxid_pid[drvdata->ctxid_idx] = pid; - drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid; - spin_unlock(&drvdata->spinlock); - - return size; -} -static DEVICE_ATTR_RW(ctxid_pid); - -static ssize_t ctxid_mask_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ctxid_mask; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t ctxid_mask_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->ctxid_mask = val; - return size; -} -static DEVICE_ATTR_RW(ctxid_mask); - -static ssize_t sync_freq_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->sync_freq; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t sync_freq_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->sync_freq = val & ETM_SYNC_MASK; - return size; -} -static DEVICE_ATTR_RW(sync_freq); - -static ssize_t timestamp_event_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->timestamp_event; - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t timestamp_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->timestamp_event = val & ETM_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(timestamp_event); - -static ssize_t cpu_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - 
val = drvdata->cpu; - return scnprintf(buf, PAGE_SIZE, "%d\n", val); - -} -static DEVICE_ATTR_RO(cpu); - -static ssize_t traceid_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned long val, flags; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (!drvdata->enable) { - val = drvdata->traceid; - goto out; + switch (mode) { + case CS_MODE_DISABLED: + break; + case CS_MODE_SYSFS: + etm_disable_sysfs(csdev); + break; + case CS_MODE_PERF: + etm_disable_perf(csdev); + break; + default: + WARN_ON_ONCE(mode); + return; } - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - CS_UNLOCK(drvdata->base); - - val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); - - CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); -out: - return sprintf(buf, "%#lx\n", val); -} - -static ssize_t traceid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int ret; - unsigned long val; - struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - - ret = kstrtoul(buf, 16, &val); - if (ret) - return ret; - - drvdata->traceid = val & ETM_TRACEID_MASK; - return size; + if (mode) + local_set(&drvdata->mode, CS_MODE_DISABLED); } -static DEVICE_ATTR_RW(traceid); - -static struct attribute *coresight_etm_attrs[] = { - &dev_attr_nr_addr_cmp.attr, - &dev_attr_nr_cntr.attr, - &dev_attr_nr_ctxid_cmp.attr, - &dev_attr_etmsr.attr, - &dev_attr_reset.attr, - &dev_attr_mode.attr, - &dev_attr_trigger_event.attr, - &dev_attr_enable_event.attr, - &dev_attr_fifofull_level.attr, - &dev_attr_addr_idx.attr, - &dev_attr_addr_single.attr, - &dev_attr_addr_range.attr, - &dev_attr_addr_start.attr, - &dev_attr_addr_stop.attr, - &dev_attr_addr_acctype.attr, - &dev_attr_cntr_idx.attr, - &dev_attr_cntr_rld_val.attr, - &dev_attr_cntr_event.attr, - &dev_attr_cntr_rld_event.attr, - &dev_attr_cntr_val.attr, - &dev_attr_seq_12_event.attr, - &dev_attr_seq_21_event.attr, - &dev_attr_seq_23_event.attr, - &dev_attr_seq_31_event.attr, - &dev_attr_seq_32_event.attr, - &dev_attr_seq_13_event.attr, - &dev_attr_seq_curr_state.attr, - &dev_attr_ctxid_idx.attr, - &dev_attr_ctxid_pid.attr, - &dev_attr_ctxid_mask.attr, - &dev_attr_sync_freq.attr, - &dev_attr_timestamp_event.attr, - &dev_attr_traceid.attr, - &dev_attr_cpu.attr, - NULL, -}; - -#define coresight_simple_func(name, offset) \ -static ssize_t name##_show(struct device *_dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ - return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ - readl_relaxed(drvdata->base + offset)); \ -} \ -DEVICE_ATTR_RO(name) - -coresight_simple_func(etmccr, ETMCCR); -coresight_simple_func(etmccer, ETMCCER); -coresight_simple_func(etmscr, ETMSCR); -coresight_simple_func(etmidr, ETMIDR); -coresight_simple_func(etmcr, ETMCR); -coresight_simple_func(etmtraceidr, ETMTRACEIDR); -coresight_simple_func(etmteevr, ETMTEEVR); -coresight_simple_func(etmtssvr, ETMTSSCR); -coresight_simple_func(etmtecr1, ETMTECR1); -coresight_simple_func(etmtecr2, ETMTECR2); - -static struct attribute *coresight_etm_mgmt_attrs[] = { - &dev_attr_etmccr.attr, - &dev_attr_etmccer.attr, - &dev_attr_etmscr.attr, - &dev_attr_etmidr.attr, - &dev_attr_etmcr.attr, - &dev_attr_etmtraceidr.attr, - &dev_attr_etmteevr.attr, - &dev_attr_etmtssvr.attr, - &dev_attr_etmtecr1.attr, - &dev_attr_etmtecr2.attr, - NULL, -}; -static const struct attribute_group coresight_etm_group = { - 
.attrs = coresight_etm_attrs, -}; - - -static const struct attribute_group coresight_etm_mgmt_group = { - .attrs = coresight_etm_mgmt_attrs, - .name = "mgmt", +static const struct coresight_ops_source etm_source_ops = { + .cpu_id = etm_cpu_id, + .trace_id = etm_trace_id, + .enable = etm_enable, + .disable = etm_disable, }; -static const struct attribute_group *coresight_etm_groups[] = { - &coresight_etm_group, - &coresight_etm_mgmt_group, - NULL, +static const struct coresight_ops etm_cs_ops = { + .source_ops = &etm_source_ops, }; static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, @@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, etmdrvdata[cpu]->os_unlock = true; } - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm_enable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, case CPU_DYING: spin_lock(&etmdrvdata[cpu]->spinlock); - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm_disable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info) u32 etmccr; struct etm_drvdata *drvdata = info; + /* Make sure all registers are accessible */ + etm_os_unlock(drvdata); + CS_UNLOCK(drvdata->base); /* First dummy read */ @@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info) CS_LOCK(drvdata->base); } -static void etm_init_default_data(struct etm_drvdata *drvdata) +static void etm_init_trace_id(struct etm_drvdata *drvdata) { - /* - * A trace ID of value 0 is invalid, so let's start at some - * random value that fits in 7 bits and will be just as good. - */ - static int etm3x_traceid = 0x10; - - u32 flags = (1 << 0 | /* instruction execute*/ - 3 << 3 | /* ARM instruction */ - 0 << 5 | /* No data value comparison */ - 0 << 7 | /* No exact mach */ - 0 << 8 | /* Ignore context ID */ - 0 << 10); /* Security ignored */ - - /* - * Initial configuration only - guarantees sources handled by - * this driver have a unique ID at startup time but not between - * all other types of sources. For that we lean on the core - * framework. 
- */ - drvdata->traceid = etm3x_traceid++; - drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN); - drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; - if (drvdata->nr_addr_cmp >= 2) { - drvdata->addr_val[0] = (u32) _stext; - drvdata->addr_val[1] = (u32) _etext; - drvdata->addr_acctype[0] = flags; - drvdata->addr_acctype[1] = flags; - drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; - } - - etm_set_default(drvdata); + drvdata->traceid = coresight_get_trace_id(drvdata->cpu); } static int etm_probe(struct amba_device *adev, const struct amba_id *id) @@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) get_online_cpus(); etmdrvdata[drvdata->cpu] = drvdata; - if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1)) - drvdata->os_unlock = true; - if (smp_call_function_single(drvdata->cpu, etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); @@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) ret = -EINVAL; goto err_arch_supported; } - etm_init_default_data(drvdata); + + etm_init_trace_id(drvdata); + etm_set_default(&drvdata->config); desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; @@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) goto err_arch_supported; } + ret = etm_perf_symlink(drvdata->csdev, true); + if (ret) { + coresight_unregister(drvdata->csdev); + goto err_arch_supported; + } + pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); @@ -1877,17 +853,6 @@ err_arch_supported: return ret; } -static int etm_remove(struct amba_device *adev) -{ - struct etm_drvdata *drvdata = amba_get_drvdata(adev); - - coresight_unregister(drvdata->csdev); - if (--etm_count == 0) - unregister_hotcpu_notifier(&etm_cpu_notifier); - - return 0; -} - #ifdef CONFIG_PM static int etm_runtime_suspend(struct device *dev) { @@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = { .name = "coresight-etm3x", .owner = THIS_MODULE, .pm = &etm_dev_pm_ops, + .suppress_bind_attrs = true, }, .probe = etm_probe, - .remove = etm_remove, .id_table = etm_ids, }; - -module_amba_driver(etm_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Program Flow Trace driver"); +builtin_amba_driver(etm_driver); diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c new file mode 100644 index 000000000000..7c84308c5564 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -0,0 +1,2126 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/pm_runtime.h> +#include <linux/sysfs.h> +#include "coresight-etm4x.h" + +static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) +{ + u8 idx; + struct etmv4_config *config = &drvdata->config; + + idx = config->addr_idx; + + /* + * TRCACATRn.TYPE bit[1:0]: type of comparison + * the trace unit performs + */ + if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { + if (idx % 2 != 0) + return -EINVAL; + + /* + * We are performing instruction address comparison. Set the + * relevant bit of ViewInst Include/Exclude Control register + * for corresponding address comparator pair. + */ + if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE || + config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) + return -EINVAL; + + if (exclude == true) { + /* + * Set exclude bit and unset the include bit + * corresponding to comparator pair + */ + config->viiectlr |= BIT(idx / 2 + 16); + config->viiectlr &= ~BIT(idx / 2); + } else { + /* + * Set include bit and unset exclude bit + * corresponding to comparator pair + */ + config->viiectlr |= BIT(idx / 2); + config->viiectlr &= ~BIT(idx / 2 + 16); + } + } + return 0; +} + +static ssize_t nr_pe_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_pe_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_pe_cmp); + +static ssize_t nr_addr_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_addr_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_addr_cmp); + +static ssize_t nr_cntr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_cntr; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_cntr); + +static ssize_t nr_ext_inp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_ext_inp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_ext_inp); + +static ssize_t numcidc_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->numcidc; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(numcidc); + +static ssize_t numvmidc_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->numvmidc; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(numvmidc); + +static ssize_t nrseqstate_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nrseqstate; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nrseqstate); + +static ssize_t nr_resource_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_resource; + return scnprintf(buf, 
PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_resource); + +static ssize_t nr_ss_cmp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->nr_ss_cmp; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} +static DEVICE_ATTR_RO(nr_ss_cmp); + +static ssize_t reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int i; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val) + config->mode = 0x0; + + /* Disable data tracing: do not trace load and store data transfers */ + config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); + config->cfg &= ~(BIT(1) | BIT(2)); + + /* Disable data value and data address tracing */ + config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | + ETM_MODE_DATA_TRACE_VAL); + config->cfg &= ~(BIT(16) | BIT(17)); + + /* Disable all events tracing */ + config->eventctrl0 = 0x0; + config->eventctrl1 = 0x0; + + /* Disable timestamp event */ + config->ts_ctrl = 0x0; + + /* Disable stalling */ + config->stall_ctrl = 0x0; + + /* Reset trace synchronization period to 2^8 = 256 bytes*/ + if (drvdata->syncpr == false) + config->syncfreq = 0x8; + + /* + * Enable ViewInst to trace everything with start-stop logic in + * started state. ARM recommends start-stop logic is set before + * each trace run. + */ + config->vinst_ctrl |= BIT(0); + if (drvdata->nr_addr_cmp == true) { + config->mode |= ETM_MODE_VIEWINST_STARTSTOP; + /* SSSTATUS, bit[9] */ + config->vinst_ctrl |= BIT(9); + } + + /* No address range filtering for ViewInst */ + config->viiectlr = 0x0; + + /* No start-stop filtering for ViewInst */ + config->vissctlr = 0x0; + + /* Disable seq events */ + for (i = 0; i < drvdata->nrseqstate-1; i++) + config->seq_ctrl[i] = 0x0; + config->seq_rst = 0x0; + config->seq_state = 0x0; + + /* Disable external input events */ + config->ext_inp = 0x0; + + config->cntr_idx = 0x0; + for (i = 0; i < drvdata->nr_cntr; i++) { + config->cntrldvr[i] = 0x0; + config->cntr_ctrl[i] = 0x0; + config->cntr_val[i] = 0x0; + } + + config->res_idx = 0x0; + for (i = 0; i < drvdata->nr_resource; i++) + config->res_ctrl[i] = 0x0; + + for (i = 0; i < drvdata->nr_ss_cmp; i++) { + config->ss_ctrl[i] = 0x0; + config->ss_pe_cmp[i] = 0x0; + } + + config->addr_idx = 0x0; + for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { + config->addr_val[i] = 0x0; + config->addr_acc[i] = 0x0; + config->addr_type[i] = ETM_ADDR_TYPE_NONE; + } + + config->ctxid_idx = 0x0; + for (i = 0; i < drvdata->numcidc; i++) { + config->ctxid_pid[i] = 0x0; + config->ctxid_vpid[i] = 0x0; + } + + config->ctxid_mask0 = 0x0; + config->ctxid_mask1 = 0x0; + + config->vmid_idx = 0x0; + for (i = 0; i < drvdata->numvmidc; i++) + config->vmid_val[i] = 0x0; + config->vmid_mask0 = 0x0; + config->vmid_mask1 = 0x0; + + drvdata->trcid = drvdata->cpu + 1; + + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_WO(reset); + +static ssize_t mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->mode; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t mode_store(struct device *dev, + struct device_attribute *attr, 
+ const char *buf, size_t size) +{ + unsigned long val, mode; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->mode = val & ETMv4_MODE_ALL; + + if (config->mode & ETM_MODE_EXCLUDE) + etm4_set_mode_exclude(drvdata, true); + else + etm4_set_mode_exclude(drvdata, false); + + if (drvdata->instrp0 == true) { + /* start by clearing instruction P0 field */ + config->cfg &= ~(BIT(1) | BIT(2)); + if (config->mode & ETM_MODE_LOAD) + /* 0b01 Trace load instructions as P0 instructions */ + config->cfg |= BIT(1); + if (config->mode & ETM_MODE_STORE) + /* 0b10 Trace store instructions as P0 instructions */ + config->cfg |= BIT(2); + if (config->mode & ETM_MODE_LOAD_STORE) + /* + * 0b11 Trace load and store instructions + * as P0 instructions + */ + config->cfg |= BIT(1) | BIT(2); + } + + /* bit[3], Branch broadcast mode */ + if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) + config->cfg |= BIT(3); + else + config->cfg &= ~BIT(3); + + /* bit[4], Cycle counting instruction trace bit */ + if ((config->mode & ETMv4_MODE_CYCACC) && + (drvdata->trccci == true)) + config->cfg |= BIT(4); + else + config->cfg &= ~BIT(4); + + /* bit[6], Context ID tracing bit */ + if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) + config->cfg |= BIT(6); + else + config->cfg &= ~BIT(6); + + if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) + config->cfg |= BIT(7); + else + config->cfg &= ~BIT(7); + + /* bits[10:8], Conditional instruction tracing bit */ + mode = ETM_MODE_COND(config->mode); + if (drvdata->trccond == true) { + config->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); + config->cfg |= mode << 8; + } + + /* bit[11], Global timestamp tracing bit */ + if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) + config->cfg |= BIT(11); + else + config->cfg &= ~BIT(11); + + /* bit[12], Return stack enable bit */ + if ((config->mode & ETM_MODE_RETURNSTACK) && + (drvdata->retstack == true)) + config->cfg |= BIT(12); + else + config->cfg &= ~BIT(12); + + /* bits[14:13], Q element enable field */ + mode = ETM_MODE_QELEM(config->mode); + /* start by clearing QE bits */ + config->cfg &= ~(BIT(13) | BIT(14)); + /* if supported, Q elements with instruction counts are enabled */ + if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) + config->cfg |= BIT(13); + /* + * if supported, Q elements with and without instruction + * counts are enabled + */ + if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) + config->cfg |= BIT(14); + + /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ + if ((config->mode & ETM_MODE_ATB_TRIGGER) && + (drvdata->atbtrig == true)) + config->eventctrl1 |= BIT(11); + else + config->eventctrl1 &= ~BIT(11); + + /* bit[12], Low-power state behavior override bit */ + if ((config->mode & ETM_MODE_LPOVERRIDE) && + (drvdata->lpoverride == true)) + config->eventctrl1 |= BIT(12); + else + config->eventctrl1 &= ~BIT(12); + + /* bit[8], Instruction stall bit */ + if (config->mode & ETM_MODE_ISTALL_EN) + config->stall_ctrl |= BIT(8); + else + config->stall_ctrl &= ~BIT(8); + + /* bit[10], Prioritize instruction trace bit */ + if (config->mode & ETM_MODE_INSTPRIO) + config->stall_ctrl |= BIT(10); + else + config->stall_ctrl &= ~BIT(10); + + /* bit[13], Trace overflow prevention bit */ + if ((config->mode & ETM_MODE_NOOVERFLOW) && + (drvdata->nooverflow == true)) + config->stall_ctrl |= BIT(13); + else + config->stall_ctrl &= 
~BIT(13); + + /* bit[9] Start/stop logic control bit */ + if (config->mode & ETM_MODE_VIEWINST_STARTSTOP) + config->vinst_ctrl |= BIT(9); + else + config->vinst_ctrl &= ~BIT(9); + + /* bit[10], Whether a trace unit must trace a Reset exception */ + if (config->mode & ETM_MODE_TRACE_RESET) + config->vinst_ctrl |= BIT(10); + else + config->vinst_ctrl &= ~BIT(10); + + /* bit[11], Whether a trace unit must trace a system error exception */ + if ((config->mode & ETM_MODE_TRACE_ERR) && + (drvdata->trc_error == true)) + config->vinst_ctrl |= BIT(11); + else + config->vinst_ctrl &= ~BIT(11); + + if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) + etm4_config_trace_mode(config); + + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(mode); + +static ssize_t pe_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->pe_sel; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t pe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val > drvdata->nr_pe) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + + config->pe_sel = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(pe); + +static ssize_t event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->eventctrl0; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + switch (drvdata->nr_event) { + case 0x0: + /* EVENT0, bits[7:0] */ + config->eventctrl0 = val & 0xFF; + break; + case 0x1: + /* EVENT1, bits[15:8] */ + config->eventctrl0 = val & 0xFFFF; + break; + case 0x2: + /* EVENT2, bits[23:16] */ + config->eventctrl0 = val & 0xFFFFFF; + break; + case 0x3: + /* EVENT3, bits[31:24] */ + config->eventctrl0 = val; + break; + default: + break; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event); + +static ssize_t event_instren_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = BMVAL(config->eventctrl1, 0, 3); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_instren_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* start by clearing all instruction event enable bits */ + config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); + 
switch (drvdata->nr_event) { + case 0x0: + /* generate Event element for event 1 */ + config->eventctrl1 |= val & BIT(1); + break; + case 0x1: + /* generate Event element for event 1 and 2 */ + config->eventctrl1 |= val & (BIT(0) | BIT(1)); + break; + case 0x2: + /* generate Event element for event 1, 2 and 3 */ + config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); + break; + case 0x3: + /* generate Event element for all 4 events */ + config->eventctrl1 |= val & 0xF; + break; + default: + break; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event_instren); + +static ssize_t event_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ts_ctrl; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!drvdata->ts_size) + return -EINVAL; + + config->ts_ctrl = val & ETMv4_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(event_ts); + +static ssize_t syncfreq_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->syncfreq; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t syncfreq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (drvdata->syncpr == true) + return -EINVAL; + + config->syncfreq = val & ETMv4_SYNC_MASK; + return size; +} +static DEVICE_ATTR_RW(syncfreq); + +static ssize_t cyc_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ccctlr; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cyc_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val < drvdata->ccitmin) + return -EINVAL; + + config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; + return size; +} +static DEVICE_ATTR_RW(cyc_threshold); + +static ssize_t bb_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->bb_ctrl; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t bb_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if 
(drvdata->trcbb == false) + return -EINVAL; + if (!drvdata->nr_addr_cmp) + return -EINVAL; + /* + * Bit[7:0] selects which address range comparator is used for + * branch broadcast control. + */ + if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) + return -EINVAL; + + config->bb_ctrl = val; + return size; +} +static DEVICE_ATTR_RW(bb_ctrl); + +static ssize_t event_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->vinst_ctrl & ETMv4_EVENT_MASK; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t event_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + val &= ETMv4_EVENT_MASK; + config->vinst_ctrl &= ~ETMv4_EVENT_MASK; + config->vinst_ctrl |= val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(event_vinst); + +static ssize_t s_exlevel_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = BMVAL(config->vinst_ctrl, 16, 19); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t s_exlevel_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ + config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); + /* enable instruction tracing for corresponding exception level */ + val &= drvdata->s_ex_level; + config->vinst_ctrl |= (val << 16); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(s_exlevel_vinst); + +static ssize_t ns_exlevel_vinst_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* EXLEVEL_NS, bits[23:20] */ + val = BMVAL(config->vinst_ctrl, 20, 23); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ns_exlevel_vinst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* clear EXLEVEL_NS bits (bit[23] is never implemented */ + config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); + /* enable instruction tracing for corresponding exception level */ + val &= drvdata->ns_ex_level; + config->vinst_ctrl |= (val << 20); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ns_exlevel_vinst); + +static ssize_t addr_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = 
&drvdata->config; + + val = config->addr_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nr_addr_cmp * 2) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->addr_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_idx); + +static ssize_t addr_instdatatype_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t len; + u8 val, idx; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + val = BMVAL(config->addr_acc[idx], 0, 1); + len = scnprintf(buf, PAGE_SIZE, "%s\n", + val == ETM_INSTR_ADDR ? "instr" : + (val == ETM_DATA_LOAD_ADDR ? "data_load" : + (val == ETM_DATA_STORE_ADDR ? "data_store" : + "data_load_store"))); + spin_unlock(&drvdata->spinlock); + return len; +} + +static ssize_t addr_instdatatype_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + char str[20] = ""; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (strlen(buf) >= 20) + return -EINVAL; + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!strcmp(str, "instr")) + /* TYPE, bits[1:0] */ + config->addr_acc[idx] &= ~(BIT(0) | BIT(1)); + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_instdatatype); + +static ssize_t addr_single_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + idx = config->addr_idx; + spin_lock(&drvdata->spinlock); + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_single_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_single); + +static ssize_t addr_range_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config 
*config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val1 = (unsigned long)config->addr_val[idx]; + val2 = (unsigned long)config->addr_val[idx + 1]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t addr_range_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + /* lower address comparator cannot have a higher address value */ + if (val1 > val2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (idx % 2 != 0) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || + (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && + config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val1; + config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; + config->addr_val[idx + 1] = (u64)val2; + config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; + /* + * Program include or exclude control bits for vinst or vdata + * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE + */ + if (config->mode & ETM_MODE_EXCLUDE) + etm4_set_mode_exclude(drvdata, true); + else + etm4_set_mode_exclude(drvdata, false); + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_range); + +static ssize_t addr_start_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_start_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!drvdata->nr_addr_cmp) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_START)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_START; + config->vissctlr |= BIT(idx); + /* SSSTATUS, bit[9] - turn on start/stop logic */ + config->vinst_ctrl |= BIT(9); + spin_unlock(&drvdata->spinlock); + return size; +} +static 
DEVICE_ATTR_RW(addr_start); + +static ssize_t addr_stop_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + val = (unsigned long)config->addr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_stop_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!drvdata->nr_addr_cmp) { + spin_unlock(&drvdata->spinlock); + return -EINVAL; + } + if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || + config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { + spin_unlock(&drvdata->spinlock); + return -EPERM; + } + + config->addr_val[idx] = (u64)val; + config->addr_type[idx] = ETM_ADDR_TYPE_STOP; + config->vissctlr |= BIT(idx + 16); + /* SSSTATUS, bit[9] - turn on start/stop logic */ + config->vinst_ctrl |= BIT(9); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_stop); + +static ssize_t addr_ctxtype_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t len; + u8 idx, val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* CONTEXTTYPE, bits[3:2] */ + val = BMVAL(config->addr_acc[idx], 2, 3); + len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : + (val == ETM_CTX_CTXID ? "ctxid" : + (val == ETM_CTX_VMID ? 
"vmid" : "all"))); + spin_unlock(&drvdata->spinlock); + return len; +} + +static ssize_t addr_ctxtype_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + char str[10] = ""; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (strlen(buf) >= 10) + return -EINVAL; + if (sscanf(buf, "%s", str) != 1) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + if (!strcmp(str, "none")) + /* start by clearing context type bits */ + config->addr_acc[idx] &= ~(BIT(2) | BIT(3)); + else if (!strcmp(str, "ctxid")) { + /* 0b01 The trace unit performs a Context ID */ + if (drvdata->numcidc) { + config->addr_acc[idx] |= BIT(2); + config->addr_acc[idx] &= ~BIT(3); + } + } else if (!strcmp(str, "vmid")) { + /* 0b10 The trace unit performs a VMID */ + if (drvdata->numvmidc) { + config->addr_acc[idx] &= ~BIT(2); + config->addr_acc[idx] |= BIT(3); + } + } else if (!strcmp(str, "all")) { + /* + * 0b11 The trace unit performs a Context ID + * comparison and a VMID + */ + if (drvdata->numcidc) + config->addr_acc[idx] |= BIT(2); + if (drvdata->numvmidc) + config->addr_acc[idx] |= BIT(3); + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_ctxtype); + +static ssize_t addr_context_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* context ID comparator bits[6:4] */ + val = BMVAL(config->addr_acc[idx], 4, 6); + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t addr_context_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) + return -EINVAL; + if (val >= (drvdata->numcidc >= drvdata->numvmidc ? + drvdata->numcidc : drvdata->numvmidc)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->addr_idx; + /* clear context ID comparator bits[6:4] */ + config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); + config->addr_acc[idx] |= (val << 4); + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(addr_context); + +static ssize_t seq_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nrseqstate - 1) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. 
+ */ + spin_lock(&drvdata->spinlock); + config->seq_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(seq_idx); + +static ssize_t seq_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_state; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nrseqstate) + return -EINVAL; + + config->seq_state = val; + return size; +} +static DEVICE_ATTR_RW(seq_state); + +static ssize_t seq_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->seq_idx; + val = config->seq_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->seq_idx; + /* RST, bits[7:0] */ + config->seq_ctrl[idx] = val & 0xFF; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(seq_event); + +static ssize_t seq_reset_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->seq_rst; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t seq_reset_event_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (!(drvdata->nrseqstate)) + return -EINVAL; + + config->seq_rst = val & ETMv4_EVENT_MASK; + return size; +} +static DEVICE_ATTR_RW(seq_reset_event); + +static ssize_t cntr_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->cntr_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->nr_cntr) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. 
+ */ + spin_lock(&drvdata->spinlock); + config->cntr_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_idx); + +static ssize_t cntrldvr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntrldvr[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntrldvr_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val > ETM_CNTR_MAX_VAL) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntrldvr[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntrldvr); + +static ssize_t cntr_val_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntr_val[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val > ETM_CNTR_MAX_VAL) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntr_val[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_val); + +static ssize_t cntr_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + val = config->cntr_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t cntr_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->cntr_idx; + config->cntr_ctrl[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(cntr_ctrl); + +static ssize_t res_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->res_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t res_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct 
etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + /* Resource selector pair 0 is always implemented and reserved */ + if ((val == 0) || (val >= drvdata->nr_resource)) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->res_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(res_idx); + +static ssize_t res_ctrl_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->res_idx; + val = config->res_ctrl[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t res_ctrl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + idx = config->res_idx; + /* For odd idx pair inversal bit is RES0 */ + if (idx % 2 != 0) + /* PAIRINV, bit[21] */ + val &= ~BIT(21); + config->res_ctrl[idx] = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(res_ctrl); + +static ssize_t ctxid_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->ctxid_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ctxid_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->numcidc) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->ctxid_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_idx); + +static ssize_t ctxid_pid_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 idx; + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + idx = config->ctxid_idx; + val = (unsigned long)config->ctxid_vpid[idx]; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t ctxid_pid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 idx; + unsigned long vpid, pid; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when ctxid tracing is enabled, i.e. 
at least one + * ctxid comparator is implemented and ctxid is greater than 0 bits + * in length + */ + if (!drvdata->ctxid_size || !drvdata->numcidc) + return -EINVAL; + if (kstrtoul(buf, 16, &vpid)) + return -EINVAL; + + pid = coresight_vpid_to_pid(vpid); + + spin_lock(&drvdata->spinlock); + idx = config->ctxid_idx; + config->ctxid_pid[idx] = (u64)pid; + config->ctxid_vpid[idx] = (u64)vpid; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_pid); + +static ssize_t ctxid_masks_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val1 = config->ctxid_mask0; + val2 = config->ctxid_mask1; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t ctxid_masks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 i, j, maskbyte; + unsigned long val1, val2, mask; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when ctxid tracing is enabled, i.e. at least one + * ctxid comparator is implemented and ctxid is greater than 0 bits + * in length + */ + if (!drvdata->ctxid_size || !drvdata->numcidc) + return -EINVAL; + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* + * each byte[0..3] controls mask value applied to ctxid + * comparator[0..3] + */ + switch (drvdata->numcidc) { + case 0x1: + /* COMP0, bits[7:0] */ + config->ctxid_mask0 = val1 & 0xFF; + break; + case 0x2: + /* COMP1, bits[15:8] */ + config->ctxid_mask0 = val1 & 0xFFFF; + break; + case 0x3: + /* COMP2, bits[23:16] */ + config->ctxid_mask0 = val1 & 0xFFFFFF; + break; + case 0x4: + /* COMP3, bits[31:24] */ + config->ctxid_mask0 = val1; + break; + case 0x5: + /* COMP4, bits[7:0] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFF; + break; + case 0x6: + /* COMP5, bits[15:8] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFFFF; + break; + case 0x7: + /* COMP6, bits[23:16] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2 & 0xFFFFFF; + break; + case 0x8: + /* COMP7, bits[31:24] */ + config->ctxid_mask0 = val1; + config->ctxid_mask1 = val2; + break; + default: + break; + } + /* + * If software sets a mask bit to 1, it must program relevant byte + * of ctxid comparator value 0x0, otherwise behavior is unpredictable. + * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] + * of ctxid comparator0 value (corresponding to byte 0) register. 
+ */ + mask = config->ctxid_mask0; + for (i = 0; i < drvdata->numcidc; i++) { + /* mask value of corresponding ctxid comparator */ + maskbyte = mask & ETMv4_EVENT_MASK; + /* + * each bit corresponds to a byte of respective ctxid comparator + * value register + */ + for (j = 0; j < 8; j++) { + if (maskbyte & 1) + config->ctxid_pid[i] &= ~(0xFF << (j * 8)); + maskbyte >>= 1; + } + /* Select the next ctxid comparator mask value */ + if (i == 3) + /* ctxid comparators[4-7] */ + mask = config->ctxid_mask1; + else + mask >>= 0x8; + } + + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(ctxid_masks); + +static ssize_t vmid_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = config->vmid_idx; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t vmid_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val >= drvdata->numvmidc) + return -EINVAL; + + /* + * Use spinlock to ensure index doesn't change while it gets + * dereferenced multiple times within a spinlock block elsewhere. + */ + spin_lock(&drvdata->spinlock); + config->vmid_idx = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_idx); + +static ssize_t vmid_val_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + val = (unsigned long)config->vmid_val[config->vmid_idx]; + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t vmid_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when vmid tracing is enabled, i.e. at least one + * vmid comparator is implemented and at least 8 bit vmid size + */ + if (!drvdata->vmid_size || !drvdata->numvmidc) + return -EINVAL; + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + config->vmid_val[config->vmid_idx] = (u64)val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_val); + +static ssize_t vmid_masks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val1, val2; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + spin_lock(&drvdata->spinlock); + val1 = config->vmid_mask0; + val2 = config->vmid_mask1; + spin_unlock(&drvdata->spinlock); + return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); +} + +static ssize_t vmid_masks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + u8 i, j, maskbyte; + unsigned long val1, val2, mask; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_config *config = &drvdata->config; + + /* + * only implemented when vmid tracing is enabled, i.e. 
at least one + * vmid comparator is implemented and at least 8 bit vmid size + */ + if (!drvdata->vmid_size || !drvdata->numvmidc) + return -EINVAL; + if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + + /* + * each byte[0..3] controls mask value applied to vmid + * comparator[0..3] + */ + switch (drvdata->numvmidc) { + case 0x1: + /* COMP0, bits[7:0] */ + config->vmid_mask0 = val1 & 0xFF; + break; + case 0x2: + /* COMP1, bits[15:8] */ + config->vmid_mask0 = val1 & 0xFFFF; + break; + case 0x3: + /* COMP2, bits[23:16] */ + config->vmid_mask0 = val1 & 0xFFFFFF; + break; + case 0x4: + /* COMP3, bits[31:24] */ + config->vmid_mask0 = val1; + break; + case 0x5: + /* COMP4, bits[7:0] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFF; + break; + case 0x6: + /* COMP5, bits[15:8] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFFFF; + break; + case 0x7: + /* COMP6, bits[23:16] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2 & 0xFFFFFF; + break; + case 0x8: + /* COMP7, bits[31:24] */ + config->vmid_mask0 = val1; + config->vmid_mask1 = val2; + break; + default: + break; + } + + /* + * If software sets a mask bit to 1, it must program relevant byte + * of vmid comparator value 0x0, otherwise behavior is unpredictable. + * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] + * of vmid comparator0 value (corresponding to byte 0) register. + */ + mask = config->vmid_mask0; + for (i = 0; i < drvdata->numvmidc; i++) { + /* mask value of corresponding vmid comparator */ + maskbyte = mask & ETMv4_EVENT_MASK; + /* + * each bit corresponds to a byte of respective vmid comparator + * value register + */ + for (j = 0; j < 8; j++) { + if (maskbyte & 1) + config->vmid_val[i] &= ~(0xFF << (j * 8)); + maskbyte >>= 1; + } + /* Select the next vmid comparator mask value */ + if (i == 3) + /* vmid comparators[4-7] */ + mask = config->vmid_mask1; + else + mask >>= 0x8; + } + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(vmid_masks); + +static ssize_t cpu_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->cpu; + return scnprintf(buf, PAGE_SIZE, "%d\n", val); + +} +static DEVICE_ATTR_RO(cpu); + +static struct attribute *coresight_etmv4_attrs[] = { + &dev_attr_nr_pe_cmp.attr, + &dev_attr_nr_addr_cmp.attr, + &dev_attr_nr_cntr.attr, + &dev_attr_nr_ext_inp.attr, + &dev_attr_numcidc.attr, + &dev_attr_numvmidc.attr, + &dev_attr_nrseqstate.attr, + &dev_attr_nr_resource.attr, + &dev_attr_nr_ss_cmp.attr, + &dev_attr_reset.attr, + &dev_attr_mode.attr, + &dev_attr_pe.attr, + &dev_attr_event.attr, + &dev_attr_event_instren.attr, + &dev_attr_event_ts.attr, + &dev_attr_syncfreq.attr, + &dev_attr_cyc_threshold.attr, + &dev_attr_bb_ctrl.attr, + &dev_attr_event_vinst.attr, + &dev_attr_s_exlevel_vinst.attr, + &dev_attr_ns_exlevel_vinst.attr, + &dev_attr_addr_idx.attr, + &dev_attr_addr_instdatatype.attr, + &dev_attr_addr_single.attr, + &dev_attr_addr_range.attr, + &dev_attr_addr_start.attr, + &dev_attr_addr_stop.attr, + &dev_attr_addr_ctxtype.attr, + &dev_attr_addr_context.attr, + &dev_attr_seq_idx.attr, + &dev_attr_seq_state.attr, + &dev_attr_seq_event.attr, + &dev_attr_seq_reset_event.attr, + &dev_attr_cntr_idx.attr, + &dev_attr_cntrldvr.attr, + &dev_attr_cntr_val.attr, + &dev_attr_cntr_ctrl.attr, + &dev_attr_res_idx.attr, + &dev_attr_res_ctrl.attr, + &dev_attr_ctxid_idx.attr, + 
&dev_attr_ctxid_pid.attr, + &dev_attr_ctxid_masks.attr, + &dev_attr_vmid_idx.attr, + &dev_attr_vmid_val.attr, + &dev_attr_vmid_masks.attr, + &dev_attr_cpu.attr, + NULL, +}; + +#define coresight_etm4x_simple_func(name, offset) \ + coresight_simple_func(struct etmv4_drvdata, name, offset) + +coresight_etm4x_simple_func(trcoslsr, TRCOSLSR); +coresight_etm4x_simple_func(trcpdcr, TRCPDCR); +coresight_etm4x_simple_func(trcpdsr, TRCPDSR); +coresight_etm4x_simple_func(trclsr, TRCLSR); +coresight_etm4x_simple_func(trcconfig, TRCCONFIGR); +coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR); +coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS); +coresight_etm4x_simple_func(trcdevid, TRCDEVID); +coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE); +coresight_etm4x_simple_func(trcpidr0, TRCPIDR0); +coresight_etm4x_simple_func(trcpidr1, TRCPIDR1); +coresight_etm4x_simple_func(trcpidr2, TRCPIDR2); +coresight_etm4x_simple_func(trcpidr3, TRCPIDR3); + +static struct attribute *coresight_etmv4_mgmt_attrs[] = { + &dev_attr_trcoslsr.attr, + &dev_attr_trcpdcr.attr, + &dev_attr_trcpdsr.attr, + &dev_attr_trclsr.attr, + &dev_attr_trcconfig.attr, + &dev_attr_trctraceid.attr, + &dev_attr_trcauthstatus.attr, + &dev_attr_trcdevid.attr, + &dev_attr_trcdevtype.attr, + &dev_attr_trcpidr0.attr, + &dev_attr_trcpidr1.attr, + &dev_attr_trcpidr2.attr, + &dev_attr_trcpidr3.attr, + NULL, +}; + +coresight_etm4x_simple_func(trcidr0, TRCIDR0); +coresight_etm4x_simple_func(trcidr1, TRCIDR1); +coresight_etm4x_simple_func(trcidr2, TRCIDR2); +coresight_etm4x_simple_func(trcidr3, TRCIDR3); +coresight_etm4x_simple_func(trcidr4, TRCIDR4); +coresight_etm4x_simple_func(trcidr5, TRCIDR5); +/* trcidr[6,7] are reserved */ +coresight_etm4x_simple_func(trcidr8, TRCIDR8); +coresight_etm4x_simple_func(trcidr9, TRCIDR9); +coresight_etm4x_simple_func(trcidr10, TRCIDR10); +coresight_etm4x_simple_func(trcidr11, TRCIDR11); +coresight_etm4x_simple_func(trcidr12, TRCIDR12); +coresight_etm4x_simple_func(trcidr13, TRCIDR13); + +static struct attribute *coresight_etmv4_trcidr_attrs[] = { + &dev_attr_trcidr0.attr, + &dev_attr_trcidr1.attr, + &dev_attr_trcidr2.attr, + &dev_attr_trcidr3.attr, + &dev_attr_trcidr4.attr, + &dev_attr_trcidr5.attr, + /* trcidr[6,7] are reserved */ + &dev_attr_trcidr8.attr, + &dev_attr_trcidr9.attr, + &dev_attr_trcidr10.attr, + &dev_attr_trcidr11.attr, + &dev_attr_trcidr12.attr, + &dev_attr_trcidr13.attr, + NULL, +}; + +static const struct attribute_group coresight_etmv4_group = { + .attrs = coresight_etmv4_attrs, +}; + +static const struct attribute_group coresight_etmv4_mgmt_group = { + .attrs = coresight_etmv4_mgmt_attrs, + .name = "mgmt", +}; + +static const struct attribute_group coresight_etmv4_trcidr_group = { + .attrs = coresight_etmv4_trcidr_attrs, + .name = "trcidr", +}; + +const struct attribute_group *coresight_etmv4_groups[] = { + &coresight_etmv4_group, + &coresight_etmv4_mgmt_group, + &coresight_etmv4_trcidr_group, + NULL, +}; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index a6707642bb23..462f0dc15757 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> -#include <linux/module.h> #include <linux/io.h> #include <linux/err.h> #include <linux/fs.h> @@ -27,14 +26,19 @@ #include <linux/clk.h> #include <linux/cpu.h> #include <linux/coresight.h> +#include <linux/coresight-pmu.h> #include 
<linux/pm_wakeup.h> #include <linux/amba/bus.h> #include <linux/seq_file.h> #include <linux/uaccess.h> +#include <linux/perf_event.h> #include <linux/pm_runtime.h> +#include <linux/perf_event.h> #include <asm/sections.h> +#include <asm/local.h> #include "coresight-etm4x.h" +#include "coresight-etm-perf.h" static int boot_enable; module_param_named(boot_enable, boot_enable, int, S_IRUGO); @@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); /* The number of ETMv4 currently registered */ static int etm4_count; static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; +static void etm4_set_default(struct etmv4_config *config); -static void etm4_os_unlock(void *info) +static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { - struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info; - /* Writing any value to ETMOSLAR unlocks the trace registers */ writel_relaxed(0x0, drvdata->base + TRCOSLAR); + drvdata->os_unlock = true; isb(); } @@ -63,16 +67,22 @@ static bool etm4_arch_supported(u8 arch) return true; } +static int etm4_cpu_id(struct coresight_device *csdev) +{ + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->cpu; +} + static int etm4_trace_id(struct coresight_device *csdev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); unsigned long flags; int trace_id = -1; - if (!drvdata->enable) + if (!local_read(&drvdata->mode)) return drvdata->trcid; - pm_runtime_get_sync(drvdata->dev); spin_lock_irqsave(&drvdata->spinlock, flags); CS_UNLOCK(drvdata->base); @@ -81,7 +91,6 @@ static int etm4_trace_id(struct coresight_device *csdev) CS_LOCK(drvdata->base); spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); return trace_id; } @@ -90,6 +99,7 @@ static void etm4_enable_hw(void *info) { int i; struct etmv4_drvdata *drvdata = info; + struct etmv4_config *config = &drvdata->config; CS_UNLOCK(drvdata->base); @@ -104,69 +114,69 @@ static void etm4_enable_hw(void *info) "timeout observed when probing at offset %#x\n", TRCSTATR); - writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR); - writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR); + writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR); + writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR); /* nothing specific implemented */ writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); - writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R); - writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R); - writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR); - writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR); - writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR); - writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR); - writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR); + writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R); + writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R); + writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR); + writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR); + writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR); + writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR); + writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR); writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR); - writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR); - writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR); - writel_relaxed(drvdata->vissctlr, + 
writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR); + writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR); + writel_relaxed(config->vissctlr, drvdata->base + TRCVISSCTLR); - writel_relaxed(drvdata->vipcssctlr, + writel_relaxed(config->vipcssctlr, drvdata->base + TRCVIPCSSCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) - writel_relaxed(drvdata->seq_ctrl[i], + writel_relaxed(config->seq_ctrl[i], drvdata->base + TRCSEQEVRn(i)); - writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR); - writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR); - writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR); + writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR); + writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR); + writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR); for (i = 0; i < drvdata->nr_cntr; i++) { - writel_relaxed(drvdata->cntrldvr[i], + writel_relaxed(config->cntrldvr[i], drvdata->base + TRCCNTRLDVRn(i)); - writel_relaxed(drvdata->cntr_ctrl[i], + writel_relaxed(config->cntr_ctrl[i], drvdata->base + TRCCNTCTLRn(i)); - writel_relaxed(drvdata->cntr_val[i], + writel_relaxed(config->cntr_val[i], drvdata->base + TRCCNTVRn(i)); } /* Resource selector pair 0 is always implemented and reserved */ - for (i = 2; i < drvdata->nr_resource * 2; i++) - writel_relaxed(drvdata->res_ctrl[i], + for (i = 0; i < drvdata->nr_resource * 2; i++) + writel_relaxed(config->res_ctrl[i], drvdata->base + TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { - writel_relaxed(drvdata->ss_ctrl[i], + writel_relaxed(config->ss_ctrl[i], drvdata->base + TRCSSCCRn(i)); - writel_relaxed(drvdata->ss_status[i], + writel_relaxed(config->ss_status[i], drvdata->base + TRCSSCSRn(i)); - writel_relaxed(drvdata->ss_pe_cmp[i], + writel_relaxed(config->ss_pe_cmp[i], drvdata->base + TRCSSPCICRn(i)); } for (i = 0; i < drvdata->nr_addr_cmp; i++) { - writeq_relaxed(drvdata->addr_val[i], + writeq_relaxed(config->addr_val[i], drvdata->base + TRCACVRn(i)); - writeq_relaxed(drvdata->addr_acc[i], + writeq_relaxed(config->addr_acc[i], drvdata->base + TRCACATRn(i)); } for (i = 0; i < drvdata->numcidc; i++) - writeq_relaxed(drvdata->ctxid_pid[i], + writeq_relaxed(config->ctxid_pid[i], drvdata->base + TRCCIDCVRn(i)); - writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); - writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); + writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); + writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); for (i = 0; i < drvdata->numvmidc; i++) - writeq_relaxed(drvdata->vmid_val[i], + writeq_relaxed(config->vmid_val[i], drvdata->base + TRCVMIDCVRn(i)); - writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); - writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); + writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); + writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); /* Enable the trace unit */ writel_relaxed(1, drvdata->base + TRCPRGCTLR); @@ -182,2122 +192,210 @@ static void etm4_enable_hw(void *info) dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); } -static int etm4_enable(struct coresight_device *csdev) +static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, + struct perf_event_attr *attr) { - struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - int ret; - - pm_runtime_get_sync(drvdata->dev); - spin_lock(&drvdata->spinlock); - - /* - * Executing etm4_enable_hw on the cpu 
whose ETM is being enabled - * ensures that register writes occur when cpu is powered. - */ - ret = smp_call_function_single(drvdata->cpu, - etm4_enable_hw, drvdata, 1); - if (ret) - goto err; - drvdata->enable = true; - drvdata->sticky_enable = true; + struct etmv4_config *config = &drvdata->config; - spin_unlock(&drvdata->spinlock); - - dev_info(drvdata->dev, "ETM tracing enabled\n"); - return 0; -err: - spin_unlock(&drvdata->spinlock); - pm_runtime_put(drvdata->dev); - return ret; -} - -static void etm4_disable_hw(void *info) -{ - u32 control; - struct etmv4_drvdata *drvdata = info; - - CS_UNLOCK(drvdata->base); - - control = readl_relaxed(drvdata->base + TRCPRGCTLR); - - /* EN, bit[0] Trace unit enable bit */ - control &= ~0x1; - - /* make sure everything completes before disabling */ - mb(); - isb(); - writel_relaxed(control, drvdata->base + TRCPRGCTLR); + if (!attr) + return -EINVAL; - CS_LOCK(drvdata->base); + /* Clear configuration from previous run */ + memset(config, 0, sizeof(struct etmv4_config)); - dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); -} + if (attr->exclude_kernel) + config->mode = ETM_MODE_EXCL_KERN; -static void etm4_disable(struct coresight_device *csdev) -{ - struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + if (attr->exclude_user) + config->mode = ETM_MODE_EXCL_USER; - /* - * Taking hotplug lock here protects from clocks getting disabled - * with tracing being left on (crash scenario) if user disable occurs - * after cpu online mask indicates the cpu is offline but before the - * DYING hotplug callback is serviced by the ETM driver. - */ - get_online_cpus(); - spin_lock(&drvdata->spinlock); + /* Always start from the default config */ + etm4_set_default(config); /* - * Executing etm4_disable_hw on the cpu whose ETM is being disabled - * ensures that register writes occur when cpu is powered. + * By default the tracers are configured to trace the whole address + * range. Narrow the field only if requested by user space. */ - smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); - drvdata->enable = false; - - spin_unlock(&drvdata->spinlock); - put_online_cpus(); + if (config->mode) + etm4_config_trace_mode(config); - pm_runtime_put(drvdata->dev); + /* Go from generic option to ETMv4 specifics */ + if (attr->config & BIT(ETM_OPT_CYCACC)) + config->cfg |= ETMv4_MODE_CYCACC; + if (attr->config & BIT(ETM_OPT_TS)) + config->cfg |= ETMv4_MODE_TIMESTAMP; - dev_info(drvdata->dev, "ETM tracing disabled\n"); -} - -static const struct coresight_ops_source etm4_source_ops = { - .trace_id = etm4_trace_id, - .enable = etm4_enable, - .disable = etm4_disable, -}; - -static const struct coresight_ops etm4_cs_ops = { - .source_ops = &etm4_source_ops, -}; - -static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) -{ - u8 idx = drvdata->addr_idx; - - /* - * TRCACATRn.TYPE bit[1:0]: type of comparison - * the trace unit performs - */ - if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { - if (idx % 2 != 0) - return -EINVAL; - - /* - * We are performing instruction address comparison. Set the - * relevant bit of ViewInst Include/Exclude Control register - * for corresponding address comparator pair. 
- */ - if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE || - drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) - return -EINVAL; - - if (exclude == true) { - /* - * Set exclude bit and unset the include bit - * corresponding to comparator pair - */ - drvdata->viiectlr |= BIT(idx / 2 + 16); - drvdata->viiectlr &= ~BIT(idx / 2); - } else { - /* - * Set include bit and unset exclude bit - * corresponding to comparator pair - */ - drvdata->viiectlr |= BIT(idx / 2); - drvdata->viiectlr &= ~BIT(idx / 2 + 16); - } - } return 0; } -static ssize_t nr_pe_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_pe_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_pe_cmp); - -static ssize_t nr_addr_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static int etm4_enable_perf(struct coresight_device *csdev, + struct perf_event_attr *attr) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_addr_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_addr_cmp); - -static ssize_t nr_cntr_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_cntr; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_cntr); - -static ssize_t nr_ext_inp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_ext_inp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_ext_inp); - -static ssize_t numcidc_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->numcidc; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(numcidc); - -static ssize_t numvmidc_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->numvmidc; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(numvmidc); - -static ssize_t nrseqstate_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nrseqstate; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nrseqstate); - -static ssize_t nr_resource_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_resource; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_resource); - -static ssize_t nr_ss_cmp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->nr_ss_cmp; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static DEVICE_ATTR_RO(nr_ss_cmp); - -static ssize_t reset_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - int i; - unsigned long val; - struct etmv4_drvdata 
*drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - if (kstrtoul(buf, 16, &val)) + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; - spin_lock(&drvdata->spinlock); - if (val) - drvdata->mode = 0x0; - - /* Disable data tracing: do not trace load and store data transfers */ - drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); - drvdata->cfg &= ~(BIT(1) | BIT(2)); - - /* Disable data value and data address tracing */ - drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | - ETM_MODE_DATA_TRACE_VAL); - drvdata->cfg &= ~(BIT(16) | BIT(17)); - - /* Disable all events tracing */ - drvdata->eventctrl0 = 0x0; - drvdata->eventctrl1 = 0x0; + /* Configure the tracer based on the session's specifics */ + etm4_parse_event_config(drvdata, attr); + /* And enable it */ + etm4_enable_hw(drvdata); - /* Disable timestamp event */ - drvdata->ts_ctrl = 0x0; - - /* Disable stalling */ - drvdata->stall_ctrl = 0x0; - - /* Reset trace synchronization period to 2^8 = 256 bytes*/ - if (drvdata->syncpr == false) - drvdata->syncfreq = 0x8; - - /* - * Enable ViewInst to trace everything with start-stop logic in - * started state. ARM recommends start-stop logic is set before - * each trace run. - */ - drvdata->vinst_ctrl |= BIT(0); - if (drvdata->nr_addr_cmp == true) { - drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP; - /* SSSTATUS, bit[9] */ - drvdata->vinst_ctrl |= BIT(9); - } - - /* No address range filtering for ViewInst */ - drvdata->viiectlr = 0x0; - - /* No start-stop filtering for ViewInst */ - drvdata->vissctlr = 0x0; - - /* Disable seq events */ - for (i = 0; i < drvdata->nrseqstate-1; i++) - drvdata->seq_ctrl[i] = 0x0; - drvdata->seq_rst = 0x0; - drvdata->seq_state = 0x0; - - /* Disable external input events */ - drvdata->ext_inp = 0x0; - - drvdata->cntr_idx = 0x0; - for (i = 0; i < drvdata->nr_cntr; i++) { - drvdata->cntrldvr[i] = 0x0; - drvdata->cntr_ctrl[i] = 0x0; - drvdata->cntr_val[i] = 0x0; - } - - /* Resource selector pair 0 is always implemented and reserved */ - drvdata->res_idx = 0x2; - for (i = 2; i < drvdata->nr_resource * 2; i++) - drvdata->res_ctrl[i] = 0x0; - - for (i = 0; i < drvdata->nr_ss_cmp; i++) { - drvdata->ss_ctrl[i] = 0x0; - drvdata->ss_pe_cmp[i] = 0x0; - } - - drvdata->addr_idx = 0x0; - for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { - drvdata->addr_val[i] = 0x0; - drvdata->addr_acc[i] = 0x0; - drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; - } - - drvdata->ctxid_idx = 0x0; - for (i = 0; i < drvdata->numcidc; i++) { - drvdata->ctxid_pid[i] = 0x0; - drvdata->ctxid_vpid[i] = 0x0; - } - - drvdata->ctxid_mask0 = 0x0; - drvdata->ctxid_mask1 = 0x0; - - drvdata->vmid_idx = 0x0; - for (i = 0; i < drvdata->numvmidc; i++) - drvdata->vmid_val[i] = 0x0; - drvdata->vmid_mask0 = 0x0; - drvdata->vmid_mask1 = 0x0; - - drvdata->trcid = drvdata->cpu + 1; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_WO(reset); - -static ssize_t mode_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->mode; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + return 0; } -static ssize_t mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static int etm4_enable_sysfs(struct coresight_device *csdev) { - unsigned long val, mode; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + struct 
etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + int ret; spin_lock(&drvdata->spinlock); - drvdata->mode = val & ETMv4_MODE_ALL; - - if (drvdata->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); - - if (drvdata->instrp0 == true) { - /* start by clearing instruction P0 field */ - drvdata->cfg &= ~(BIT(1) | BIT(2)); - if (drvdata->mode & ETM_MODE_LOAD) - /* 0b01 Trace load instructions as P0 instructions */ - drvdata->cfg |= BIT(1); - if (drvdata->mode & ETM_MODE_STORE) - /* 0b10 Trace store instructions as P0 instructions */ - drvdata->cfg |= BIT(2); - if (drvdata->mode & ETM_MODE_LOAD_STORE) - /* - * 0b11 Trace load and store instructions - * as P0 instructions - */ - drvdata->cfg |= BIT(1) | BIT(2); - } - - /* bit[3], Branch broadcast mode */ - if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) - drvdata->cfg |= BIT(3); - else - drvdata->cfg &= ~BIT(3); - - /* bit[4], Cycle counting instruction trace bit */ - if ((drvdata->mode & ETMv4_MODE_CYCACC) && - (drvdata->trccci == true)) - drvdata->cfg |= BIT(4); - else - drvdata->cfg &= ~BIT(4); - - /* bit[6], Context ID tracing bit */ - if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) - drvdata->cfg |= BIT(6); - else - drvdata->cfg &= ~BIT(6); - - if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) - drvdata->cfg |= BIT(7); - else - drvdata->cfg &= ~BIT(7); - - /* bits[10:8], Conditional instruction tracing bit */ - mode = ETM_MODE_COND(drvdata->mode); - if (drvdata->trccond == true) { - drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); - drvdata->cfg |= mode << 8; - } - - /* bit[11], Global timestamp tracing bit */ - if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) - drvdata->cfg |= BIT(11); - else - drvdata->cfg &= ~BIT(11); - /* bit[12], Return stack enable bit */ - if ((drvdata->mode & ETM_MODE_RETURNSTACK) && - (drvdata->retstack == true)) - drvdata->cfg |= BIT(12); - else - drvdata->cfg &= ~BIT(12); - - /* bits[14:13], Q element enable field */ - mode = ETM_MODE_QELEM(drvdata->mode); - /* start by clearing QE bits */ - drvdata->cfg &= ~(BIT(13) | BIT(14)); - /* if supported, Q elements with instruction counts are enabled */ - if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) - drvdata->cfg |= BIT(13); /* - * if supported, Q elements with and without instruction - * counts are enabled + * Executing etm4_enable_hw on the cpu whose ETM is being enabled + * ensures that register writes occur when cpu is powered. 
*/ - if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) - drvdata->cfg |= BIT(14); - - /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ - if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) && - (drvdata->atbtrig == true)) - drvdata->eventctrl1 |= BIT(11); - else - drvdata->eventctrl1 &= ~BIT(11); - - /* bit[12], Low-power state behavior override bit */ - if ((drvdata->mode & ETM_MODE_LPOVERRIDE) && - (drvdata->lpoverride == true)) - drvdata->eventctrl1 |= BIT(12); - else - drvdata->eventctrl1 &= ~BIT(12); - - /* bit[8], Instruction stall bit */ - if (drvdata->mode & ETM_MODE_ISTALL_EN) - drvdata->stall_ctrl |= BIT(8); - else - drvdata->stall_ctrl &= ~BIT(8); - - /* bit[10], Prioritize instruction trace bit */ - if (drvdata->mode & ETM_MODE_INSTPRIO) - drvdata->stall_ctrl |= BIT(10); - else - drvdata->stall_ctrl &= ~BIT(10); - - /* bit[13], Trace overflow prevention bit */ - if ((drvdata->mode & ETM_MODE_NOOVERFLOW) && - (drvdata->nooverflow == true)) - drvdata->stall_ctrl |= BIT(13); - else - drvdata->stall_ctrl &= ~BIT(13); - - /* bit[9] Start/stop logic control bit */ - if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP) - drvdata->vinst_ctrl |= BIT(9); - else - drvdata->vinst_ctrl &= ~BIT(9); - - /* bit[10], Whether a trace unit must trace a Reset exception */ - if (drvdata->mode & ETM_MODE_TRACE_RESET) - drvdata->vinst_ctrl |= BIT(10); - else - drvdata->vinst_ctrl &= ~BIT(10); - - /* bit[11], Whether a trace unit must trace a system error exception */ - if ((drvdata->mode & ETM_MODE_TRACE_ERR) && - (drvdata->trc_error == true)) - drvdata->vinst_ctrl |= BIT(11); - else - drvdata->vinst_ctrl &= ~BIT(11); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(mode); - -static ssize_t pe_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->pe_sel; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t pe_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - if (val > drvdata->nr_pe) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } + ret = smp_call_function_single(drvdata->cpu, + etm4_enable_hw, drvdata, 1); + if (ret) + goto err; - drvdata->pe_sel = val; + drvdata->sticky_enable = true; spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(pe); - -static ssize_t event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->eventctrl0; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} -static ssize_t event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + dev_info(drvdata->dev, "ETM tracing enabled\n"); + return 0; - spin_lock(&drvdata->spinlock); - switch (drvdata->nr_event) { - case 0x0: - /* EVENT0, bits[7:0] */ - drvdata->eventctrl0 = val & 0xFF; - break; - case 0x1: - /* EVENT1, bits[15:8] */ - drvdata->eventctrl0 = val & 0xFFFF; - break; - case 0x2: - /* EVENT2, bits[23:16] */ - drvdata->eventctrl0 = val & 0xFFFFFF; - break; - case 0x3: - /* EVENT3, bits[31:24] */ - 
drvdata->eventctrl0 = val; - break; - default: - break; - } +err: spin_unlock(&drvdata->spinlock); - return size; + return ret; } -static DEVICE_ATTR_RW(event); -static ssize_t event_instren_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static int etm4_enable(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + int ret; + u32 val; + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - val = BMVAL(drvdata->eventctrl1, 0, 3); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} + val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); -static ssize_t event_instren_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + /* Someone is already using the tracer */ + if (val) + return -EBUSY; - spin_lock(&drvdata->spinlock); - /* start by clearing all instruction event enable bits */ - drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); - switch (drvdata->nr_event) { - case 0x0: - /* generate Event element for event 1 */ - drvdata->eventctrl1 |= val & BIT(1); - break; - case 0x1: - /* generate Event element for event 1 and 2 */ - drvdata->eventctrl1 |= val & (BIT(0) | BIT(1)); - break; - case 0x2: - /* generate Event element for event 1, 2 and 3 */ - drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); + switch (mode) { + case CS_MODE_SYSFS: + ret = etm4_enable_sysfs(csdev); break; - case 0x3: - /* generate Event element for all 4 events */ - drvdata->eventctrl1 |= val & 0xF; + case CS_MODE_PERF: + ret = etm4_enable_perf(csdev, attr); break; default: - break; - } - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(event_instren); - -static ssize_t event_ts_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ts_ctrl; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t event_ts_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (!drvdata->ts_size) - return -EINVAL; - - drvdata->ts_ctrl = val & ETMv4_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(event_ts); - -static ssize_t syncfreq_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->syncfreq; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t syncfreq_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (drvdata->syncpr == true) - return -EINVAL; - - drvdata->syncfreq = val & ETMv4_SYNC_MASK; - return size; -} -static DEVICE_ATTR_RW(syncfreq); - -static ssize_t cyc_threshold_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->ccctlr; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t 
cyc_threshold_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val < drvdata->ccitmin) - return -EINVAL; - - drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK; - return size; -} -static DEVICE_ATTR_RW(cyc_threshold); - -static ssize_t bb_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->bb_ctrl; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t bb_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (drvdata->trcbb == false) - return -EINVAL; - if (!drvdata->nr_addr_cmp) - return -EINVAL; - /* - * Bit[7:0] selects which address range comparator is used for - * branch broadcast control. - */ - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) - return -EINVAL; - - drvdata->bb_ctrl = val; - return size; -} -static DEVICE_ATTR_RW(bb_ctrl); - -static ssize_t event_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t event_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - val &= ETMv4_EVENT_MASK; - drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK; - drvdata->vinst_ctrl |= val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(event_vinst); - -static ssize_t s_exlevel_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = BMVAL(drvdata->vinst_ctrl, 16, 19); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t s_exlevel_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ - drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); - /* enable instruction tracing for corresponding exception level */ - val &= drvdata->s_ex_level; - drvdata->vinst_ctrl |= (val << 16); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(s_exlevel_vinst); - -static ssize_t ns_exlevel_vinst_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* EXLEVEL_NS, bits[23:20] */ - val = BMVAL(drvdata->vinst_ctrl, 20, 23); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t ns_exlevel_vinst_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 
16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* clear EXLEVEL_NS bits (bit[23] is never implemented */ - drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); - /* enable instruction tracing for corresponding exception level */ - val &= drvdata->ns_ex_level; - drvdata->vinst_ctrl |= (val << 20); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ns_exlevel_vinst); - -static ssize_t addr_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->addr_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nr_addr_cmp * 2) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->addr_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_idx); - -static ssize_t addr_instdatatype_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t len; - u8 val, idx; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - val = BMVAL(drvdata->addr_acc[idx], 0, 1); - len = scnprintf(buf, PAGE_SIZE, "%s\n", - val == ETM_INSTR_ADDR ? "instr" : - (val == ETM_DATA_LOAD_ADDR ? "data_load" : - (val == ETM_DATA_STORE_ADDR ? "data_store" : - "data_load_store"))); - spin_unlock(&drvdata->spinlock); - return len; -} - -static ssize_t addr_instdatatype_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - char str[20] = ""; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (strlen(buf) >= 20) - return -EINVAL; - if (sscanf(buf, "%s", str) != 1) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!strcmp(str, "instr")) - /* TYPE, bits[1:0] */ - drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1)); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_instdatatype); - -static ssize_t addr_single_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - idx = drvdata->addr_idx; - spin_lock(&drvdata->spinlock); - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_single_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] 
= ETM_ADDR_TYPE_SINGLE; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_single); - -static ssize_t addr_range_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val1 = (unsigned long)drvdata->addr_val[idx]; - val2 = (unsigned long)drvdata->addr_val[idx + 1]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} - -static ssize_t addr_range_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - /* lower address comparator cannot have a higher address value */ - if (val1 > val2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || - (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && - drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val1; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_val[idx + 1] = (u64)val2; - drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; - /* - * Program include or exclude control bits for vinst or vdata - * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE - */ - if (drvdata->mode & ETM_MODE_EXCLUDE) - etm4_set_mode_exclude(drvdata, true); - else - etm4_set_mode_exclude(drvdata, false); - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_range); - -static ssize_t addr_start_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_start_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; - 
drvdata->vissctlr |= BIT(idx); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - drvdata->vinst_ctrl |= BIT(9); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_start); - -static ssize_t addr_stop_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - val = (unsigned long)drvdata->addr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_stop_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); - return -EINVAL; - } - if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || - drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); - return -EPERM; - } - - drvdata->addr_val[idx] = (u64)val; - drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; - drvdata->vissctlr |= BIT(idx + 16); - /* SSSTATUS, bit[9] - turn on start/stop logic */ - drvdata->vinst_ctrl |= BIT(9); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_stop); - -static ssize_t addr_ctxtype_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t len; - u8 idx, val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* CONTEXTTYPE, bits[3:2] */ - val = BMVAL(drvdata->addr_acc[idx], 2, 3); - len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : - (val == ETM_CTX_CTXID ? "ctxid" : - (val == ETM_CTX_VMID ? 
"vmid" : "all"))); - spin_unlock(&drvdata->spinlock); - return len; -} - -static ssize_t addr_ctxtype_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - char str[10] = ""; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (strlen(buf) >= 10) - return -EINVAL; - if (sscanf(buf, "%s", str) != 1) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - if (!strcmp(str, "none")) - /* start by clearing context type bits */ - drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3)); - else if (!strcmp(str, "ctxid")) { - /* 0b01 The trace unit performs a Context ID */ - if (drvdata->numcidc) { - drvdata->addr_acc[idx] |= BIT(2); - drvdata->addr_acc[idx] &= ~BIT(3); - } - } else if (!strcmp(str, "vmid")) { - /* 0b10 The trace unit performs a VMID */ - if (drvdata->numvmidc) { - drvdata->addr_acc[idx] &= ~BIT(2); - drvdata->addr_acc[idx] |= BIT(3); - } - } else if (!strcmp(str, "all")) { - /* - * 0b11 The trace unit performs a Context ID - * comparison and a VMID - */ - if (drvdata->numcidc) - drvdata->addr_acc[idx] |= BIT(2); - if (drvdata->numvmidc) - drvdata->addr_acc[idx] |= BIT(3); + ret = -EINVAL; } - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_ctxtype); -static ssize_t addr_context_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* context ID comparator bits[6:4] */ - val = BMVAL(drvdata->addr_acc[idx], 4, 6); - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t addr_context_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) - return -EINVAL; - if (val >= (drvdata->numcidc >= drvdata->numvmidc ? - drvdata->numcidc : drvdata->numvmidc)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->addr_idx; - /* clear context ID comparator bits[6:4] */ - drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); - drvdata->addr_acc[idx] |= (val << 4); - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(addr_context); - -static ssize_t seq_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nrseqstate - 1) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. 
- */ - spin_lock(&drvdata->spinlock); - drvdata->seq_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(seq_idx); - -static ssize_t seq_state_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_state; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nrseqstate) - return -EINVAL; - - drvdata->seq_state = val; - return size; -} -static DEVICE_ATTR_RW(seq_state); - -static ssize_t seq_event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->seq_idx; - val = drvdata->seq_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->seq_idx; - /* RST, bits[7:0] */ - drvdata->seq_ctrl[idx] = val & 0xFF; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(seq_event); - -static ssize_t seq_reset_event_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->seq_rst; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t seq_reset_event_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (!(drvdata->nrseqstate)) - return -EINVAL; - - drvdata->seq_rst = val & ETMv4_EVENT_MASK; - return size; -} -static DEVICE_ATTR_RW(seq_reset_event); - -static ssize_t cntr_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->cntr_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->nr_cntr) - return -EINVAL; - - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. 
- */ - spin_lock(&drvdata->spinlock); - drvdata->cntr_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_idx); - -static ssize_t cntrldvr_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntrldvr[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntrldvr_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val > ETM_CNTR_MAX_VAL) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntrldvr[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntrldvr); - -static ssize_t cntr_val_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntr_val[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val > ETM_CNTR_MAX_VAL) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntr_val[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_val); - -static ssize_t cntr_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - val = drvdata->cntr_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t cntr_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - idx = drvdata->cntr_idx; - drvdata->cntr_ctrl[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(cntr_ctrl); - -static ssize_t res_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->res_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t res_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - /* Resource selector pair 0 is always implemented and reserved */ - if (val < 2 || val >= drvdata->nr_resource * 2) - return -EINVAL; + /* The tracer didn't start */ + if (ret) + local_set(&drvdata->mode, CS_MODE_DISABLED); - /* - * Use spinlock to ensure index doesn't change while it gets - * 
dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->res_idx = val; - spin_unlock(&drvdata->spinlock); - return size; + return ret; } -static DEVICE_ATTR_RW(res_idx); -static ssize_t res_ctrl_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static void etm4_disable_hw(void *info) { - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + u32 control; + struct etmv4_drvdata *drvdata = info; - spin_lock(&drvdata->spinlock); - idx = drvdata->res_idx; - val = drvdata->res_ctrl[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} + CS_UNLOCK(drvdata->base); -static ssize_t res_ctrl_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + control = readl_relaxed(drvdata->base + TRCPRGCTLR); - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + /* EN, bit[0] Trace unit enable bit */ + control &= ~0x1; - spin_lock(&drvdata->spinlock); - idx = drvdata->res_idx; - /* For odd idx pair inversal bit is RES0 */ - if (idx % 2 != 0) - /* PAIRINV, bit[21] */ - val &= ~BIT(21); - drvdata->res_ctrl[idx] = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(res_ctrl); + /* make sure everything completes before disabling */ + mb(); + isb(); + writel_relaxed(control, drvdata->base + TRCPRGCTLR); -static ssize_t ctxid_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + CS_LOCK(drvdata->base); - val = drvdata->ctxid_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); } -static ssize_t ctxid_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static int etm4_disable_perf(struct coresight_device *csdev) { - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->numcidc) + if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->ctxid_idx = val; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_idx); - -static ssize_t ctxid_pid_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - u8 idx; - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - idx = drvdata->ctxid_idx; - val = (unsigned long)drvdata->ctxid_vpid[idx]; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); + etm4_disable_hw(drvdata); + return 0; } -static ssize_t ctxid_pid_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) +static void etm4_disable_sysfs(struct coresight_device *csdev) { - u8 idx; - unsigned long vpid, pid; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); /* - * only implemented when ctxid tracing is enabled, i.e. 
at least one - * ctxid comparator is implemented and ctxid is greater than 0 bits - * in length + * Taking hotplug lock here protects from clocks getting disabled + * with tracing being left on (crash scenario) if user disable occurs + * after cpu online mask indicates the cpu is offline but before the + * DYING hotplug callback is serviced by the ETM driver. */ - if (!drvdata->ctxid_size || !drvdata->numcidc) - return -EINVAL; - if (kstrtoul(buf, 16, &vpid)) - return -EINVAL; - - pid = coresight_vpid_to_pid(vpid); - - spin_lock(&drvdata->spinlock); - idx = drvdata->ctxid_idx; - drvdata->ctxid_pid[idx] = (u64)pid; - drvdata->ctxid_vpid[idx] = (u64)vpid; - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_pid); - -static ssize_t ctxid_masks_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - + get_online_cpus(); spin_lock(&drvdata->spinlock); - val1 = drvdata->ctxid_mask0; - val2 = drvdata->ctxid_mask1; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} -static ssize_t ctxid_masks_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 i, j, maskbyte; - unsigned long val1, val2, mask; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* - * only implemented when ctxid tracing is enabled, i.e. at least one - * ctxid comparator is implemented and ctxid is greater than 0 bits - * in length - */ - if (!drvdata->ctxid_size || !drvdata->numcidc) - return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; - - spin_lock(&drvdata->spinlock); - /* - * each byte[0..3] controls mask value applied to ctxid - * comparator[0..3] - */ - switch (drvdata->numcidc) { - case 0x1: - /* COMP0, bits[7:0] */ - drvdata->ctxid_mask0 = val1 & 0xFF; - break; - case 0x2: - /* COMP1, bits[15:8] */ - drvdata->ctxid_mask0 = val1 & 0xFFFF; - break; - case 0x3: - /* COMP2, bits[23:16] */ - drvdata->ctxid_mask0 = val1 & 0xFFFFFF; - break; - case 0x4: - /* COMP3, bits[31:24] */ - drvdata->ctxid_mask0 = val1; - break; - case 0x5: - /* COMP4, bits[7:0] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFF; - break; - case 0x6: - /* COMP5, bits[15:8] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFFFF; - break; - case 0x7: - /* COMP6, bits[23:16] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2 & 0xFFFFFF; - break; - case 0x8: - /* COMP7, bits[31:24] */ - drvdata->ctxid_mask0 = val1; - drvdata->ctxid_mask1 = val2; - break; - default: - break; - } /* - * If software sets a mask bit to 1, it must program relevant byte - * of ctxid comparator value 0x0, otherwise behavior is unpredictable. - * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] - * of ctxid comparator0 value (corresponding to byte 0) register. + * Executing etm4_disable_hw on the cpu whose ETM is being disabled + * ensures that register writes occur when cpu is powered. 
*/ - mask = drvdata->ctxid_mask0; - for (i = 0; i < drvdata->numcidc; i++) { - /* mask value of corresponding ctxid comparator */ - maskbyte = mask & ETMv4_EVENT_MASK; - /* - * each bit corresponds to a byte of respective ctxid comparator - * value register - */ - for (j = 0; j < 8; j++) { - if (maskbyte & 1) - drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8)); - maskbyte >>= 1; - } - /* Select the next ctxid comparator mask value */ - if (i == 3) - /* ctxid comparators[4-7] */ - mask = drvdata->ctxid_mask1; - else - mask >>= 0x8; - } - - spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(ctxid_masks); - -static ssize_t vmid_idx_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->vmid_idx; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t vmid_idx_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - if (kstrtoul(buf, 16, &val)) - return -EINVAL; - if (val >= drvdata->numvmidc) - return -EINVAL; + smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); - /* - * Use spinlock to ensure index doesn't change while it gets - * dereferenced multiple times within a spinlock block elsewhere. - */ - spin_lock(&drvdata->spinlock); - drvdata->vmid_idx = val; spin_unlock(&drvdata->spinlock); - return size; -} -static DEVICE_ATTR_RW(vmid_idx); - -static ssize_t vmid_val_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx]; - return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); -} - -static ssize_t vmid_val_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - unsigned long val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - /* - * only implemented when vmid tracing is enabled, i.e. at least one - * vmid comparator is implemented and at least 8 bit vmid size - */ - if (!drvdata->vmid_size || !drvdata->numvmidc) - return -EINVAL; - if (kstrtoul(buf, 16, &val)) - return -EINVAL; + put_online_cpus(); - spin_lock(&drvdata->spinlock); - drvdata->vmid_val[drvdata->vmid_idx] = (u64)val; - spin_unlock(&drvdata->spinlock); - return size; + dev_info(drvdata->dev, "ETM tracing disabled\n"); } -static DEVICE_ATTR_RW(vmid_val); -static ssize_t vmid_masks_show(struct device *dev, - struct device_attribute *attr, char *buf) +static void etm4_disable(struct coresight_device *csdev) { - unsigned long val1, val2; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - spin_lock(&drvdata->spinlock); - val1 = drvdata->vmid_mask0; - val2 = drvdata->vmid_mask1; - spin_unlock(&drvdata->spinlock); - return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); -} + u32 mode; + struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); -static ssize_t vmid_masks_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - u8 i, j, maskbyte; - unsigned long val1, val2, mask; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); /* - * only implemented when vmid tracing is enabled, i.e. 
at least one - * vmid comparator is implemented and at least 8 bit vmid size + * For as long as the tracer isn't disabled another entity can't + * change its status. As such we can read the status here without + * fearing it will change under us. */ - if (!drvdata->vmid_size || !drvdata->numvmidc) - return -EINVAL; - if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) - return -EINVAL; + mode = local_read(&drvdata->mode); - spin_lock(&drvdata->spinlock); - - /* - * each byte[0..3] controls mask value applied to vmid - * comparator[0..3] - */ - switch (drvdata->numvmidc) { - case 0x1: - /* COMP0, bits[7:0] */ - drvdata->vmid_mask0 = val1 & 0xFF; - break; - case 0x2: - /* COMP1, bits[15:8] */ - drvdata->vmid_mask0 = val1 & 0xFFFF; - break; - case 0x3: - /* COMP2, bits[23:16] */ - drvdata->vmid_mask0 = val1 & 0xFFFFFF; - break; - case 0x4: - /* COMP3, bits[31:24] */ - drvdata->vmid_mask0 = val1; + switch (mode) { + case CS_MODE_DISABLED: break; - case 0x5: - /* COMP4, bits[7:0] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFF; + case CS_MODE_SYSFS: + etm4_disable_sysfs(csdev); break; - case 0x6: - /* COMP5, bits[15:8] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFFFF; - break; - case 0x7: - /* COMP6, bits[23:16] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2 & 0xFFFFFF; - break; - case 0x8: - /* COMP7, bits[31:24] */ - drvdata->vmid_mask0 = val1; - drvdata->vmid_mask1 = val2; - break; - default: + case CS_MODE_PERF: + etm4_disable_perf(csdev); break; } - /* - * If software sets a mask bit to 1, it must program relevant byte - * of vmid comparator value 0x0, otherwise behavior is unpredictable. - * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] - * of vmid comparator0 value (corresponding to byte 0) register. 
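/*
 * Illustrative sketch, not part of this patch: the CTXID/VMID comparator
 * mask convention described in the comments above.  Each bit of a
 * comparator's mask byte covers one byte of the 64-bit comparator value;
 * when a mask bit is set, the corresponding value byte must be programmed
 * to 0x0, which is what the store handlers in this hunk do one comparator
 * at a time.  Plain userspace C, runnable as-is; the function name is
 * made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t apply_comparator_mask(uint64_t value, uint8_t maskbyte)
{
	int j;

	/* bit j of the mask clears byte j of the comparator value */
	for (j = 0; j < 8; j++) {
		if (maskbyte & (1 << j))
			value &= ~(0xFFULL << (j * 8));
	}

	return value;
}

int main(void)
{
	/* mask 0x03: bytes 0 and 1 of the value are forced to zero */
	printf("%#llx\n", (unsigned long long)
	       apply_comparator_mask(0x1122334455667788ULL, 0x03));
	/* prints 0x1122334455660000 */
	return 0;
}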
- */ - mask = drvdata->vmid_mask0; - for (i = 0; i < drvdata->numvmidc; i++) { - /* mask value of corresponding vmid comparator */ - maskbyte = mask & ETMv4_EVENT_MASK; - /* - * each bit corresponds to a byte of respective vmid comparator - * value register - */ - for (j = 0; j < 8; j++) { - if (maskbyte & 1) - drvdata->vmid_val[i] &= ~(0xFF << (j * 8)); - maskbyte >>= 1; - } - /* Select the next vmid comparator mask value */ - if (i == 3) - /* vmid comparators[4-7] */ - mask = drvdata->vmid_mask1; - else - mask >>= 0x8; - } - spin_unlock(&drvdata->spinlock); - return size; + if (mode) + local_set(&drvdata->mode, CS_MODE_DISABLED); } -static DEVICE_ATTR_RW(vmid_masks); - -static ssize_t cpu_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int val; - struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); - - val = drvdata->cpu; - return scnprintf(buf, PAGE_SIZE, "%d\n", val); -} -static DEVICE_ATTR_RO(cpu); - -static struct attribute *coresight_etmv4_attrs[] = { - &dev_attr_nr_pe_cmp.attr, - &dev_attr_nr_addr_cmp.attr, - &dev_attr_nr_cntr.attr, - &dev_attr_nr_ext_inp.attr, - &dev_attr_numcidc.attr, - &dev_attr_numvmidc.attr, - &dev_attr_nrseqstate.attr, - &dev_attr_nr_resource.attr, - &dev_attr_nr_ss_cmp.attr, - &dev_attr_reset.attr, - &dev_attr_mode.attr, - &dev_attr_pe.attr, - &dev_attr_event.attr, - &dev_attr_event_instren.attr, - &dev_attr_event_ts.attr, - &dev_attr_syncfreq.attr, - &dev_attr_cyc_threshold.attr, - &dev_attr_bb_ctrl.attr, - &dev_attr_event_vinst.attr, - &dev_attr_s_exlevel_vinst.attr, - &dev_attr_ns_exlevel_vinst.attr, - &dev_attr_addr_idx.attr, - &dev_attr_addr_instdatatype.attr, - &dev_attr_addr_single.attr, - &dev_attr_addr_range.attr, - &dev_attr_addr_start.attr, - &dev_attr_addr_stop.attr, - &dev_attr_addr_ctxtype.attr, - &dev_attr_addr_context.attr, - &dev_attr_seq_idx.attr, - &dev_attr_seq_state.attr, - &dev_attr_seq_event.attr, - &dev_attr_seq_reset_event.attr, - &dev_attr_cntr_idx.attr, - &dev_attr_cntrldvr.attr, - &dev_attr_cntr_val.attr, - &dev_attr_cntr_ctrl.attr, - &dev_attr_res_idx.attr, - &dev_attr_res_ctrl.attr, - &dev_attr_ctxid_idx.attr, - &dev_attr_ctxid_pid.attr, - &dev_attr_ctxid_masks.attr, - &dev_attr_vmid_idx.attr, - &dev_attr_vmid_val.attr, - &dev_attr_vmid_masks.attr, - &dev_attr_cpu.attr, - NULL, -}; - -#define coresight_simple_func(name, offset) \ -static ssize_t name##_show(struct device *_dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ - return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ - readl_relaxed(drvdata->base + offset)); \ -} \ -DEVICE_ATTR_RO(name) - -coresight_simple_func(trcoslsr, TRCOSLSR); -coresight_simple_func(trcpdcr, TRCPDCR); -coresight_simple_func(trcpdsr, TRCPDSR); -coresight_simple_func(trclsr, TRCLSR); -coresight_simple_func(trcauthstatus, TRCAUTHSTATUS); -coresight_simple_func(trcdevid, TRCDEVID); -coresight_simple_func(trcdevtype, TRCDEVTYPE); -coresight_simple_func(trcpidr0, TRCPIDR0); -coresight_simple_func(trcpidr1, TRCPIDR1); -coresight_simple_func(trcpidr2, TRCPIDR2); -coresight_simple_func(trcpidr3, TRCPIDR3); - -static struct attribute *coresight_etmv4_mgmt_attrs[] = { - &dev_attr_trcoslsr.attr, - &dev_attr_trcpdcr.attr, - &dev_attr_trcpdsr.attr, - &dev_attr_trclsr.attr, - &dev_attr_trcauthstatus.attr, - &dev_attr_trcdevid.attr, - &dev_attr_trcdevtype.attr, - &dev_attr_trcpidr0.attr, - &dev_attr_trcpidr1.attr, - &dev_attr_trcpidr2.attr, - &dev_attr_trcpidr3.attr, - NULL, -}; - -coresight_simple_func(trcidr0, 
TRCIDR0); -coresight_simple_func(trcidr1, TRCIDR1); -coresight_simple_func(trcidr2, TRCIDR2); -coresight_simple_func(trcidr3, TRCIDR3); -coresight_simple_func(trcidr4, TRCIDR4); -coresight_simple_func(trcidr5, TRCIDR5); -/* trcidr[6,7] are reserved */ -coresight_simple_func(trcidr8, TRCIDR8); -coresight_simple_func(trcidr9, TRCIDR9); -coresight_simple_func(trcidr10, TRCIDR10); -coresight_simple_func(trcidr11, TRCIDR11); -coresight_simple_func(trcidr12, TRCIDR12); -coresight_simple_func(trcidr13, TRCIDR13); - -static struct attribute *coresight_etmv4_trcidr_attrs[] = { - &dev_attr_trcidr0.attr, - &dev_attr_trcidr1.attr, - &dev_attr_trcidr2.attr, - &dev_attr_trcidr3.attr, - &dev_attr_trcidr4.attr, - &dev_attr_trcidr5.attr, - /* trcidr[6,7] are reserved */ - &dev_attr_trcidr8.attr, - &dev_attr_trcidr9.attr, - &dev_attr_trcidr10.attr, - &dev_attr_trcidr11.attr, - &dev_attr_trcidr12.attr, - &dev_attr_trcidr13.attr, - NULL, -}; - -static const struct attribute_group coresight_etmv4_group = { - .attrs = coresight_etmv4_attrs, -}; - -static const struct attribute_group coresight_etmv4_mgmt_group = { - .attrs = coresight_etmv4_mgmt_attrs, - .name = "mgmt", -}; - -static const struct attribute_group coresight_etmv4_trcidr_group = { - .attrs = coresight_etmv4_trcidr_attrs, - .name = "trcidr", +static const struct coresight_ops_source etm4_source_ops = { + .cpu_id = etm4_cpu_id, + .trace_id = etm4_trace_id, + .enable = etm4_enable, + .disable = etm4_disable, }; -static const struct attribute_group *coresight_etmv4_groups[] = { - &coresight_etmv4_group, - &coresight_etmv4_mgmt_group, - &coresight_etmv4_trcidr_group, - NULL, +static const struct coresight_ops etm4_cs_ops = { + .source_ops = &etm4_source_ops, }; static void etm4_init_arch_data(void *info) @@ -2310,6 +408,9 @@ static void etm4_init_arch_data(void *info) u32 etmidr5; struct etmv4_drvdata *drvdata = info; + /* Make sure all registers are accessible */ + etm4_os_unlock(drvdata); + CS_UNLOCK(drvdata->base); /* find all capabilities of the tracing unit */ @@ -2461,93 +562,115 @@ static void etm4_init_arch_data(void *info) CS_LOCK(drvdata->base); } -static void etm4_init_default_data(struct etmv4_drvdata *drvdata) +static void etm4_set_default(struct etmv4_config *config) { - int i; + if (WARN_ON_ONCE(!config)) + return; - drvdata->pe_sel = 0x0; - drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID | - ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK); + /* + * Make default initialisation trace everything + * + * Select the "always true" resource selector on the + * "Enablign Event" line and configure address range comparator + * '0' to trace all the possible address range. From there + * configure the "include/exclude" engine to include address + * range comparator '0'. 
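/*
 * Illustrative expansion, not part of this patch: what one instance of the
 * read-only register dump macro above, e.g.
 * coresight_simple_func(trcdevid, TRCDEVID), unrolls to.  The same helper
 * reappears later in this patch in coresight-priv.h with an extra 'type'
 * parameter so that other drivers (the new STM driver included) can reuse
 * it.
 */
static ssize_t trcdevid_show(struct device *_dev,
			     struct device_attribute *attr, char *buf)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);

	/* dump the memory-mapped management register verbatim */
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + TRCDEVID));
}
DEVICE_ATTR_RO(trcdevid);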
+ */ /* disable all events tracing */ - drvdata->eventctrl0 = 0x0; - drvdata->eventctrl1 = 0x0; + config->eventctrl0 = 0x0; + config->eventctrl1 = 0x0; /* disable stalling */ - drvdata->stall_ctrl = 0x0; + config->stall_ctrl = 0x0; + + /* enable trace synchronization every 4096 bytes, if available */ + config->syncfreq = 0xC; /* disable timestamp event */ - drvdata->ts_ctrl = 0x0; + config->ts_ctrl = 0x0; - /* enable trace synchronization every 4096 bytes for trace */ - if (drvdata->syncpr == false) - drvdata->syncfreq = 0xC; + /* TRCVICTLR::EVENT = 0x01, select the always on logic */ + config->vinst_ctrl |= BIT(0); /* - * enable viewInst to trace everything with start-stop logic in - * started state + * TRCVICTLR::SSSTATUS == 1, the start-stop logic is + * in the started state */ - drvdata->vinst_ctrl |= BIT(0); - /* set initial state of start-stop logic */ - if (drvdata->nr_addr_cmp) - drvdata->vinst_ctrl |= BIT(9); + config->vinst_ctrl |= BIT(9); - /* no address range filtering for ViewInst */ - drvdata->viiectlr = 0x0; - /* no start-stop filtering for ViewInst */ - drvdata->vissctlr = 0x0; + /* + * Configure address range comparator '0' to encompass all + * possible addresses. + */ - /* disable seq events */ - for (i = 0; i < drvdata->nrseqstate-1; i++) - drvdata->seq_ctrl[i] = 0x0; - drvdata->seq_rst = 0x0; - drvdata->seq_state = 0x0; + /* First half of default address comparator: start at address 0 */ + config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0; + /* trace instruction addresses */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1)); + /* EXLEVEL_NS, bits[12:15], only trace application and kernel space */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP; + /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP | + ETM_EXLEVEL_S_OS | + ETM_EXLEVEL_S_HYP); + config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE; - /* disable external input events */ - drvdata->ext_inp = 0x0; + /* + * Second half of default address comparator: go all + * the way to the top. + */ + config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0; + /* trace instruction addresses */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1)); + /* Address comparator type must be equal for both halves */ + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = + config->addr_acc[ETM_DEFAULT_ADDR_COMP]; + config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE; - for (i = 0; i < drvdata->nr_cntr; i++) { - drvdata->cntrldvr[i] = 0x0; - drvdata->cntr_ctrl[i] = 0x0; - drvdata->cntr_val[i] = 0x0; - } + /* + * Configure the ViewInst function to filter on address range + * comparator '0'. 
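/*
 * Illustrative sketch, not part of this patch: the access-level filter
 * value that the defaults above leave in addr_acc[ETM_DEFAULT_ADDR_COMP]
 * (and, by copy, in the second half of the pair).  A set EXLEVEL bit stops
 * tracing at that exception level, so the default excludes the whole
 * secure state plus non-secure EL2 and keeps non-secure EL0/EL1 (user and
 * kernel).  Plain userspace C; the BIT()/ETM_EXLEVEL_* values mirror the
 * definitions added to coresight-etm4x.h later in this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(nr)			(1U << (nr))
#define ETM_EXLEVEL_S_APP	BIT(8)
#define ETM_EXLEVEL_S_OS	BIT(9)
#define ETM_EXLEVEL_S_HYP	BIT(11)
#define ETM_EXLEVEL_NS_HYP	BIT(14)

int main(void)
{
	uint32_t addr_acc = 0;

	/* bits[1:0] stay clear: compare instruction addresses */
	addr_acc |= ETM_EXLEVEL_NS_HYP;			/* no NS EL2 */
	addr_acc |= ETM_EXLEVEL_S_APP | ETM_EXLEVEL_S_OS |
		    ETM_EXLEVEL_S_HYP;			/* no secure state */

	printf("default addr_acc = %#x\n", addr_acc);	/* 0x4b00 */
	return 0;
}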
+ */ + config->viiectlr = BIT(0); - /* Resource selector pair 0 is always implemented and reserved */ - drvdata->res_idx = 0x2; - for (i = 2; i < drvdata->nr_resource * 2; i++) - drvdata->res_ctrl[i] = 0x0; + /* no start-stop filtering for ViewInst */ + config->vissctlr = 0x0; +} - for (i = 0; i < drvdata->nr_ss_cmp; i++) { - drvdata->ss_ctrl[i] = 0x0; - drvdata->ss_pe_cmp[i] = 0x0; - } +void etm4_config_trace_mode(struct etmv4_config *config) +{ + u32 addr_acc, mode; - if (drvdata->nr_addr_cmp >= 1) { - drvdata->addr_val[0] = (unsigned long)_stext; - drvdata->addr_val[1] = (unsigned long)_etext; - drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; - drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; - } + mode = config->mode; + mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); - for (i = 0; i < drvdata->numcidc; i++) { - drvdata->ctxid_pid[i] = 0x0; - drvdata->ctxid_vpid[i] = 0x0; - } + /* excluding kernel AND user space doesn't make sense */ + WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)); - drvdata->ctxid_mask0 = 0x0; - drvdata->ctxid_mask1 = 0x0; + /* nothing to do if neither flags are set */ + if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) + return; - for (i = 0; i < drvdata->numvmidc; i++) - drvdata->vmid_val[i] = 0x0; - drvdata->vmid_mask0 = 0x0; - drvdata->vmid_mask1 = 0x0; + addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; + /* clear default config */ + addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); /* - * A trace ID value of 0 is invalid, so let's start at some - * random value that fits in 7 bits. ETMv3.x has 0x10 so let's - * start at 0x20. + * EXLEVEL_NS, bits[15:12] + * The Exception levels are: + * Bit[12] Exception level 0 - Application + * Bit[13] Exception level 1 - OS + * Bit[14] Exception level 2 - Hypervisor + * Bit[15] Never implemented */ - drvdata->trcid = 0x20 + drvdata->cpu; + if (mode & ETM_MODE_EXCL_KERN) + addr_acc |= ETM_EXLEVEL_NS_OS; + else + addr_acc |= ETM_EXLEVEL_NS_APP; + + config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; + config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; } static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, @@ -2566,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, etmdrvdata[cpu]->os_unlock = true; } - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm4_enable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -2579,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, case CPU_DYING: spin_lock(&etmdrvdata[cpu]->spinlock); - if (etmdrvdata[cpu]->enable) + if (local_read(&etmdrvdata[cpu]->mode)) etm4_disable_hw(etmdrvdata[cpu]); spin_unlock(&etmdrvdata[cpu]->spinlock); break; @@ -2592,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = { .notifier_call = etm4_cpu_callback, }; +static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) +{ + drvdata->trcid = coresight_get_trace_id(drvdata->cpu); +} + static int etm4_probe(struct amba_device *adev, const struct amba_id *id) { int ret; @@ -2635,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) get_online_cpus(); etmdrvdata[drvdata->cpu] = drvdata; - if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1)) - drvdata->os_unlock = true; - if (smp_call_function_single(drvdata->cpu, etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); @@ -2651,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct 
amba_id *id) ret = -EINVAL; goto err_arch_supported; } - etm4_init_default_data(drvdata); - pm_runtime_put(&adev->dev); + etm4_init_trace_id(drvdata); + etm4_set_default(&drvdata->config); desc->type = CORESIGHT_DEV_TYPE_SOURCE; desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; @@ -2664,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); - goto err_coresight_register; + goto err_arch_supported; + } + + ret = etm_perf_symlink(drvdata->csdev, true); + if (ret) { + coresight_unregister(drvdata->csdev); + goto err_arch_supported; } + pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); if (boot_enable) { @@ -2677,24 +809,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - pm_runtime_put(&adev->dev); -err_coresight_register: if (--etm4_count == 0) unregister_hotcpu_notifier(&etm4_cpu_notifier); return ret; } -static int etm4_remove(struct amba_device *adev) -{ - struct etmv4_drvdata *drvdata = amba_get_drvdata(adev); - - coresight_unregister(drvdata->csdev); - if (--etm4_count == 0) - unregister_hotcpu_notifier(&etm4_cpu_notifier); - - return 0; -} - static struct amba_id etm4_ids[] = { { /* ETM 4.0 - Qualcomm */ .id = 0x0003b95d, @@ -2706,16 +825,20 @@ static struct amba_id etm4_ids[] = { .mask = 0x000fffff, .data = "ETM 4.0", }, + { /* ETM 4.0 - A72, Maia, HiSilicon */ + .id = 0x000bb95a, + .mask = 0x000fffff, + .data = "ETM 4.0", + }, { 0, 0}, }; static struct amba_driver etm4x_driver = { .drv = { .name = "coresight-etm4x", + .suppress_bind_attrs = true, }, .probe = etm4_probe, - .remove = etm4_remove, .id_table = etm4_ids, }; - -module_amba_driver(etm4x_driver); +builtin_amba_driver(etm4x_driver); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index c34100205ca9..5359c5197c1d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -13,6 +13,7 @@ #ifndef _CORESIGHT_CORESIGHT_ETM_H #define _CORESIGHT_CORESIGHT_ETM_H +#include <asm/local.h> #include <linux/spinlock.h> #include "coresight-priv.h" @@ -175,71 +176,38 @@ #define ETM_MODE_TRACE_RESET BIT(25) #define ETM_MODE_TRACE_ERR BIT(26) #define ETM_MODE_VIEWINST_STARTSTOP BIT(27) -#define ETMv4_MODE_ALL 0xFFFFFFF +#define ETMv4_MODE_ALL (GENMASK(27, 0) | \ + ETM_MODE_EXCL_KERN | \ + ETM_MODE_EXCL_USER) #define TRCSTATR_IDLE_BIT 0 +#define ETM_DEFAULT_ADDR_COMP 0 + +/* secure state access levels */ +#define ETM_EXLEVEL_S_APP BIT(8) +#define ETM_EXLEVEL_S_OS BIT(9) +#define ETM_EXLEVEL_S_NA BIT(10) +#define ETM_EXLEVEL_S_HYP BIT(11) +/* non-secure state access levels */ +#define ETM_EXLEVEL_NS_APP BIT(12) +#define ETM_EXLEVEL_NS_OS BIT(13) +#define ETM_EXLEVEL_NS_HYP BIT(14) +#define ETM_EXLEVEL_NS_NA BIT(15) /** - * struct etm4_drvdata - specifics associated to an ETM component - * @base: Memory mapped base address for this component. - * @dev: The device entity associated to this component. - * @csdev: Component vitals needed by the framework. - * @spinlock: Only one at a time pls. - * @cpu: The cpu this component is affined to. - * @arch: ETM version number. - * @enable: Is this ETM currently tracing. - * @sticky_enable: true if ETM base configuration has been done. - * @boot_enable:True if we should start tracing at boot time. 
- * @os_unlock: True if access to management registers is allowed. - * @nr_pe: The number of processing entity available for tracing. - * @nr_pe_cmp: The number of processing entity comparator inputs that are - * available for tracing. - * @nr_addr_cmp:Number of pairs of address comparators available - * as found in ETMIDR4 0-3. - * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. - * @nr_ext_inp: Number of external input. - * @numcidc: Number of contextID comparators. - * @numvmidc: Number of VMID comparators. - * @nrseqstate: The number of sequencer states that are implemented. - * @nr_event: Indicates how many events the trace unit support. - * @nr_resource:The number of resource selection pairs available for tracing. - * @nr_ss_cmp: Number of single-shot comparator controls that are available. + * struct etmv4_config - configuration information related to an ETMv4 * @mode: Controls various modes supported by this ETM. - * @trcid: value of the current ID for this component. - * @trcid_size: Indicates the trace ID width. - * @instrp0: Tracing of load and store instructions - * as P0 elements is supported. - * @trccond: If the trace unit supports conditional - * instruction tracing. - * @retstack: Indicates if the implementation supports a return stack. - * @trc_error: Whether a trace unit can trace a system - * error exception. - * @atbtrig: If the implementation can support ATB triggers - * @lpoverride: If the implementation can support low-power state over. * @pe_sel: Controls which PE to trace. * @cfg: Controls the tracing options. * @eventctrl0: Controls the tracing of arbitrary events. * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects. * @stallctl: If functionality that prevents trace unit buffer overflows * is available. - * @sysstall: Does the system support stall control of the PE? - * @nooverflow: Indicate if overflow prevention is supported. - * @stall_ctrl: Enables trace unit functionality that prevents trace - * unit buffer overflows. - * @ts_size: Global timestamp size field. * @ts_ctrl: Controls the insertion of global timestamps in the * trace streams. - * @syncpr: Indicates if an implementation has a fixed - * synchronization period. * @syncfreq: Controls how often trace synchronization requests occur. - * @trccci: Indicates if the trace unit supports cycle counting - * for instruction. - * @ccsize: Indicates the size of the cycle counter in bits. - * @ccitmin: minimum value that can be programmed in * the TRCCCCTLR register. * @ccctlr: Sets the threshold value for cycle counting. - * @trcbb: Indicates if the trace unit supports branch broadcast tracing. - * @q_support: Q element support characteristics. * @vinst_ctrl: Controls instruction trace filtering. * @viiectlr: Set or read, the address range comparators. * @vissctlr: Set, or read, the single address comparators that control the @@ -264,73 +232,28 @@ * @addr_acc: Address comparator access type. * @addr_type: Current status of the comparator register. * @ctxid_idx: Context ID index selector. - * @ctxid_size: Size of the context ID field to consider. * @ctxid_pid: Value of the context ID comparator. * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise * the same value of ctxid_pid. * @ctxid_mask0:Context ID comparator mask for comparator 0-3. * @ctxid_mask1:Context ID comparator mask for comparator 4-7. * @vmid_idx: VM ID index selector. - * @vmid_size: Size of the VM ID comparator to consider. * @vmid_val: Value of the VM ID comparator. 
* @vmid_mask0: VM ID comparator mask for comparator 0-3. * @vmid_mask1: VM ID comparator mask for comparator 4-7. - * @s_ex_level: In secure state, indicates whether instruction tracing is - * supported for the corresponding Exception level. - * @ns_ex_level:In non-secure state, indicates whether instruction tracing is - * supported for the corresponding Exception level. * @ext_inp: External input selection. */ -struct etmv4_drvdata { - void __iomem *base; - struct device *dev; - struct coresight_device *csdev; - spinlock_t spinlock; - int cpu; - u8 arch; - bool enable; - bool sticky_enable; - bool boot_enable; - bool os_unlock; - u8 nr_pe; - u8 nr_pe_cmp; - u8 nr_addr_cmp; - u8 nr_cntr; - u8 nr_ext_inp; - u8 numcidc; - u8 numvmidc; - u8 nrseqstate; - u8 nr_event; - u8 nr_resource; - u8 nr_ss_cmp; +struct etmv4_config { u32 mode; - u8 trcid; - u8 trcid_size; - bool instrp0; - bool trccond; - bool retstack; - bool trc_error; - bool atbtrig; - bool lpoverride; u32 pe_sel; u32 cfg; u32 eventctrl0; u32 eventctrl1; - bool stallctl; - bool sysstall; - bool nooverflow; u32 stall_ctrl; - u8 ts_size; u32 ts_ctrl; - bool syncpr; u32 syncfreq; - bool trccci; - u8 ccsize; - u8 ccitmin; u32 ccctlr; - bool trcbb; u32 bb_ctrl; - bool q_support; u32 vinst_ctrl; u32 viiectlr; u32 vissctlr; @@ -353,19 +276,119 @@ struct etmv4_drvdata { u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP]; u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; u8 ctxid_idx; - u8 ctxid_size; u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; u32 ctxid_mask0; u32 ctxid_mask1; u8 vmid_idx; - u8 vmid_size; u64 vmid_val[ETM_MAX_VMID_CMP]; u32 vmid_mask0; u32 vmid_mask1; + u32 ext_inp; +}; + +/** + * struct etm4_drvdata - specifics associated to an ETM component + * @base: Memory mapped base address for this component. + * @dev: The device entity associated to this component. + * @csdev: Component vitals needed by the framework. + * @spinlock: Only one at a time pls. + * @mode: This tracer's mode, i.e sysFS, Perf or disabled. + * @cpu: The cpu this component is affined to. + * @arch: ETM version number. + * @nr_pe: The number of processing entity available for tracing. + * @nr_pe_cmp: The number of processing entity comparator inputs that are + * available for tracing. + * @nr_addr_cmp:Number of pairs of address comparators available + * as found in ETMIDR4 0-3. + * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. + * @nr_ext_inp: Number of external input. + * @numcidc: Number of contextID comparators. + * @numvmidc: Number of VMID comparators. + * @nrseqstate: The number of sequencer states that are implemented. + * @nr_event: Indicates how many events the trace unit support. + * @nr_resource:The number of resource selection pairs available for tracing. + * @nr_ss_cmp: Number of single-shot comparator controls that are available. + * @trcid: value of the current ID for this component. + * @trcid_size: Indicates the trace ID width. + * @ts_size: Global timestamp size field. + * @ctxid_size: Size of the context ID field to consider. + * @vmid_size: Size of the VM ID comparator to consider. + * @ccsize: Indicates the size of the cycle counter in bits. + * @ccitmin: minimum value that can be programmed in + * @s_ex_level: In secure state, indicates whether instruction tracing is + * supported for the corresponding Exception level. + * @ns_ex_level:In non-secure state, indicates whether instruction tracing is + * supported for the corresponding Exception level. + * @sticky_enable: true if ETM base configuration has been done. 
+ * @boot_enable:True if we should start tracing at boot time. + * @os_unlock: True if access to management registers is allowed. + * @instrp0: Tracing of load and store instructions + * as P0 elements is supported. + * @trcbb: Indicates if the trace unit supports branch broadcast tracing. + * @trccond: If the trace unit supports conditional + * instruction tracing. + * @retstack: Indicates if the implementation supports a return stack. + * @trccci: Indicates if the trace unit supports cycle counting + * for instruction. + * @q_support: Q element support characteristics. + * @trc_error: Whether a trace unit can trace a system + * error exception. + * @syncpr: Indicates if an implementation has a fixed + * synchronization period. + * @stall_ctrl: Enables trace unit functionality that prevents trace + * unit buffer overflows. + * @sysstall: Does the system support stall control of the PE? + * @nooverflow: Indicate if overflow prevention is supported. + * @atbtrig: If the implementation can support ATB triggers + * @lpoverride: If the implementation can support low-power state over. + * @config: structure holding configuration parameters. + */ +struct etmv4_drvdata { + void __iomem *base; + struct device *dev; + struct coresight_device *csdev; + spinlock_t spinlock; + local_t mode; + int cpu; + u8 arch; + u8 nr_pe; + u8 nr_pe_cmp; + u8 nr_addr_cmp; + u8 nr_cntr; + u8 nr_ext_inp; + u8 numcidc; + u8 numvmidc; + u8 nrseqstate; + u8 nr_event; + u8 nr_resource; + u8 nr_ss_cmp; + u8 trcid; + u8 trcid_size; + u8 ts_size; + u8 ctxid_size; + u8 vmid_size; + u8 ccsize; + u8 ccitmin; u8 s_ex_level; u8 ns_ex_level; - u32 ext_inp; + u8 q_support; + bool sticky_enable; + bool boot_enable; + bool os_unlock; + bool instrp0; + bool trcbb; + bool trccond; + bool retstack; + bool trccci; + bool trc_error; + bool syncpr; + bool stallctl; + bool sysstall; + bool nooverflow; + bool atbtrig; + bool lpoverride; + struct etmv4_config config; }; /* Address comparator access types */ @@ -391,4 +414,7 @@ enum etm_addr_type { ETM_ADDR_TYPE_START, ETM_ADDR_TYPE_STOP, }; + +extern const struct attribute_group *coresight_etmv4_groups[]; +void etm4_config_trace_mode(struct etmv4_config *config); #endif diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 2e36bde7fcb4..05df789056cc 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -1,5 +1,7 @@ /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Funnel driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. 
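/*
 * Illustrative sketch, not part of this patch: how the
 * etm4_config_trace_mode() helper declared above turns the new
 * ETM_MODE_EXCL_KERN/ETM_MODE_EXCL_USER flags (added to coresight-priv.h
 * just below) into EXLEVEL_NS bits on the default address comparator
 * pair.  Plain userspace C; it assumes exactly one exclusion flag is set,
 * while the in-kernel helper returns early when neither flag is set and
 * warns when both are.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(nr)			(1U << (nr))
#define ETM_EXLEVEL_NS_APP	BIT(12)	/* non-secure EL0 */
#define ETM_EXLEVEL_NS_OS	BIT(13)	/* non-secure EL1 */
#define ETM_MODE_EXCL_KERN	BIT(30)
#define ETM_MODE_EXCL_USER	BIT(31)

static uint32_t apply_excl_mode(uint32_t addr_acc, uint32_t mode)
{
	/* drop whatever the previous configuration excluded */
	addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);

	if (mode & ETM_MODE_EXCL_KERN)
		addr_acc |= ETM_EXLEVEL_NS_OS;	/* stop tracing the kernel */
	else
		addr_acc |= ETM_EXLEVEL_NS_APP;	/* stop tracing user space */

	return addr_acc;
}

int main(void)
{
	/* 0x4b00 is the value the defaults leave in the comparator pair */
	printf("%#x\n", apply_excl_mode(0x4b00, ETM_MODE_EXCL_KERN)); /* 0x6b00 */
	return 0;
}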
@@ -11,7 +13,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> @@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport, { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_get_sync(drvdata->dev); funnel_enable_hw(drvdata, inport); dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); @@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport, struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); funnel_disable_hw(drvdata, inport); - pm_runtime_put(drvdata->dev); dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); } @@ -222,15 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id) if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); - dev_info(dev, "FUNNEL initialized\n"); - return 0; -} - -static int funnel_remove(struct amba_device *adev) -{ - struct funnel_drvdata *drvdata = amba_get_drvdata(adev); - - coresight_unregister(drvdata->csdev); return 0; } @@ -273,13 +263,9 @@ static struct amba_driver funnel_driver = { .name = "coresight-funnel", .owner = THIS_MODULE, .pm = &funnel_dev_pm_ops, + .suppress_bind_attrs = true, }, .probe = funnel_probe, - .remove = funnel_remove, .id_table = funnel_ids, }; - -module_amba_driver(funnel_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Funnel driver"); +builtin_amba_driver(funnel_driver); diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 62fcd98cc7cf..3cb574b3cdd9 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -34,6 +34,45 @@ #define TIMEOUT_US 100 #define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) +#define ETM_MODE_EXCL_KERN BIT(30) +#define ETM_MODE_EXCL_USER BIT(31) + +#define coresight_simple_func(type, name, offset) \ +static ssize_t name##_show(struct device *_dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + type *drvdata = dev_get_drvdata(_dev->parent); \ + return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ + readl_relaxed(drvdata->base + offset)); \ +} \ +static DEVICE_ATTR_RO(name) + +enum cs_mode { + CS_MODE_DISABLED, + CS_MODE_SYSFS, + CS_MODE_PERF, +}; + +/** + * struct cs_buffer - keep track of a recording session' specifics + * @cur: index of the current buffer + * @nr_pages: max number of pages granted to us + * @offset: offset within the current buffer + * @data_size: how much we collected in this run + * @lost: other than zero if we had a HW buffer wrap around + * @snapshot: is this run in snapshot mode + * @data_pages: a handle the ring buffer + */ +struct cs_buffers { + unsigned int cur; + unsigned int nr_pages; + unsigned long offset; + local_t data_size; + local_t lost; + bool snapshot; + void **data_pages; +}; + static inline void CS_LOCK(void __iomem *addr) { do { @@ -52,6 +91,13 @@ static inline void CS_UNLOCK(void __iomem *addr) } while (0); } +void coresight_disable_path(struct list_head *path); +int coresight_enable_path(struct list_head *path, u32 mode); +struct coresight_device *coresight_get_sink(struct list_head *path); +struct list_head *coresight_build_path(struct coresight_device *csdev, + const char *sink); +void coresight_release_path(struct list_head *path); + #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X extern int etm_readl_cp14(u32 off, unsigned int *val); extern int etm_writel_cp14(u32 off, u32 val); diff --git 
a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c index 584059e9e866..700f710e4bfa 100644 --- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c +++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c @@ -15,7 +15,6 @@ #include <linux/clk.h> #include <linux/coresight.h> #include <linux/device.h> -#include <linux/module.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, { struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_get_sync(drvdata->dev); - CS_UNLOCK(drvdata->base); /* @@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, CS_LOCK(drvdata->base); - pm_runtime_put(drvdata->dev); - dev_info(drvdata->dev, "REPLICATOR disabled\n"); } @@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id) return 0; } -static int replicator_remove(struct amba_device *adev) -{ - struct replicator_state *drvdata = amba_get_drvdata(adev); - - pm_runtime_disable(&adev->dev); - coresight_unregister(drvdata->csdev); - return 0; -} - #ifdef CONFIG_PM static int replicator_runtime_suspend(struct device *dev) { @@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = { .drv = { .name = "coresight-replicator-qcom", .pm = &replicator_dev_pm_ops, + .suppress_bind_attrs = true, }, .probe = replicator_probe, - .remove = replicator_remove, .id_table = replicator_ids, }; - -module_amba_driver(replicator_driver); +builtin_amba_driver(replicator_driver); diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 963ac197c253..c6982e312e15 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -1,5 +1,7 @@ /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Replicator driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. 
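/*
 * Illustrative sketch, not part of this patch: the ownership rule behind
 * the cs_mode enum added to coresight-priv.h above.  A tracer's current
 * mode lives in a local_t; the enable path claims the device atomically so
 * that the sysFS and perf interfaces cannot drive it at the same time, and
 * the disable path hands it back with local_set().  The new STM driver
 * below claims its tracer exactly this way in stm_enable(), and
 * etm4_disable() earlier in this patch dispatches on the same CS_MODE_*
 * values.  'example_claim_tracer' is a made-up name; 'mode' stands for the
 * driver's local_t field.
 */
static int example_claim_tracer(local_t *mode, u32 new_mode)
{
	u32 val;

	/* only succeed if nobody owns the tracer yet */
	val = local_cmpxchg(mode, CS_MODE_DISABLED, new_mode);
	if (val)
		return -EBUSY;

	return 0;
}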
@@ -11,7 +13,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> @@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_get_sync(drvdata->dev); dev_info(drvdata->dev, "REPLICATOR enabled\n"); return 0; } @@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, { struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_put(drvdata->dev); dev_info(drvdata->dev, "REPLICATOR disabled\n"); } @@ -115,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - dev_info(dev, "REPLICATOR initialized\n"); return 0; out_disable_pm: @@ -127,20 +125,6 @@ out_disable_pm: return ret; } -static int replicator_remove(struct platform_device *pdev) -{ - struct replicator_drvdata *drvdata = platform_get_drvdata(pdev); - - coresight_unregister(drvdata->csdev); - pm_runtime_get_sync(&pdev->dev); - if (!IS_ERR(drvdata->atclk)) - clk_disable_unprepare(drvdata->atclk); - pm_runtime_put_noidle(&pdev->dev); - pm_runtime_disable(&pdev->dev); - - return 0; -} - #ifdef CONFIG_PM static int replicator_runtime_suspend(struct device *dev) { @@ -175,15 +159,11 @@ static const struct of_device_id replicator_match[] = { static struct platform_driver replicator_driver = { .probe = replicator_probe, - .remove = replicator_remove, .driver = { .name = "coresight-replicator", .of_match_table = replicator_match, .pm = &replicator_dev_pm_ops, + .suppress_bind_attrs = true, }, }; - builtin_platform_driver(replicator_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Replicator driver"); diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c new file mode 100644 index 000000000000..73be58a11e4f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -0,0 +1,920 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * Description: CoreSight System Trace Macrocell driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Initial implementation by Pratik Patel + * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org> + * + * Serious refactoring, code cleanup and upgrading to the Coresight upstream + * framework by Mathieu Poirier + * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org> + * + * Guaranteed timing and support for various packet type coming from the + * generic STM API by Chunyan Zhang + * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org> + */ +#include <asm/local.h> +#include <linux/amba/bus.h> +#include <linux/bitmap.h> +#include <linux/clk.h> +#include <linux/coresight.h> +#include <linux/coresight-stm.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/of_address.h> +#include <linux/perf_event.h> +#include <linux/pm_runtime.h> +#include <linux/stm.h> + +#include "coresight-priv.h" + +#define STMDMASTARTR 0xc04 +#define STMDMASTOPR 0xc08 +#define STMDMASTATR 0xc0c +#define STMDMACTLR 0xc10 +#define STMDMAIDR 0xcfc +#define STMHEER 0xd00 +#define STMHETER 0xd20 +#define STMHEBSR 0xd60 +#define STMHEMCR 0xd64 +#define STMHEMASTR 0xdf4 +#define STMHEFEAT1R 0xdf8 +#define STMHEIDR 0xdfc +#define STMSPER 0xe00 +#define STMSPTER 0xe20 +#define STMPRIVMASKR 0xe40 +#define STMSPSCR 0xe60 +#define STMSPMSCR 0xe64 +#define STMSPOVERRIDER 0xe68 +#define STMSPMOVERRIDER 0xe6c +#define STMSPTRIGCSR 0xe70 +#define STMTCSR 0xe80 +#define STMTSSTIMR 0xe84 +#define STMTSFREQR 0xe8c +#define STMSYNCR 0xe90 +#define STMAUXCR 0xe94 +#define STMSPFEAT1R 0xea0 +#define STMSPFEAT2R 0xea4 +#define STMSPFEAT3R 0xea8 +#define STMITTRIGGER 0xee8 +#define STMITATBDATA0 0xeec +#define STMITATBCTR2 0xef0 +#define STMITATBID 0xef4 +#define STMITATBCTR0 0xef8 + +#define STM_32_CHANNEL 32 +#define BYTES_PER_CHANNEL 256 +#define STM_TRACE_BUF_SIZE 4096 +#define STM_SW_MASTER_END 127 + +/* Register bit definition */ +#define STMTCSR_BUSY_BIT 23 +/* Reserve the first 10 channels for kernel usage */ +#define STM_CHANNEL_OFFSET 0 + +enum stm_pkt_type { + STM_PKT_TYPE_DATA = 0x98, + STM_PKT_TYPE_FLAG = 0xE8, + STM_PKT_TYPE_TRIG = 0xF8, +}; + +#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \ + (ch * BYTES_PER_CHANNEL)) +#define stm_channel_off(type, opts) (type & ~opts) + +static int boot_nr_channel; + +/* + * Not really modular but using module_param is the easiest way to + * remain consistent with existing use cases for now. + */ +module_param_named( + boot_nr_channel, boot_nr_channel, int, S_IRUGO +); + +/** + * struct channel_space - central management entity for extended ports + * @base: memory mapped base address where channels start. + * @guaraneed: is the channel delivery guaranteed. + */ +struct channel_space { + void __iomem *base; + unsigned long *guaranteed; +}; + +/** + * struct stm_drvdata - specifics associated to an STM component + * @base: memory mapped base address for this component. + * @dev: the device entity associated to this component. + * @atclk: optional clock for the core parts of the STM. + * @csdev: component vitals needed by the framework. + * @spinlock: only one at a time pls. + * @chs: the channels accociated to this STM. + * @stm: structure associated to the generic STM interface. + * @mode: this tracer's mode, i.e sysFS, or disabled. + * @traceid: value of the current ID for this component. + * @write_bytes: Maximus bytes this STM can write at a time. + * @stmsper: settings for register STMSPER. + * @stmspscr: settings for register STMSPSCR. + * @numsp: the total number of stimulus port support by this STM. 
+ * @stmheer: settings for register STMHEER. + * @stmheter: settings for register STMHETER. + * @stmhebsr: settings for register STMHEBSR. + */ +struct stm_drvdata { + void __iomem *base; + struct device *dev; + struct clk *atclk; + struct coresight_device *csdev; + spinlock_t spinlock; + struct channel_space chs; + struct stm_data stm; + local_t mode; + u8 traceid; + u32 write_bytes; + u32 stmsper; + u32 stmspscr; + u32 numsp; + u32 stmheer; + u32 stmheter; + u32 stmhebsr; +}; + +static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR); + writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER); + writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER); + writel_relaxed(0x01 | /* Enable HW event tracing */ + 0x04, /* Error detection on event tracing */ + drvdata->base + STMHEMCR); + + CS_LOCK(drvdata->base); +} + +static void stm_port_enable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + /* ATB trigger enable on direct writes to TRIG locations */ + writel_relaxed(0x10, + drvdata->base + STMSPTRIGCSR); + writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); + writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); + + CS_LOCK(drvdata->base); +} + +static void stm_enable_hw(struct stm_drvdata *drvdata) +{ + if (drvdata->stmheer) + stm_hwevent_enable_hw(drvdata); + + stm_port_enable_hw(drvdata); + + CS_UNLOCK(drvdata->base); + + /* 4096 byte between synchronisation packets */ + writel_relaxed(0xFFF, drvdata->base + STMSYNCR); + writel_relaxed((drvdata->traceid << 16 | /* trace id */ + 0x02 | /* timestamp enable */ + 0x01), /* global STM enable */ + drvdata->base + STMTCSR); + + CS_LOCK(drvdata->base); +} + +static int stm_enable(struct coresight_device *csdev, + struct perf_event_attr *attr, u32 mode) +{ + u32 val; + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (mode != CS_MODE_SYSFS) + return -EINVAL; + + val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); + + /* Someone is already using the tracer */ + if (val) + return -EBUSY; + + pm_runtime_get_sync(drvdata->dev); + + spin_lock(&drvdata->spinlock); + stm_enable_hw(drvdata); + spin_unlock(&drvdata->spinlock); + + dev_info(drvdata->dev, "STM tracing enabled\n"); + return 0; +} + +static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(0x0, drvdata->base + STMHEMCR); + writel_relaxed(0x0, drvdata->base + STMHEER); + writel_relaxed(0x0, drvdata->base + STMHETER); + + CS_LOCK(drvdata->base); +} + +static void stm_port_disable_hw(struct stm_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + writel_relaxed(0x0, drvdata->base + STMSPER); + writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR); + + CS_LOCK(drvdata->base); +} + +static void stm_disable_hw(struct stm_drvdata *drvdata) +{ + u32 val; + + CS_UNLOCK(drvdata->base); + + val = readl_relaxed(drvdata->base + STMTCSR); + val &= ~0x1; /* clear global STM enable [0] */ + writel_relaxed(val, drvdata->base + STMTCSR); + + CS_LOCK(drvdata->base); + + stm_port_disable_hw(drvdata); + if (drvdata->stmheer) + stm_hwevent_disable_hw(drvdata); +} + +static void stm_disable(struct coresight_device *csdev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* + * For as long as the tracer isn't disabled another entity can't + * change its status. As such we can read the status here without + * fearing it will change under us. 
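/*
 * Illustrative sketch, not part of this patch: the two computations at the
 * heart of the extended stimulus port interface set up by stm_enable_hw()
 * above.  Each channel owns BYTES_PER_CHANNEL bytes of the stimulus area,
 * the packet type is encoded in the low offset bits via stm_channel_off(),
 * and STMTCSR carries the 7-bit trace ID in bits [22:16] next to the
 * timestamp and global enable bits.  Plain userspace C; 0x1 is the trace
 * ID the driver's defaults assign further down, and the stimulus base
 * address is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define BYTES_PER_CHANNEL	256
#define STM_PKT_TYPE_DATA	0x98
#define STM_PKT_TYPE_FLAG	0xE8

#define stm_channel_addr(base, ch)	((base) + (ch) * BYTES_PER_CHANNEL)
#define stm_channel_off(type, opts)	((type) & ~(opts))

int main(void)
{
	uint64_t chs_base = 0x20000000;	/* assumed stimulus base */
	uint8_t traceid = 0x1;

	/* where a data write for channel 3 lands */
	printf("channel 3 data address: %#llx\n", (unsigned long long)
	       (stm_channel_addr(chs_base, 3) |
		stm_channel_off(STM_PKT_TYPE_DATA, 0)));	/* 0x20000398 */

	/* trace ID in [22:16], timestamp enable, global STM enable */
	printf("STMTCSR: %#x\n", (traceid << 16) | 0x02 | 0x01);	/* 0x10003 */
	return 0;
}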
+ */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + spin_lock(&drvdata->spinlock); + stm_disable_hw(drvdata); + spin_unlock(&drvdata->spinlock); + + /* Wait until the engine has completely stopped */ + coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0); + + pm_runtime_put(drvdata->dev); + + local_set(&drvdata->mode, CS_MODE_DISABLED); + dev_info(drvdata->dev, "STM tracing disabled\n"); + } +} + +static int stm_trace_id(struct coresight_device *csdev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->traceid; +} + +static const struct coresight_ops_source stm_source_ops = { + .trace_id = stm_trace_id, + .enable = stm_enable, + .disable = stm_disable, +}; + +static const struct coresight_ops stm_cs_ops = { + .source_ops = &stm_source_ops, +}; + +static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes) +{ + return ((unsigned long)addr & (write_bytes - 1)); +} + +static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes) +{ + u8 paload[8]; + + if (stm_addr_unaligned(data, write_bytes)) { + memcpy(paload, data, size); + data = paload; + } + + /* now we are 64bit/32bit aligned */ + switch (size) { +#ifdef CONFIG_64BIT + case 8: + writeq_relaxed(*(u64 *)data, addr); + break; +#endif + case 4: + writel_relaxed(*(u32 *)data, addr); + break; + case 2: + writew_relaxed(*(u16 *)data, addr); + break; + case 1: + writeb_relaxed(*(u8 *)data, addr); + break; + default: + break; + } +} + +static int stm_generic_link(struct stm_data *stm_data, + unsigned int master, unsigned int channel) +{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!drvdata || !drvdata->csdev) + return -EINVAL; + + return coresight_enable(drvdata->csdev); +} + +static void stm_generic_unlink(struct stm_data *stm_data, + unsigned int master, unsigned int channel) +{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!drvdata || !drvdata->csdev) + return; + + stm_disable(drvdata->csdev); +} + +static long stm_generic_set_options(struct stm_data *stm_data, + unsigned int master, + unsigned int channel, + unsigned int nr_chans, + unsigned long options) +{ + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + if (!(drvdata && local_read(&drvdata->mode))) + return -EINVAL; + + if (channel >= drvdata->numsp) + return -EINVAL; + + switch (options) { + case STM_OPTION_GUARANTEED: + set_bit(channel, drvdata->chs.guaranteed); + break; + + case STM_OPTION_INVARIANT: + clear_bit(channel, drvdata->chs.guaranteed); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static ssize_t stm_generic_packet(struct stm_data *stm_data, + unsigned int master, + unsigned int channel, + unsigned int packet, + unsigned int flags, + unsigned int size, + const unsigned char *payload) +{ + unsigned long ch_addr; + struct stm_drvdata *drvdata = container_of(stm_data, + struct stm_drvdata, stm); + + if (!(drvdata && local_read(&drvdata->mode))) + return 0; + + if (channel >= drvdata->numsp) + return 0; + + ch_addr = (unsigned long)stm_channel_addr(drvdata, channel); + + flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0; + flags |= test_bit(channel, drvdata->chs.guaranteed) ? 
+ STM_FLAG_GUARANTEED : 0; + + if (size > drvdata->write_bytes) + size = drvdata->write_bytes; + else + size = rounddown_pow_of_two(size); + + switch (packet) { + case STP_PACKET_FLAG: + ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags); + + /* + * The generic STM core sets a size of '0' on flag packets. + * As such send a flag packet of size '1' and tell the + * core we did so. + */ + stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes); + size = 1; + break; + + case STP_PACKET_DATA: + ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags); + stm_send((void *)ch_addr, payload, size, + drvdata->write_bytes); + break; + + default: + return -ENOTSUPP; + } + + return size; +} + +static ssize_t hwevent_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val = drvdata->stmheer; + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t hwevent_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return -EINVAL; + + drvdata->stmheer = val; + /* HW event enable and trigger go hand in hand */ + drvdata->stmheter = val; + + return size; +} +static DEVICE_ATTR_RW(hwevent_enable); + +static ssize_t hwevent_select_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val = drvdata->stmhebsr; + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t hwevent_select_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return -EINVAL; + + drvdata->stmhebsr = val; + + return size; +} +static DEVICE_ATTR_RW(hwevent_select); + +static ssize_t port_select_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (!local_read(&drvdata->mode)) { + val = drvdata->stmspscr; + } else { + spin_lock(&drvdata->spinlock); + val = readl_relaxed(drvdata->base + STMSPSCR); + spin_unlock(&drvdata->spinlock); + } + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t port_select_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val, stmsper; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + drvdata->stmspscr = val; + + if (local_read(&drvdata->mode)) { + CS_UNLOCK(drvdata->base); + /* Process as per ARM's TRM recommendation */ + stmsper = readl_relaxed(drvdata->base + STMSPER); + writel_relaxed(0x0, drvdata->base + STMSPER); + writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); + writel_relaxed(stmsper, drvdata->base + STMSPER); + CS_LOCK(drvdata->base); + } + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(port_select); + +static ssize_t port_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (!local_read(&drvdata->mode)) { + val = drvdata->stmsper; + } else { 
+ spin_lock(&drvdata->spinlock); + val = readl_relaxed(drvdata->base + STMSPER); + spin_unlock(&drvdata->spinlock); + } + + return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); +} + +static ssize_t port_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + int ret = 0; + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + spin_lock(&drvdata->spinlock); + drvdata->stmsper = val; + + if (local_read(&drvdata->mode)) { + CS_UNLOCK(drvdata->base); + writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); + CS_LOCK(drvdata->base); + } + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(port_enable); + +static ssize_t traceid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->traceid; + return sprintf(buf, "%#lx\n", val); +} + +static ssize_t traceid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 16, &val); + if (ret) + return ret; + + /* traceid field is 7bit wide on STM32 */ + drvdata->traceid = val & 0x7f; + return size; +} +static DEVICE_ATTR_RW(traceid); + +#define coresight_stm_simple_func(name, offset) \ + coresight_simple_func(struct stm_drvdata, name, offset) + +coresight_stm_simple_func(tcsr, STMTCSR); +coresight_stm_simple_func(tsfreqr, STMTSFREQR); +coresight_stm_simple_func(syncr, STMSYNCR); +coresight_stm_simple_func(sper, STMSPER); +coresight_stm_simple_func(spter, STMSPTER); +coresight_stm_simple_func(privmaskr, STMPRIVMASKR); +coresight_stm_simple_func(spscr, STMSPSCR); +coresight_stm_simple_func(spmscr, STMSPMSCR); +coresight_stm_simple_func(spfeat1r, STMSPFEAT1R); +coresight_stm_simple_func(spfeat2r, STMSPFEAT2R); +coresight_stm_simple_func(spfeat3r, STMSPFEAT3R); +coresight_stm_simple_func(devid, CORESIGHT_DEVID); + +static struct attribute *coresight_stm_attrs[] = { + &dev_attr_hwevent_enable.attr, + &dev_attr_hwevent_select.attr, + &dev_attr_port_enable.attr, + &dev_attr_port_select.attr, + &dev_attr_traceid.attr, + NULL, +}; + +static struct attribute *coresight_stm_mgmt_attrs[] = { + &dev_attr_tcsr.attr, + &dev_attr_tsfreqr.attr, + &dev_attr_syncr.attr, + &dev_attr_sper.attr, + &dev_attr_spter.attr, + &dev_attr_privmaskr.attr, + &dev_attr_spscr.attr, + &dev_attr_spmscr.attr, + &dev_attr_spfeat1r.attr, + &dev_attr_spfeat2r.attr, + &dev_attr_spfeat3r.attr, + &dev_attr_devid.attr, + NULL, +}; + +static const struct attribute_group coresight_stm_group = { + .attrs = coresight_stm_attrs, +}; + +static const struct attribute_group coresight_stm_mgmt_group = { + .attrs = coresight_stm_mgmt_attrs, + .name = "mgmt", +}; + +static const struct attribute_group *coresight_stm_groups[] = { + &coresight_stm_group, + &coresight_stm_mgmt_group, + NULL, +}; + +static int stm_get_resource_byname(struct device_node *np, + char *ch_base, struct resource *res) +{ + const char *name = NULL; + int index = 0, found = 0; + + while (!of_property_read_string_index(np, "reg-names", index, &name)) { + if (strcmp(ch_base, name)) { + index++; + continue; + } + + /* We have a match and @index is where it's at */ + found = 1; + break; + } + + if (!found) + return -EINVAL; + + return of_address_to_resource(np, index, res); +} + +static u32 
stm_fundamental_data_size(struct stm_drvdata *drvdata) +{ + u32 stmspfeat2r; + + if (!IS_ENABLED(CONFIG_64BIT)) + return 4; + + stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R); + + /* + * bit[15:12] represents the fundamental data size + * 0 - 32-bit data + * 1 - 64-bit data + */ + return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4; +} + +static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata) +{ + u32 numsp; + + numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + /* + * NUMPS in STMDEVID is 17 bit long and if equal to 0x0, + * 32 stimulus ports are supported. + */ + numsp &= 0x1ffff; + if (!numsp) + numsp = STM_32_CHANNEL; + return numsp; +} + +static void stm_init_default_data(struct stm_drvdata *drvdata) +{ + /* Don't use port selection */ + drvdata->stmspscr = 0x0; + /* + * Enable all channel regardless of their number. When port + * selection isn't used (see above) STMSPER applies to all + * 32 channel group available, hence setting all 32 bits to 1 + */ + drvdata->stmsper = ~0x0; + + /* + * The trace ID value for *ETM* tracers start at CPU_ID * 2 + 0x10 and + * anything equal to or higher than 0x70 is reserved. Since 0x00 is + * also reserved the STM trace ID needs to be higher than 0x00 and + * lowner than 0x10. + */ + drvdata->traceid = 0x1; + + /* Set invariant transaction timing on all channels */ + bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp); +} + +static void stm_init_generic_data(struct stm_drvdata *drvdata) +{ + drvdata->stm.name = dev_name(drvdata->dev); + + /* + * MasterIDs are assigned at HW design phase. As such the core is + * using a single master for interaction with this device. + */ + drvdata->stm.sw_start = 1; + drvdata->stm.sw_end = 1; + drvdata->stm.hw_override = true; + drvdata->stm.sw_nchannels = drvdata->numsp; + drvdata->stm.packet = stm_generic_packet; + drvdata->stm.link = stm_generic_link; + drvdata->stm.unlink = stm_generic_unlink; + drvdata->stm.set_options = stm_generic_set_options; +} + +static int stm_probe(struct amba_device *adev, const struct amba_id *id) +{ + int ret; + void __iomem *base; + unsigned long *guaranteed; + struct device *dev = &adev->dev; + struct coresight_platform_data *pdata = NULL; + struct stm_drvdata *drvdata; + struct resource *res = &adev->res; + struct resource ch_res; + size_t res_size, bitmap_size; + struct coresight_desc *desc; + struct device_node *np = adev->dev.of_node; + + if (np) { + pdata = of_get_coresight_platform_data(dev, np); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + adev->dev.platform_data = pdata; + } + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->dev = &adev->dev; + drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ + if (!IS_ERR(drvdata->atclk)) { + ret = clk_prepare_enable(drvdata->atclk); + if (ret) + return ret; + } + dev_set_drvdata(dev, drvdata); + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->base = base; + + ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res); + if (ret) + return ret; + + base = devm_ioremap_resource(dev, &ch_res); + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->chs.base = base; + + drvdata->write_bytes = stm_fundamental_data_size(drvdata); + + if (boot_nr_channel) { + drvdata->numsp = boot_nr_channel; + res_size = min((resource_size_t)(boot_nr_channel * + BYTES_PER_CHANNEL), resource_size(res)); + } else { + drvdata->numsp = stm_num_stimulus_port(drvdata); + res_size = min((resource_size_t)(drvdata->numsp * + 
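For illustration (not part of the patch): stm_fundamental_data_size() and stm_num_stimulus_port() above boil down to two small register-field decodes, STMSPFEAT2R[15:12] selecting 8- or 4-byte writes on 64-bit kernels, and the 17-bit NUMSP field of STMDEVID where zero encodes 32 ports. A hedged stand-alone model of both decodes; the shift-and-mask stands in for the driver's BMVAL() helper.

/* Illustrative sketch (not part of the patch): models the STMSPFEAT2R[15:12]
 * fundamental-data-size decode and the STMDEVID NUMSP decode shown above. */
#include <stdio.h>
#include <stdint.h>

static unsigned int fundamental_write_bytes(uint32_t stmspfeat2r, int is_64bit)
{
	if (!is_64bit)			/* 32-bit kernels always use 4 bytes */
		return 4;
	/* bits [15:12]: non-zero means 64-bit data */
	return ((stmspfeat2r >> 12) & 0xf) ? 8 : 4;
}

static unsigned int num_stimulus_ports(uint32_t devid)
{
	unsigned int numsp = devid & 0x1ffff;	/* NUMSP is 17 bits wide */

	return numsp ? numsp : 32;		/* 0 encodes 32 ports */
}

int main(void)
{
	/* expect: 8 32 */
	printf("%u %u\n", fundamental_write_bytes(0x1000, 1),
	       num_stimulus_ports(0x0));
	return 0;
}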
BYTES_PER_CHANNEL), resource_size(res)); + } + bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); + + guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); + if (!guaranteed) + return -ENOMEM; + drvdata->chs.guaranteed = guaranteed; + + spin_lock_init(&drvdata->spinlock); + + stm_init_default_data(drvdata); + stm_init_generic_data(drvdata); + + if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) { + dev_info(dev, + "stm_register_device failed, probing deffered\n"); + return -EPROBE_DEFER; + } + + desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto stm_unregister; + } + + desc->type = CORESIGHT_DEV_TYPE_SOURCE; + desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE; + desc->ops = &stm_cs_ops; + desc->pdata = pdata; + desc->dev = dev; + desc->groups = coresight_stm_groups; + drvdata->csdev = coresight_register(desc); + if (IS_ERR(drvdata->csdev)) { + ret = PTR_ERR(drvdata->csdev); + goto stm_unregister; + } + + pm_runtime_put(&adev->dev); + + dev_info(dev, "%s initialized\n", (char *)id->data); + return 0; + +stm_unregister: + stm_unregister_device(&drvdata->stm); + return ret; +} + +#ifdef CONFIG_PM +static int stm_runtime_suspend(struct device *dev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR(drvdata->atclk)) + clk_disable_unprepare(drvdata->atclk); + + return 0; +} + +static int stm_runtime_resume(struct device *dev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR(drvdata->atclk)) + clk_prepare_enable(drvdata->atclk); + + return 0; +} +#endif + +static const struct dev_pm_ops stm_dev_pm_ops = { + SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL) +}; + +static struct amba_id stm_ids[] = { + { + .id = 0x0003b962, + .mask = 0x0003ffff, + .data = "STM32", + }, + { 0, 0}, +}; + +static struct amba_driver stm_driver = { + .drv = { + .name = "coresight-stm", + .owner = THIS_MODULE, + .pm = &stm_dev_pm_ops, + .suppress_bind_attrs = true, + }, + .probe = stm_probe, + .id_table = stm_ids, +}; + +builtin_amba_driver(stm_driver); diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c new file mode 100644 index 000000000000..466af86fd76f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -0,0 +1,604 @@ +/* + * Copyright(C) 2016 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
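Before moving on to the new TMC files, a stand-alone model (not part of the patch) of the sizing arithmetic in stm_probe() above: the stimulus region is bounded by the smaller of numsp * BYTES_PER_CHANNEL and the mapped resource, and the guaranteed-channel bitmap is sized in longs. BYTES_PER_CHANNEL is defined elsewhere in the driver, so the sketch takes it as a parameter; the numbers in main() are made up.

/* Illustrative sketch (not part of the patch): models the resource and bitmap
 * sizing in stm_probe(). bytes_per_channel stands in for BYTES_PER_CHANNEL,
 * which is defined elsewhere in the driver. */
#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG_MODEL	(8 * sizeof(long))

static size_t stimulus_res_size(unsigned int numsp, size_t bytes_per_channel,
				size_t resource_size)
{
	size_t wanted = (size_t)numsp * bytes_per_channel;

	return wanted < resource_size ? wanted : resource_size;
}

static size_t guaranteed_bitmap_bytes(unsigned int numsp)
{
	/* same rounding as BITS_TO_LONGS(numsp) * sizeof(long) */
	size_t longs = (numsp + BITS_PER_LONG_MODEL - 1) / BITS_PER_LONG_MODEL;

	return longs * sizeof(long);
}

int main(void)
{
	/* e.g. 65536 ports, hypothetical 256-byte channels, 1 MiB window */
	printf("%zu %zu\n", stimulus_res_size(65536, 256, 1 << 20),
	       guaranteed_bitmap_bytes(65536));
	return 0;
}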
+ */ + +#include <linux/circ_buf.h> +#include <linux/coresight.h> +#include <linux/perf_event.h> +#include <linux/slab.h> +#include "coresight-priv.h" +#include "coresight-tmc.h" + +void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | + TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | + TMC_FFCR_TRIGON_TRIGIN, + drvdata->base + TMC_FFCR); + + writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) +{ + char *bufp; + u32 read_data; + int i; + + bufp = drvdata->buf; + while (1) { + for (i = 0; i < drvdata->memwidth; i++) { + read_data = readl_relaxed(drvdata->base + TMC_RRD); + if (read_data == 0xFFFFFFFF) + return; + memcpy(bufp, &read_data, 4); + bufp += 4; + } + } +} + +static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + /* + * When operating in sysFS mode the content of the buffer needs to be + * read before the TMC is disabled. + */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) + tmc_etb_dump_hw(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, + drvdata->base + TMC_FFCR); + writel_relaxed(0x0, drvdata->base + TMC_BUFWM); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + bool used = false; + char *buf = NULL; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_SYSFS)) + return -EINVAL; + + /* + * If we don't have a buffer release the lock and allocate memory. + * Otherwise keep the lock and move along. + */ + spin_lock_irqsave(&drvdata->spinlock, flags); + if (!drvdata->buf) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Allocating the memory here while outside of the spinlock */ + buf = kzalloc(drvdata->size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Let's try again */ + spin_lock_irqsave(&drvdata->spinlock, flags); + } + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In sysFS mode we can have multiple writers per sink. Since this + * sink is already enabled no memory is needed and the HW need not be + * touched. + */ + if (val == CS_MODE_SYSFS) + goto out; + + /* + * If drvdata::buf isn't NULL, memory was allocated for a previous + * trace run but wasn't read. If so simply zero-out the memory. + * Otherwise use the memory allocated above. + * + * The memory is freed when users read the buffer using the + * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for + * details. 
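For illustration (not part of the patch): tmc_etb_dump_hw() above drains the trace RAM through TMC_RRD one memory-interface beat group at a time and stops as soon as the data register returns the all-ones empty marker. A hedged stand-alone model of that loop, with an array standing in for the read-data register; the bounds check on the output buffer is an addition of the model.

/* Illustrative sketch (not part of the patch): models the TMC_RRD drain loop
 * in tmc_etb_dump_hw(); an array stands in for the read-data register. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static size_t drain(const uint32_t *rrd, size_t beats_per_group,
		    char *out, size_t out_len)
{
	size_t copied = 0, i;

	for (;;) {
		for (i = 0; i < beats_per_group; i++) {
			uint32_t word = *rrd++;

			if (word == 0xFFFFFFFF)		/* empty marker */
				return copied;
			if (copied + 4 > out_len)	/* model-only bound */
				return copied;
			memcpy(out + copied, &word, 4);
			copied += 4;
		}
	}
}

int main(void)
{
	const uint32_t fifo[] = { 0x11111111, 0x22222222, 0xFFFFFFFF };
	char buf[16];

	printf("%zu bytes\n", drain(fifo, 2, buf, sizeof(buf)));	/* 8 bytes */
	return 0;
}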
+ */ + if (drvdata->buf) { + memset(drvdata->buf, 0, drvdata->size); + } else { + used = true; + drvdata->buf = buf; + } + + tmc_etb_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free memory outside the spinlock if need be */ + if (!used && buf) + kfree(buf); + + if (!ret) + dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); + + return ret; +} + +static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_PERF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EINVAL; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In Perf mode there can be only one writer per sink. There + * is also no need to continue if the ETB/ETR is already operated + * from sysFS. + */ + if (val != CS_MODE_DISABLED) { + ret = -EINVAL; + goto out; + } + + tmc_etb_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) +{ + switch (mode) { + case CS_MODE_SYSFS: + return tmc_enable_etf_sink_sysfs(csdev, mode); + case CS_MODE_PERF: + return tmc_enable_etf_sink_perf(csdev, mode); + } + + /* We shouldn't be here */ + return -EINVAL; +} + +static void tmc_disable_etf_sink(struct coresight_device *csdev) +{ + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); + /* Disable the TMC only if it needs to */ + if (val != CS_MODE_DISABLED) + tmc_etb_disable_hw(drvdata); + + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); +} + +static int tmc_enable_etf_link(struct coresight_device *csdev, + int inport, int outport) +{ + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EBUSY; + } + + tmc_etf_enable_hw(drvdata); + local_set(&drvdata->mode, CS_MODE_SYSFS); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETF enabled\n"); + return 0; +} + +static void tmc_disable_etf_link(struct coresight_device *csdev, + int inport, int outport) +{ + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + tmc_etf_disable_hw(drvdata); + local_set(&drvdata->mode, CS_MODE_DISABLED); + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC disabled\n"); +} + +static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite) +{ + int node; + struct cs_buffers *buf; + + if (cpu == -1) + cpu = smp_processor_id(); + node = cpu_to_node(cpu); + + /* Allocate memory structure for interaction with Perf */ + buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); + if (!buf) + return NULL; + + buf->snapshot = overwrite; + buf->nr_pages = nr_pages; + 
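For illustration (not part of the patch): the sysFS enable path above drops the spinlock before the sleeping GFP_KERNEL allocation, retakes it, re-checks whether a buffer appeared in the meantime, and frees the spare allocation outside the lock. A hedged user-space model of that pattern; a pthread mutex stands in for the driver's spinlock and calloc() for kzalloc().

/* Illustrative sketch (not part of the patch): models the "allocate outside
 * the lock, re-check under the lock" pattern of tmc_enable_etf_sink_sysfs(). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_buf;

static int enable_sysfs(size_t size)
{
	void *buf = NULL;
	int used = 0;

	pthread_mutex_lock(&lock);
	if (!shared_buf) {
		pthread_mutex_unlock(&lock);
		buf = calloc(1, size);		/* sleeping allocation, lock dropped */
		if (!buf)
			return -1;
		pthread_mutex_lock(&lock);	/* "let's try again" */
	}
	if (!shared_buf) {			/* re-check: did anyone beat us to it? */
		shared_buf = buf;
		used = 1;
	}
	pthread_mutex_unlock(&lock);

	if (!used)
		free(buf);			/* spare allocation, freed outside the lock */
	return 0;
}

int main(void)
{
	printf("%d %d\n", enable_sysfs(64), enable_sysfs(64));	/* 0 0 */
	return 0;
}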
buf->data_pages = pages; + + return buf; +} + +static void tmc_free_etf_buffer(void *config) +{ + struct cs_buffers *buf = config; + + kfree(buf); +} + +static int tmc_set_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int ret = 0; + unsigned long head; + struct cs_buffers *buf = sink_config; + + /* wrap head around to the amount of space we have */ + head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); + + /* find the page to write to */ + buf->cur = head / PAGE_SIZE; + + /* and offset within that page */ + buf->offset = head % PAGE_SIZE; + + local_set(&buf->data_size, 0); + + return ret; +} + +static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost) +{ + long size = 0; + struct cs_buffers *buf = sink_config; + + if (buf) { + /* + * In snapshot mode ->data_size holds the new address of the + * ring buffer's head. The size itself is the whole address + * range since we want the latest information. + */ + if (buf->snapshot) + handle->head = local_xchg(&buf->data_size, + buf->nr_pages << PAGE_SHIFT); + /* + * Tell the tracer PMU how much we got in this run and if + * something went wrong along the way. Nobody else can use + * this cs_buffers instance until we are done. As such + * resetting parameters here and squaring off with the ring + * buffer API in the tracer PMU is fine. + */ + *lost = !!local_xchg(&buf->lost, 0); + size = local_xchg(&buf->data_size, 0); + } + + return size; +} + +static void tmc_update_etf_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int i, cur; + u32 *buf_ptr; + u32 read_ptr, write_ptr; + u32 status, to_read; + unsigned long offset; + struct cs_buffers *buf = sink_config; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (!buf) + return; + + /* This shouldn't happen */ + if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF)) + return; + + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + + read_ptr = readl_relaxed(drvdata->base + TMC_RRP); + write_ptr = readl_relaxed(drvdata->base + TMC_RWP); + + /* + * Get a hold of the status register and see if a wrap around + * has occurred. If so adjust things accordingly. + */ + status = readl_relaxed(drvdata->base + TMC_STS); + if (status & TMC_STS_FULL) { + local_inc(&buf->lost); + to_read = drvdata->size; + } else { + to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); + } + + /* + * The TMC RAM buffer may be bigger than the space available in the + * perf ring buffer (handle->size). If so advance the RRP so that we + * get the latest trace data. + */ + if (to_read > handle->size) { + u32 mask = 0; + + /* + * The value written to RRP must be byte-address aligned to + * the width of the trace memory databus _and_ to a frame + * boundary (16 byte), whichever is the biggest. For example, + * for 32-bit, 64-bit and 128-bit wide trace memory, the four + * LSBs must be 0s. For 256-bit wide trace memory, the five + * LSBs must be 0s. + */ + switch (drvdata->memwidth) { + case TMC_MEM_INTF_WIDTH_32BITS: + case TMC_MEM_INTF_WIDTH_64BITS: + case TMC_MEM_INTF_WIDTH_128BITS: + mask = GENMASK(31, 5); + break; + case TMC_MEM_INTF_WIDTH_256BITS: + mask = GENMASK(31, 6); + break; + } + + /* + * Make sure the new size is aligned in accordance with the + * requirement explained above. 
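For illustration (not part of the patch): tmc_set_etf_buffer() above wraps the perf head around the AUX buffer and splits it into a page index and an in-page offset. A minimal model of that arithmetic, assuming 4 KiB pages and a power-of-two number of pages; the numbers in main() are made up.

/* Illustrative sketch (not part of the patch): models how tmc_set_etf_buffer()
 * turns a perf 'head' into a page index and offset, assuming 4 KiB pages. */
#include <stdio.h>

#define MODEL_PAGE_SIZE		4096UL
#define MODEL_PAGE_SHIFT	12

int main(void)
{
	unsigned long head = 0x5234;		/* running perf head counter */
	unsigned long nr_pages = 4;		/* power-of-two AUX pages */

	/* wrap around to the amount of space we actually have */
	head &= (nr_pages << MODEL_PAGE_SHIFT) - 1;

	/* expect: page 1 offset 564 */
	printf("page %lu offset %lu\n",
	       head / MODEL_PAGE_SIZE, head % MODEL_PAGE_SIZE);
	return 0;
}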
+ */ + to_read = handle->size & mask; + /* Move the RAM read pointer up */ + read_ptr = (write_ptr + drvdata->size) - to_read; + /* Make sure we are still within our limits */ + if (read_ptr > (drvdata->size - 1)) + read_ptr -= drvdata->size; + /* Tell the HW */ + writel_relaxed(read_ptr, drvdata->base + TMC_RRP); + local_inc(&buf->lost); + } + + cur = buf->cur; + offset = buf->offset; + + /* for every byte to read */ + for (i = 0; i < to_read; i += 4) { + buf_ptr = buf->data_pages[cur] + offset; + *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); + + offset += 4; + if (offset >= PAGE_SIZE) { + offset = 0; + cur++; + /* wrap around at the end of the buffer */ + cur &= buf->nr_pages - 1; + } + } + + /* + * In snapshot mode all we have to do is communicate to + * perf_aux_output_end() the address of the current head. In full + * trace mode the same function expects a size to move rb->aux_head + * forward. + */ + if (buf->snapshot) + local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); + else + local_add(to_read, &buf->data_size); + + CS_LOCK(drvdata->base); +} + +static const struct coresight_ops_sink tmc_etf_sink_ops = { + .enable = tmc_enable_etf_sink, + .disable = tmc_disable_etf_sink, + .alloc_buffer = tmc_alloc_etf_buffer, + .free_buffer = tmc_free_etf_buffer, + .set_buffer = tmc_set_etf_buffer, + .reset_buffer = tmc_reset_etf_buffer, + .update_buffer = tmc_update_etf_buffer, +}; + +static const struct coresight_ops_link tmc_etf_link_ops = { + .enable = tmc_enable_etf_link, + .disable = tmc_disable_etf_link, +}; + +const struct coresight_ops tmc_etb_cs_ops = { + .sink_ops = &tmc_etf_sink_ops, +}; + +const struct coresight_ops tmc_etf_cs_ops = { + .sink_ops = &tmc_etf_sink_ops, + .link_ops = &tmc_etf_link_ops, +}; + +int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) +{ + long val; + enum tmc_mode mode; + int ret = 0; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && + drvdata->config_type != TMC_CONFIG_TYPE_ETF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + ret = -EINVAL; + goto out; + } + + val = local_read(&drvdata->mode); + /* Don't interfere if operated from Perf */ + if (val == CS_MODE_PERF) { + ret = -EINVAL; + goto out; + } + + /* If drvdata::buf is NULL the trace data has been read already */ + if (drvdata->buf == NULL) { + ret = -EINVAL; + goto out; + } + + /* Disable the TMC if need be */ + if (val == CS_MODE_SYSFS) + tmc_etb_disable_hw(drvdata); + + drvdata->reading = true; +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) +{ + char *buf = NULL; + enum tmc_mode mode; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && + drvdata->config_type != TMC_CONFIG_TYPE_ETF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EINVAL; + } + + /* Re-enable the TMC if need be */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + /* + * The 
trace run will continue with the same allocated trace + * buffer. As such zero-out the buffer so that we don't end + * up with stale data. + * + * Since the tracer is still enabled drvdata::buf + * can't be NULL. + */ + memset(drvdata->buf, 0, drvdata->size); + tmc_etb_enable_hw(drvdata); + } else { + /* + * The ETB/ETF is not tracing and the buffer was just read. + * As such prepare to free the trace buffer. + */ + buf = drvdata->buf; + drvdata->buf = NULL; + } + + drvdata->reading = false; + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* + * Free allocated memory outside of the spinlock. There is no need + * to assert the validity of 'buf' since calling kfree(NULL) is safe. + */ + kfree(buf); + + return 0; +} diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c new file mode 100644 index 000000000000..ba54e1942a8c --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -0,0 +1,569 @@ +/* + * Copyright(C) 2016 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/circ_buf.h> +#include <linux/coresight.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> + +#include "coresight-priv.h" +#include "coresight-tmc.h" + +/** + * struct cs_etr_buffer - keep track of a recording session' specifics + * @tmc: generic portion of the TMC buffers + * @paddr: the physical address of a DMA'able contiguous memory area + * @vaddr: the virtual address associated to @paddr + * @size: how much memory we have, starting at @paddr + * @dev: the device @vaddr has been tied to + */ +struct cs_etr_buffers { + struct cs_buffers tmc; + dma_addr_t paddr; + void __iomem *vaddr; + u32 size; + struct device *dev; +}; + +void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) +{ + u32 axictl; + + /* Zero out the memory to help with debug */ + memset(drvdata->vaddr, 0, drvdata->size); + + CS_UNLOCK(drvdata->base); + + /* Wait for TMCSReady bit to be set */ + tmc_wait_for_tmcready(drvdata); + + writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); + writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); + + axictl = readl_relaxed(drvdata->base + TMC_AXICTL); + axictl |= TMC_AXICTL_WR_BURST_16; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + axictl &= ~TMC_AXICTL_SCT_GAT_MODE; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + axictl = (axictl & + ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | + TMC_AXICTL_PROT_CTL_B1; + writel_relaxed(axictl, drvdata->base + TMC_AXICTL); + + writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); + writel_relaxed(0x0, drvdata->base + TMC_DBAHI); + writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | + TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | + TMC_FFCR_TRIGON_TRIGIN, + drvdata->base + TMC_FFCR); + writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); + tmc_enable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + 
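For illustration (not part of the patch): tmc_etr_enable_hw() just above programs AXICTL with a short read-modify-write sequence, setting the write-burst field (TMC_AXICTL_WR_BURST_16), clearing scatter-gather mode and forcing the protection control bits to b10. A stand-alone model of the same bit manipulation on a plain variable; the bit positions follow the TMC_AXICTL_* defines added to coresight-tmc.h later in this patch, and the reset value is made up.

/* Illustrative sketch (not part of the patch): models the AXICTL
 * read-modify-write sequence in tmc_etr_enable_hw(). */
#include <stdio.h>
#include <stdint.h>

#define AXICTL_PROT_CTL_B0	(1u << 0)
#define AXICTL_PROT_CTL_B1	(1u << 1)
#define AXICTL_SCT_GAT_MODE	(1u << 7)
#define AXICTL_WR_BURST_16	0xF00u

int main(void)
{
	uint32_t axictl = 0x81;			/* made-up starting value */

	axictl |= AXICTL_WR_BURST_16;		/* write-burst field */
	axictl &= ~AXICTL_SCT_GAT_MODE;		/* contiguous buffer, no SG */
	axictl = (axictl & ~(AXICTL_PROT_CTL_B0 | AXICTL_PROT_CTL_B1)) |
		 AXICTL_PROT_CTL_B1;		/* prot[1:0] = b10 */

	printf("AXICTL = %#x\n", axictl);	/* expect 0xf02 */
	return 0;
}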
+static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) +{ + u32 rwp, val; + + rwp = readl_relaxed(drvdata->base + TMC_RWP); + val = readl_relaxed(drvdata->base + TMC_STS); + + /* How much memory do we still have */ + if (val & BIT(0)) + drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; + else + drvdata->buf = drvdata->vaddr; +} + +static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) +{ + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + /* + * When operating in sysFS mode the content of the buffer needs to be + * read before the TMC is disabled. + */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) + tmc_etr_dump_hw(drvdata); + tmc_disable_hw(drvdata); + + CS_LOCK(drvdata->base); +} + +static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + bool used = false; + long val; + unsigned long flags; + void __iomem *vaddr = NULL; + dma_addr_t paddr; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_SYSFS)) + return -EINVAL; + + /* + * If we don't have a buffer release the lock and allocate memory. + * Otherwise keep the lock and move along. + */ + spin_lock_irqsave(&drvdata->spinlock, flags); + if (!drvdata->vaddr) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* + * Contiguous memory can't be allocated while a spinlock is + * held. As such allocate memory here and free it if a buffer + * has already been allocated (from a previous session). + */ + vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size, + &paddr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + /* Let's try again */ + spin_lock_irqsave(&drvdata->spinlock, flags); + } + + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In sysFS mode we can have multiple writers per sink. Since this + * sink is already enabled no memory is needed and the HW need not be + * touched. + */ + if (val == CS_MODE_SYSFS) + goto out; + + /* + * If drvdata::buf == NULL, use the memory allocated above. + * Otherwise a buffer still exists from a previous session, so + * simply use that. + */ + if (drvdata->buf == NULL) { + used = true; + drvdata->vaddr = vaddr; + drvdata->paddr = paddr; + drvdata->buf = drvdata->vaddr; + } + + memset(drvdata->vaddr, 0, drvdata->size); + + tmc_etr_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free memory outside the spinlock if need be */ + if (!used && vaddr) + dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); + + if (!ret) + dev_info(drvdata->dev, "TMC-ETR enabled\n"); + + return ret; +} + +static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode) +{ + int ret = 0; + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* This shouldn't be happening */ + if (WARN_ON(mode != CS_MODE_PERF)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EINVAL; + goto out; + } + + val = local_xchg(&drvdata->mode, mode); + /* + * In Perf mode there can be only one writer per sink. There + * is also no need to continue if the ETR is already operated + * from sysFS. 
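For illustration (not part of the patch): tmc_etr_dump_hw() above turns the hardware write pointer, a bus address inside the DMA buffer, back into a CPU pointer by offsetting from the buffer's virtual base, and only does so when the full bit is set. A hedged model with plain integers standing in for the addresses; all values are made up.

/* Illustrative sketch (not part of the patch): models the RWP-to-virtual
 * translation in tmc_etr_dump_hw(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t paddr = 0x80000000;		/* DMA/bus address of the buffer */
	uint64_t vaddr = 0x7f0000000000;	/* CPU mapping of the same buffer */
	uint64_t rwp   = 0x80000340;		/* hardware write pointer (bus address) */
	int full = 1;				/* TMC_STS bit 0 set: buffer wrapped */
	uint64_t buf;

	/* When the buffer wrapped, the oldest data starts at the write
	 * pointer; otherwise reading starts at the beginning of the buffer. */
	buf = full ? vaddr + (rwp - paddr) : vaddr;

	printf("read from %#llx\n", (unsigned long long)buf);	/* ...000340 */
	return 0;
}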
+ */ + if (val != CS_MODE_DISABLED) { + ret = -EINVAL; + goto out; + } + + tmc_etr_enable_hw(drvdata); +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode) +{ + switch (mode) { + case CS_MODE_SYSFS: + return tmc_enable_etr_sink_sysfs(csdev, mode); + case CS_MODE_PERF: + return tmc_enable_etr_sink_perf(csdev, mode); + } + + /* We shouldn't be here */ + return -EINVAL; +} + +static void tmc_disable_etr_sink(struct coresight_device *csdev) +{ + long val; + unsigned long flags; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return; + } + + val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); + /* Disable the TMC only if it needs to */ + if (val != CS_MODE_DISABLED) + tmc_etr_disable_hw(drvdata); + + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_info(drvdata->dev, "TMC-ETR disabled\n"); +} + +static void *tmc_alloc_etr_buffer(struct coresight_device *csdev, int cpu, + void **pages, int nr_pages, bool overwrite) +{ + int node; + struct cs_etr_buffers *buf; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (cpu == -1) + cpu = smp_processor_id(); + node = cpu_to_node(cpu); + + /* Allocate memory structure for interaction with Perf */ + buf = kzalloc_node(sizeof(struct cs_etr_buffers), GFP_KERNEL, node); + if (!buf) + return NULL; + + buf->dev = drvdata->dev; + buf->size = drvdata->size; + buf->vaddr = dma_alloc_coherent(buf->dev, buf->size, + &buf->paddr, GFP_KERNEL); + if (!buf->vaddr) { + kfree(buf); + return NULL; + } + + buf->tmc.snapshot = overwrite; + buf->tmc.nr_pages = nr_pages; + buf->tmc.data_pages = pages; + + return buf; +} + +static void tmc_free_etr_buffer(void *config) +{ + struct cs_etr_buffers *buf = config; + + dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->paddr); + kfree(buf); +} + +static int tmc_set_etr_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int ret = 0; + unsigned long head; + struct cs_etr_buffers *buf = sink_config; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + /* wrap head around to the amount of space we have */ + head = handle->head & ((buf->tmc.nr_pages << PAGE_SHIFT) - 1); + + /* find the page to write to */ + buf->tmc.cur = head / PAGE_SIZE; + + /* and offset within that page */ + buf->tmc.offset = head % PAGE_SIZE; + + local_set(&buf->tmc.data_size, 0); + + /* Tell the HW where to put the trace data */ + drvdata->vaddr = buf->vaddr; + drvdata->paddr = buf->paddr; + memset(drvdata->vaddr, 0, drvdata->size); + + return ret; +} + +static unsigned long tmc_reset_etr_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config, bool *lost) +{ + long size = 0; + struct cs_etr_buffers *buf = sink_config; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (buf) { + /* + * In snapshot mode ->data_size holds the new address of the + * ring buffer's head. The size itself is the whole address + * range since we want the latest information. + */ + if (buf->tmc.snapshot) { + size = buf->tmc.nr_pages << PAGE_SHIFT; + handle->head = local_xchg(&buf->tmc.data_size, size); + } + + /* + * Tell the tracer PMU how much we got in this run and if + * something went wrong along the way. 
Nobody else can use + * this cs_etr_buffers instance until we are done. As such + * resetting parameters here and squaring off with the ring + * buffer API in the tracer PMU is fine. + */ + *lost = !!local_xchg(&buf->tmc.lost, 0); + size = local_xchg(&buf->tmc.data_size, 0); + } + + /* Get ready for another run */ + drvdata->vaddr = NULL; + drvdata->paddr = 0; + + return size; +} + +static void tmc_update_etr_buffer(struct coresight_device *csdev, + struct perf_output_handle *handle, + void *sink_config) +{ + int i, cur; + u32 *buf_ptr; + u32 read_ptr, write_ptr; + u32 status, to_read; + unsigned long offset; + struct cs_buffers *buf = sink_config; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + if (!buf) + return; + + /* This shouldn't happen */ + if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF)) + return; + + CS_UNLOCK(drvdata->base); + + tmc_flush_and_stop(drvdata); + + read_ptr = readl_relaxed(drvdata->base + TMC_RRP); + write_ptr = readl_relaxed(drvdata->base + TMC_RWP); + + /* + * Get a hold of the status register and see if a wrap around + * has occurred. If so adjust things accordingly. + */ + status = readl_relaxed(drvdata->base + TMC_STS); + if (status & TMC_STS_FULL) { + local_inc(&buf->lost); + to_read = drvdata->size; + } else { + to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); + } + + /* + * The TMC RAM buffer may be bigger than the space available in the + * perf ring buffer (handle->size). If so advance the RRP so that we + * get the latest trace data. + */ + if (to_read > handle->size) { + u32 buffer_start, mask = 0; + + /* Read buffer start address in system memory */ + buffer_start = readl_relaxed(drvdata->base + TMC_DBALO); + + /* + * The value written to RRP must be byte-address aligned to + * the width of the trace memory databus _and_ to a frame + * boundary (16 byte), whichever is the biggest. For example, + * for 32-bit, 64-bit and 128-bit wide trace memory, the four + * LSBs must be 0s. For 256-bit wide trace memory, the five + * LSBs must be 0s. + */ + switch (drvdata->memwidth) { + case TMC_MEM_INTF_WIDTH_32BITS: + case TMC_MEM_INTF_WIDTH_64BITS: + case TMC_MEM_INTF_WIDTH_128BITS: + mask = GENMASK(31, 5); + break; + case TMC_MEM_INTF_WIDTH_256BITS: + mask = GENMASK(31, 6); + break; + } + + /* + * Make sure the new size is aligned in accordance with the + * requirement explained above. + */ + to_read = handle->size & mask; + /* Move the RAM read pointer up */ + read_ptr = (write_ptr + drvdata->size) - to_read; + /* Make sure we are still within our limits */ + if (read_ptr > (buffer_start + (drvdata->size - 1))) + read_ptr -= drvdata->size; + /* Tell the HW */ + writel_relaxed(read_ptr, drvdata->base + TMC_RRP); + local_inc(&buf->lost); + } + + cur = buf->cur; + offset = buf->offset; + + /* for every byte to read */ + for (i = 0; i < to_read; i += 4) { + buf_ptr = buf->data_pages[cur] + offset; + *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); + + offset += 4; + if (offset >= PAGE_SIZE) { + offset = 0; + cur++; + /* wrap around at the end of the buffer */ + cur &= buf->nr_pages - 1; + } + } + + /* + * In snapshot mode all we have to do is communicate to + * perf_aux_output_end() the address of the current head. In full + * trace mode the same function expects a size to move rb->aux_head + * forward. 
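For illustration (not part of the patch): when the TMC holds more trace data than the perf handle can take, both update paths in this patch align the amount to read down to the bus/frame granularity and walk RRP back from the write pointer, wrapping inside the buffer. A stand-alone model of the ETR variant in tmc_update_etr_buffer() above, where the pointers are system addresses and the wrap limit comes from the buffer start read back from TMC_DBALO; all values are made up.

/* Illustrative sketch (not part of the patch): models the RRP adjustment in
 * tmc_update_etr_buffer() when to_read exceeds the perf handle size. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t buffer_start = 0x80000000;	/* DBALO: start of the DMA buffer */
	uint32_t size = 0x100000;		/* 1 MiB trace buffer */
	uint32_t write_ptr = buffer_start + 0x200;
	uint32_t handle_size = 0x8234;		/* room left in the perf AUX buffer */
	uint32_t mask = ~0x1fu;			/* GENMASK(31, 5): 32-byte granularity */
	uint32_t to_read, read_ptr;

	/* only keep what fits, aligned down to the required granularity */
	to_read = handle_size & mask;			/* 0x8220 */

	/* walk the read pointer back from the write pointer ... */
	read_ptr = (write_ptr + size) - to_read;
	/* ... and wrap if it ran past the end of the buffer */
	if (read_ptr > buffer_start + (size - 1))
		read_ptr -= size;

	printf("read_ptr=%#x (offset %#x)\n",
	       read_ptr, read_ptr - buffer_start);
	return 0;
}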
+ */ + if (buf->snapshot) + local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); + else + local_add(to_read, &buf->data_size); + + CS_LOCK(drvdata->base); +} + +static const struct coresight_ops_sink tmc_etr_sink_ops = { + .enable = tmc_enable_etr_sink, + .disable = tmc_disable_etr_sink, + .alloc_buffer = tmc_alloc_etr_buffer, + .free_buffer = tmc_free_etr_buffer, + .set_buffer = tmc_set_etr_buffer, + .reset_buffer = tmc_reset_etr_buffer, + .update_buffer = tmc_update_etr_buffer, +}; + +const struct coresight_ops tmc_etr_cs_ops = { + .sink_ops = &tmc_etr_sink_ops, +}; + +int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) +{ + int ret = 0; + long val; + unsigned long flags; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EBUSY; + goto out; + } + + val = local_read(&drvdata->mode); + /* Don't interfere if operated from Perf */ + if (val == CS_MODE_PERF) { + ret = -EINVAL; + goto out; + } + + /* If drvdata::buf is NULL the trace data has been read already */ + if (drvdata->buf == NULL) { + ret = -EINVAL; + goto out; + } + + /* Disable the TMC if need be */ + if (val == CS_MODE_SYSFS) + tmc_etr_disable_hw(drvdata); + + drvdata->reading = true; +out: + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + return ret; +} + +int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) +{ + unsigned long flags; + dma_addr_t paddr; + void __iomem *vaddr = NULL; + + /* config types are set a boot time and never change */ + if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) + return -EINVAL; + + spin_lock_irqsave(&drvdata->spinlock, flags); + + /* RE-enable the TMC if need be */ + if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { + /* + * The trace run will continue with the same allocated trace + * buffer. The trace buffer is cleared in tmc_etr_enable_hw(), + * so we don't have to explicitly clear it. Also, since the + * tracer is still enabled drvdata::buf can't be NULL. + */ + tmc_etr_enable_hw(drvdata); + } else { + /* + * The ETR is not tracing and the buffer was just read. + * As such prepare to free the trace buffer. + */ + vaddr = drvdata->vaddr; + paddr = drvdata->paddr; + drvdata->buf = drvdata->vaddr = NULL; + } + + drvdata->reading = false; + spin_unlock_irqrestore(&drvdata->spinlock, flags); + + /* Free allocated memory out side of the spinlock */ + if (vaddr) + dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); + + return 0; +} diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index a57c7ec1661f..3978cbb6b038 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,5 +1,7 @@ /* Copyright (c) 2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Trace Memory Controller driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. 
@@ -11,7 +13,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> @@ -29,127 +30,27 @@ #include <linux/amba/bus.h> #include "coresight-priv.h" +#include "coresight-tmc.h" -#define TMC_RSZ 0x004 -#define TMC_STS 0x00c -#define TMC_RRD 0x010 -#define TMC_RRP 0x014 -#define TMC_RWP 0x018 -#define TMC_TRG 0x01c -#define TMC_CTL 0x020 -#define TMC_RWD 0x024 -#define TMC_MODE 0x028 -#define TMC_LBUFLEVEL 0x02c -#define TMC_CBUFLEVEL 0x030 -#define TMC_BUFWM 0x034 -#define TMC_RRPHI 0x038 -#define TMC_RWPHI 0x03c -#define TMC_AXICTL 0x110 -#define TMC_DBALO 0x118 -#define TMC_DBAHI 0x11c -#define TMC_FFSR 0x300 -#define TMC_FFCR 0x304 -#define TMC_PSCR 0x308 -#define TMC_ITMISCOP0 0xee0 -#define TMC_ITTRFLIN 0xee8 -#define TMC_ITATBDATA0 0xeec -#define TMC_ITATBCTR2 0xef0 -#define TMC_ITATBCTR1 0xef4 -#define TMC_ITATBCTR0 0xef8 - -/* register description */ -/* TMC_CTL - 0x020 */ -#define TMC_CTL_CAPT_EN BIT(0) -/* TMC_STS - 0x00C */ -#define TMC_STS_TRIGGERED BIT(1) -/* TMC_AXICTL - 0x110 */ -#define TMC_AXICTL_PROT_CTL_B0 BIT(0) -#define TMC_AXICTL_PROT_CTL_B1 BIT(1) -#define TMC_AXICTL_SCT_GAT_MODE BIT(7) -#define TMC_AXICTL_WR_BURST_LEN 0xF00 -/* TMC_FFCR - 0x304 */ -#define TMC_FFCR_EN_FMT BIT(0) -#define TMC_FFCR_EN_TI BIT(1) -#define TMC_FFCR_FON_FLIN BIT(4) -#define TMC_FFCR_FON_TRIG_EVT BIT(5) -#define TMC_FFCR_FLUSHMAN BIT(6) -#define TMC_FFCR_TRIGON_TRIGIN BIT(8) -#define TMC_FFCR_STOP_ON_FLUSH BIT(12) - -#define TMC_STS_TRIGGERED_BIT 2 -#define TMC_FFCR_FLUSHMAN_BIT 6 - -enum tmc_config_type { - TMC_CONFIG_TYPE_ETB, - TMC_CONFIG_TYPE_ETR, - TMC_CONFIG_TYPE_ETF, -}; - -enum tmc_mode { - TMC_MODE_CIRCULAR_BUFFER, - TMC_MODE_SOFTWARE_FIFO, - TMC_MODE_HARDWARE_FIFO, -}; - -enum tmc_mem_intf_width { - TMC_MEM_INTF_WIDTH_32BITS = 0x2, - TMC_MEM_INTF_WIDTH_64BITS = 0x3, - TMC_MEM_INTF_WIDTH_128BITS = 0x4, - TMC_MEM_INTF_WIDTH_256BITS = 0x5, -}; - -/** - * struct tmc_drvdata - specifics associated to an TMC component - * @base: memory mapped base address for this component. - * @dev: the device entity associated to this component. - * @csdev: component vitals needed by the framework. - * @miscdev: specifics to handle "/dev/xyz.tmc" entry. - * @spinlock: only one at a time pls. - * @read_count: manages preparation of buffer for reading. - * @buf: area of memory where trace data get sent. - * @paddr: DMA start location in RAM. - * @vaddr: virtual representation of @paddr. - * @size: @buf size. - * @enable: this TMC is being used. - * @config_type: TMC variant, must be of type @tmc_config_type. - * @trigger_cntr: amount of words to store after a trigger. 
- */ -struct tmc_drvdata { - void __iomem *base; - struct device *dev; - struct coresight_device *csdev; - struct miscdevice miscdev; - spinlock_t spinlock; - int read_count; - bool reading; - char *buf; - dma_addr_t paddr; - void __iomem *vaddr; - u32 size; - bool enable; - enum tmc_config_type config_type; - u32 trigger_cntr; -}; - -static void tmc_wait_for_ready(struct tmc_drvdata *drvdata) +void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata) { /* Ensure formatter, unformatter and hardware fifo are empty */ if (coresight_timeout(drvdata->base, - TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) { + TMC_STS, TMC_STS_TMCREADY_BIT, 1)) { dev_err(drvdata->dev, "timeout observed when probing at offset %#x\n", TMC_STS); } } -static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) +void tmc_flush_and_stop(struct tmc_drvdata *drvdata) { u32 ffcr; ffcr = readl_relaxed(drvdata->base + TMC_FFCR); ffcr |= TMC_FFCR_STOP_ON_FLUSH; writel_relaxed(ffcr, drvdata->base + TMC_FFCR); - ffcr |= TMC_FFCR_FLUSHMAN; + ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT); writel_relaxed(ffcr, drvdata->base + TMC_FFCR); /* Ensure flush completes */ if (coresight_timeout(drvdata->base, @@ -159,343 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) TMC_FFCR); } - tmc_wait_for_ready(drvdata); + tmc_wait_for_tmcready(drvdata); } -static void tmc_enable_hw(struct tmc_drvdata *drvdata) +void tmc_enable_hw(struct tmc_drvdata *drvdata) { writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); } -static void tmc_disable_hw(struct tmc_drvdata *drvdata) +void tmc_disable_hw(struct tmc_drvdata *drvdata) { writel_relaxed(0x0, drvdata->base + TMC_CTL); } -static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) -{ - /* Zero out the memory to help with debug */ - memset(drvdata->buf, 0, drvdata->size); - - CS_UNLOCK(drvdata->base); - - writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); - - writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) -{ - u32 axictl; - - /* Zero out the memory to help with debug */ - memset(drvdata->vaddr, 0, drvdata->size); - - CS_UNLOCK(drvdata->base); - - writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); - writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); - - axictl = readl_relaxed(drvdata->base + TMC_AXICTL); - axictl |= TMC_AXICTL_WR_BURST_LEN; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - axictl &= ~TMC_AXICTL_SCT_GAT_MODE; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - axictl = (axictl & - ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | - TMC_AXICTL_PROT_CTL_B1; - writel_relaxed(axictl, drvdata->base + TMC_AXICTL); - - writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); - writel_relaxed(0x0, drvdata->base + TMC_DBAHI); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); - writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, - drvdata->base + TMC_FFCR); - 
writel_relaxed(0x0, drvdata->base + TMC_BUFWM); - tmc_enable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) -{ - unsigned long flags; - - pm_runtime_get_sync(drvdata->dev); - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); - return -EBUSY; - } - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_enable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_enable_hw(drvdata); - } else { - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_enable_hw(drvdata); - else - tmc_etf_enable_hw(drvdata); - } - drvdata->enable = true; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC enabled\n"); - return 0; -} - -static int tmc_enable_sink(struct coresight_device *csdev) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER); -} - -static int tmc_enable_link(struct coresight_device *csdev, int inport, - int outport) +static int tmc_read_prepare(struct tmc_drvdata *drvdata) { - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO); -} + int ret = 0; -static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) -{ - enum tmc_mem_intf_width memwidth; - u8 memwords; - char *bufp; - u32 read_data; - int i; - - memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10); - if (memwidth == TMC_MEM_INTF_WIDTH_32BITS) - memwords = 1; - else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS) - memwords = 2; - else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS) - memwords = 4; - else - memwords = 8; - - bufp = drvdata->buf; - while (1) { - for (i = 0; i < memwords; i++) { - read_data = readl_relaxed(drvdata->base + TMC_RRD); - if (read_data == 0xFFFFFFFF) - return; - memcpy(bufp, &read_data, 4); - bufp += 4; - } + switch (drvdata->config_type) { + case TMC_CONFIG_TYPE_ETB: + case TMC_CONFIG_TYPE_ETF: + ret = tmc_read_prepare_etb(drvdata); + break; + case TMC_CONFIG_TYPE_ETR: + ret = tmc_read_prepare_etr(drvdata); + break; + default: + ret = -EINVAL; } -} - -static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_etb_dump_hw(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) -{ - u32 rwp, val; - rwp = readl_relaxed(drvdata->base + TMC_RWP); - val = readl_relaxed(drvdata->base + TMC_STS); + if (!ret) + dev_info(drvdata->dev, "TMC read start\n"); - /* How much memory do we still have */ - if (val & BIT(0)) - drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; - else - drvdata->buf = drvdata->vaddr; -} - -static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_etr_dump_hw(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); -} - -static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) -{ - CS_UNLOCK(drvdata->base); - - tmc_flush_and_stop(drvdata); - tmc_disable_hw(drvdata); - - CS_LOCK(drvdata->base); + return ret; } -static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode) +static int tmc_read_unprepare(struct tmc_drvdata *drvdata) { - unsigned long flags; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) - goto out; + int ret = 0; - if 
(drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_disable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_disable_hw(drvdata); - } else { - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_disable_hw(drvdata); - else - tmc_etf_disable_hw(drvdata); + switch (drvdata->config_type) { + case TMC_CONFIG_TYPE_ETB: + case TMC_CONFIG_TYPE_ETF: + ret = tmc_read_unprepare_etb(drvdata); + break; + case TMC_CONFIG_TYPE_ETR: + ret = tmc_read_unprepare_etr(drvdata); + break; + default: + ret = -EINVAL; } -out: - drvdata->enable = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - pm_runtime_put(drvdata->dev); - - dev_info(drvdata->dev, "TMC disabled\n"); -} - -static void tmc_disable_sink(struct coresight_device *csdev) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER); -} -static void tmc_disable_link(struct coresight_device *csdev, int inport, - int outport) -{ - struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - - tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO); -} - -static const struct coresight_ops_sink tmc_sink_ops = { - .enable = tmc_enable_sink, - .disable = tmc_disable_sink, -}; + if (!ret) + dev_info(drvdata->dev, "TMC read end\n"); -static const struct coresight_ops_link tmc_link_ops = { - .enable = tmc_enable_link, - .disable = tmc_disable_link, -}; - -static const struct coresight_ops tmc_etb_cs_ops = { - .sink_ops = &tmc_sink_ops, -}; - -static const struct coresight_ops tmc_etr_cs_ops = { - .sink_ops = &tmc_sink_ops, -}; - -static const struct coresight_ops tmc_etf_cs_ops = { - .sink_ops = &tmc_sink_ops, - .link_ops = &tmc_link_ops, -}; - -static int tmc_read_prepare(struct tmc_drvdata *drvdata) -{ - int ret; - unsigned long flags; - enum tmc_mode mode; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (!drvdata->enable) - goto out; - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_disable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_disable_hw(drvdata); - } else { - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode == TMC_MODE_CIRCULAR_BUFFER) { - tmc_etb_disable_hw(drvdata); - } else { - ret = -ENODEV; - goto err; - } - } -out: - drvdata->reading = true; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC read start\n"); - return 0; -err: - spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } -static void tmc_read_unprepare(struct tmc_drvdata *drvdata) -{ - unsigned long flags; - enum tmc_mode mode; - - spin_lock_irqsave(&drvdata->spinlock, flags); - if (!drvdata->enable) - goto out; - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { - tmc_etb_enable_hw(drvdata); - } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - tmc_etr_enable_hw(drvdata); - } else { - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode == TMC_MODE_CIRCULAR_BUFFER) - tmc_etb_enable_hw(drvdata); - } -out: - drvdata->reading = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - dev_info(drvdata->dev, "TMC read end\n"); -} - static int tmc_open(struct inode *inode, struct file *file) { + int ret; struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - int ret = 0; - - if (drvdata->read_count++) - goto out; ret = tmc_read_prepare(drvdata); if (ret) return ret; -out: + nonseekable_open(inode, file); dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); @@ -535,19 +166,14 @@ static ssize_t 
tmc_read(struct file *file, char __user *data, size_t len, static int tmc_release(struct inode *inode, struct file *file) { + int ret; struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - if (--drvdata->read_count) { - if (drvdata->read_count < 0) { - dev_err(drvdata->dev, "mismatched close\n"); - drvdata->read_count = 0; - } - goto out; - } + ret = tmc_read_unprepare(drvdata); + if (ret) + return ret; - tmc_read_unprepare(drvdata); -out: dev_dbg(drvdata->dev, "%s: released\n", __func__); return 0; } @@ -560,56 +186,71 @@ static const struct file_operations tmc_fops = { .llseek = no_llseek, }; -static ssize_t status_show(struct device *dev, - struct device_attribute *attr, char *buf) +static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid) { - unsigned long flags; - u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg; - u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr; - u32 devid; - struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + enum tmc_mem_intf_width memwidth; - pm_runtime_get_sync(drvdata->dev); - spin_lock_irqsave(&drvdata->spinlock, flags); - CS_UNLOCK(drvdata->base); - - tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ); - tmc_sts = readl_relaxed(drvdata->base + TMC_STS); - tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP); - tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP); - tmc_trg = readl_relaxed(drvdata->base + TMC_TRG); - tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL); - tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR); - tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR); - tmc_mode = readl_relaxed(drvdata->base + TMC_MODE); - tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR); - devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + /* + * Excerpt from the TRM: + * + * DEVID::MEMWIDTH[10:8] + * 0x2 Memory interface databus is 32 bits wide. + * 0x3 Memory interface databus is 64 bits wide. + * 0x4 Memory interface databus is 128 bits wide. + * 0x5 Memory interface databus is 256 bits wide. 
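For illustration (not part of the patch): tmc_get_memwidth() below maps the DEVID::MEMWIDTH encodings listed in the excerpt above onto the new tmc_mem_intf_width values, which tmc_etb_dump_hw() then uses as the number of 32-bit reads per beat group. A hedged stand-alone model of that decode; the shift-and-mask stands in for BMVAL(devid, 8, 10) and the return values mirror the constants added in coresight-tmc.h.

/* Illustrative sketch (not part of the patch): models the DEVID[10:8] decode
 * performed by tmc_get_memwidth(). */
#include <stdio.h>
#include <stdint.h>

static unsigned int get_memwidth(uint32_t devid)
{
	switch ((devid >> 8) & 0x7) {	/* DEVID::MEMWIDTH[10:8] */
	case 0x2: return 1;		/* 32-bit databus */
	case 0x3: return 2;		/* 64-bit databus */
	case 0x4: return 4;		/* 128-bit databus */
	case 0x5: return 8;		/* 256-bit databus */
	default:  return 0;		/* unknown/reserved encoding */
	}
}

int main(void)
{
	/* DEVID with MEMWIDTH = 0x3 -> 2 */
	printf("%u\n", get_memwidth(0x300));
	return 0;
}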
+ */ + switch (BMVAL(devid, 8, 10)) { + case 0x2: + memwidth = TMC_MEM_INTF_WIDTH_32BITS; + break; + case 0x3: + memwidth = TMC_MEM_INTF_WIDTH_64BITS; + break; + case 0x4: + memwidth = TMC_MEM_INTF_WIDTH_128BITS; + break; + case 0x5: + memwidth = TMC_MEM_INTF_WIDTH_256BITS; + break; + default: + memwidth = 0; + } - CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(drvdata->dev); - - return sprintf(buf, - "Depth:\t\t0x%x\n" - "Status:\t\t0x%x\n" - "RAM read ptr:\t0x%x\n" - "RAM wrt ptr:\t0x%x\n" - "Trigger cnt:\t0x%x\n" - "Control:\t0x%x\n" - "Flush status:\t0x%x\n" - "Flush ctrl:\t0x%x\n" - "Mode:\t\t0x%x\n" - "PSRC:\t\t0x%x\n" - "DEVID:\t\t0x%x\n", - tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg, - tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid); - - return -EINVAL; + return memwidth; } -static DEVICE_ATTR_RO(status); -static ssize_t trigger_cntr_show(struct device *dev, - struct device_attribute *attr, char *buf) +#define coresight_tmc_simple_func(name, offset) \ + coresight_simple_func(struct tmc_drvdata, name, offset) + +coresight_tmc_simple_func(rsz, TMC_RSZ); +coresight_tmc_simple_func(sts, TMC_STS); +coresight_tmc_simple_func(rrp, TMC_RRP); +coresight_tmc_simple_func(rwp, TMC_RWP); +coresight_tmc_simple_func(trg, TMC_TRG); +coresight_tmc_simple_func(ctl, TMC_CTL); +coresight_tmc_simple_func(ffsr, TMC_FFSR); +coresight_tmc_simple_func(ffcr, TMC_FFCR); +coresight_tmc_simple_func(mode, TMC_MODE); +coresight_tmc_simple_func(pscr, TMC_PSCR); +coresight_tmc_simple_func(devid, CORESIGHT_DEVID); + +static struct attribute *coresight_tmc_mgmt_attrs[] = { + &dev_attr_rsz.attr, + &dev_attr_sts.attr, + &dev_attr_rrp.attr, + &dev_attr_rwp.attr, + &dev_attr_trg.attr, + &dev_attr_ctl.attr, + &dev_attr_ffsr.attr, + &dev_attr_ffcr.attr, + &dev_attr_mode.attr, + &dev_attr_pscr.attr, + &dev_attr_devid.attr, + NULL, +}; + +ssize_t trigger_cntr_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val = drvdata->trigger_cntr; @@ -634,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev, } static DEVICE_ATTR_RW(trigger_cntr); -static struct attribute *coresight_etb_attrs[] = { +static struct attribute *coresight_tmc_attrs[] = { &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, NULL, }; -ATTRIBUTE_GROUPS(coresight_etb); -static struct attribute *coresight_etr_attrs[] = { - &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, - NULL, +static const struct attribute_group coresight_tmc_group = { + .attrs = coresight_tmc_attrs, }; -ATTRIBUTE_GROUPS(coresight_etr); -static struct attribute *coresight_etf_attrs[] = { - &dev_attr_trigger_cntr.attr, - &dev_attr_status.attr, +static const struct attribute_group coresight_tmc_mgmt_group = { + .attrs = coresight_tmc_mgmt_attrs, + .name = "mgmt", +}; + +const struct attribute_group *coresight_tmc_groups[] = { + &coresight_tmc_group, + &coresight_tmc_mgmt_group, NULL, }; -ATTRIBUTE_GROUPS(coresight_etf); static int tmc_probe(struct amba_device *adev, const struct amba_id *id) { @@ -692,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); drvdata->config_type = BMVAL(devid, 6, 7); + drvdata->memwidth = tmc_get_memwidth(devid); if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { if (np) @@ -706,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_put(&adev->dev); - if 
(drvdata->config_type == TMC_CONFIG_TYPE_ETR) { - drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size, - &drvdata->paddr, GFP_KERNEL); - if (!drvdata->vaddr) - return -ENOMEM; - - memset(drvdata->vaddr, 0, drvdata->size); - drvdata->buf = drvdata->vaddr; - } else { - drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL); - if (!drvdata->buf) - return -ENOMEM; - } - desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) { ret = -ENOMEM; @@ -729,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) desc->pdata = pdata; desc->dev = dev; desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; + desc->groups = coresight_tmc_groups; if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { desc->type = CORESIGHT_DEV_TYPE_SINK; desc->ops = &tmc_etb_cs_ops; - desc->groups = coresight_etb_groups; } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { desc->type = CORESIGHT_DEV_TYPE_SINK; desc->ops = &tmc_etr_cs_ops; - desc->groups = coresight_etr_groups; } else { desc->type = CORESIGHT_DEV_TYPE_LINKSINK; desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; desc->ops = &tmc_etf_cs_ops; - desc->groups = coresight_etf_groups; } drvdata->csdev = coresight_register(desc); @@ -758,31 +383,14 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) if (ret) goto err_misc_register; - dev_info(dev, "TMC initialized\n"); return 0; err_misc_register: coresight_unregister(drvdata->csdev); err_devm_kzalloc: - if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) - dma_free_coherent(dev, drvdata->size, - &drvdata->paddr, GFP_KERNEL); return ret; } -static int tmc_remove(struct amba_device *adev) -{ - struct tmc_drvdata *drvdata = amba_get_drvdata(adev); - - misc_deregister(&drvdata->miscdev); - coresight_unregister(drvdata->csdev); - if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) - dma_free_coherent(drvdata->dev, drvdata->size, - &drvdata->paddr, GFP_KERNEL); - - return 0; -} - static struct amba_id tmc_ids[] = { { .id = 0x0003b961, @@ -795,13 +403,9 @@ static struct amba_driver tmc_driver = { .drv = { .name = "coresight-tmc", .owner = THIS_MODULE, + .suppress_bind_attrs = true, }, .probe = tmc_probe, - .remove = tmc_remove, .id_table = tmc_ids, }; - -module_amba_driver(tmc_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver"); +builtin_amba_driver(tmc_driver); diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h new file mode 100644 index 000000000000..5c5fe2ad2ca7 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -0,0 +1,140 @@ +/* + * Copyright(C) 2015 Linaro Limited. All rights reserved. + * Author: Mathieu Poirier <mathieu.poirier@linaro.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _CORESIGHT_TMC_H +#define _CORESIGHT_TMC_H + +#include <linux/miscdevice.h> + +#define TMC_RSZ 0x004 +#define TMC_STS 0x00c +#define TMC_RRD 0x010 +#define TMC_RRP 0x014 +#define TMC_RWP 0x018 +#define TMC_TRG 0x01c +#define TMC_CTL 0x020 +#define TMC_RWD 0x024 +#define TMC_MODE 0x028 +#define TMC_LBUFLEVEL 0x02c +#define TMC_CBUFLEVEL 0x030 +#define TMC_BUFWM 0x034 +#define TMC_RRPHI 0x038 +#define TMC_RWPHI 0x03c +#define TMC_AXICTL 0x110 +#define TMC_DBALO 0x118 +#define TMC_DBAHI 0x11c +#define TMC_FFSR 0x300 +#define TMC_FFCR 0x304 +#define TMC_PSCR 0x308 +#define TMC_ITMISCOP0 0xee0 +#define TMC_ITTRFLIN 0xee8 +#define TMC_ITATBDATA0 0xeec +#define TMC_ITATBCTR2 0xef0 +#define TMC_ITATBCTR1 0xef4 +#define TMC_ITATBCTR0 0xef8 + +/* register description */ +/* TMC_CTL - 0x020 */ +#define TMC_CTL_CAPT_EN BIT(0) +/* TMC_STS - 0x00C */ +#define TMC_STS_TMCREADY_BIT 2 +#define TMC_STS_FULL BIT(0) +#define TMC_STS_TRIGGERED BIT(1) +/* TMC_AXICTL - 0x110 */ +#define TMC_AXICTL_PROT_CTL_B0 BIT(0) +#define TMC_AXICTL_PROT_CTL_B1 BIT(1) +#define TMC_AXICTL_SCT_GAT_MODE BIT(7) +#define TMC_AXICTL_WR_BURST_16 0xF00 +/* TMC_FFCR - 0x304 */ +#define TMC_FFCR_FLUSHMAN_BIT 6 +#define TMC_FFCR_EN_FMT BIT(0) +#define TMC_FFCR_EN_TI BIT(1) +#define TMC_FFCR_FON_FLIN BIT(4) +#define TMC_FFCR_FON_TRIG_EVT BIT(5) +#define TMC_FFCR_TRIGON_TRIGIN BIT(8) +#define TMC_FFCR_STOP_ON_FLUSH BIT(12) + + +enum tmc_config_type { + TMC_CONFIG_TYPE_ETB, + TMC_CONFIG_TYPE_ETR, + TMC_CONFIG_TYPE_ETF, +}; + +enum tmc_mode { + TMC_MODE_CIRCULAR_BUFFER, + TMC_MODE_SOFTWARE_FIFO, + TMC_MODE_HARDWARE_FIFO, +}; + +enum tmc_mem_intf_width { + TMC_MEM_INTF_WIDTH_32BITS = 1, + TMC_MEM_INTF_WIDTH_64BITS = 2, + TMC_MEM_INTF_WIDTH_128BITS = 4, + TMC_MEM_INTF_WIDTH_256BITS = 8, +}; + +/** + * struct tmc_drvdata - specifics associated to an TMC component + * @base: memory mapped base address for this component. + * @dev: the device entity associated to this component. + * @csdev: component vitals needed by the framework. + * @miscdev: specifics to handle "/dev/xyz.tmc" entry. + * @spinlock: only one at a time pls. + * @buf: area of memory where trace data get sent. + * @paddr: DMA start location in RAM. + * @vaddr: virtual representation of @paddr. + * @size: @buf size. + * @mode: how this TMC is being used. + * @config_type: TMC variant, must be of type @tmc_config_type. + * @memwidth: width of the memory interface databus, in bytes. + * @trigger_cntr: amount of words to store after a trigger. 
+ */ +struct tmc_drvdata { + void __iomem *base; + struct device *dev; + struct coresight_device *csdev; + struct miscdevice miscdev; + spinlock_t spinlock; + bool reading; + char *buf; + dma_addr_t paddr; + void __iomem *vaddr; + u32 size; + local_t mode; + enum tmc_config_type config_type; + enum tmc_mem_intf_width memwidth; + u32 trigger_cntr; +}; + +/* Generic functions */ +void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata); +void tmc_flush_and_stop(struct tmc_drvdata *drvdata); +void tmc_enable_hw(struct tmc_drvdata *drvdata); +void tmc_disable_hw(struct tmc_drvdata *drvdata); + +/* ETB/ETF functions */ +int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); +int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata); +extern const struct coresight_ops tmc_etb_cs_ops; +extern const struct coresight_ops tmc_etf_cs_ops; + +/* ETR functions */ +int tmc_read_prepare_etr(struct tmc_drvdata *drvdata); +int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata); +extern const struct coresight_ops tmc_etr_cs_ops; +#endif diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 7214efd10db5..4e471e2e9d89 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -1,5 +1,7 @@ /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * + * Description: CoreSight Trace Port Interface Unit driver + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -11,7 +13,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/io.h> @@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata) CS_LOCK(drvdata->base); } -static int tpiu_enable(struct coresight_device *csdev) +static int tpiu_enable(struct coresight_device *csdev, u32 mode) { struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - pm_runtime_get_sync(csdev->dev.parent); tpiu_enable_hw(drvdata); dev_info(drvdata->dev, "TPIU enabled\n"); @@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev) struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); tpiu_disable_hw(drvdata); - pm_runtime_put(csdev->dev.parent); dev_info(drvdata->dev, "TPIU disabled\n"); } @@ -168,15 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); - dev_info(dev, "TPIU initialized\n"); - return 0; -} - -static int tpiu_remove(struct amba_device *adev) -{ - struct tpiu_drvdata *drvdata = amba_get_drvdata(adev); - - coresight_unregister(drvdata->csdev); return 0; } @@ -223,13 +213,9 @@ static struct amba_driver tpiu_driver = { .name = "coresight-tpiu", .owner = THIS_MODULE, .pm = &tpiu_dev_pm_ops, + .suppress_bind_attrs = true, }, .probe = tpiu_probe, - .remove = tpiu_remove, .id_table = tpiu_ids, }; - -module_amba_driver(tpiu_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver"); +builtin_amba_driver(tpiu_driver); diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 93738dfbf631..508532b3fcac 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -11,7 +11,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include 
<linux/types.h> #include <linux/device.h> @@ -24,11 +23,36 @@ #include <linux/coresight.h> #include <linux/of_platform.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include "coresight-priv.h" static DEFINE_MUTEX(coresight_mutex); +/** + * struct coresight_node - elements of a path, from source to sink + * @csdev: Address of an element. + * @link: hook to the list. + */ +struct coresight_node { + struct coresight_device *csdev; + struct list_head link; +}; + +/* + * When operating Coresight drivers from the sysFS interface, only a single + * path can exist from a tracer (associated to a CPU) to a sink. + */ +static DEFINE_PER_CPU(struct list_head *, tracer_path); + +/* + * As of this writing only a single STM can be found in CS topologies. Since + * there is no way to know if we'll ever see more and what kind of + * configuration they will enact, for the time being only define a single path + * for STM. + */ +static struct list_head *stm_path; + static int coresight_id_match(struct device *dev, void *data) { int trace_id, i_trace_id; @@ -68,15 +92,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev) csdev, coresight_id_match); } -static int coresight_find_link_inport(struct coresight_device *csdev) +static int coresight_find_link_inport(struct coresight_device *csdev, + struct coresight_device *parent) { int i; - struct coresight_device *parent; struct coresight_connection *conn; - parent = container_of(csdev->path_link.next, - struct coresight_device, path_link); - for (i = 0; i < parent->nr_outport; i++) { conn = &parent->conns[i]; if (conn->child_dev == csdev) @@ -89,15 +110,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev) return 0; } -static int coresight_find_link_outport(struct coresight_device *csdev) +static int coresight_find_link_outport(struct coresight_device *csdev, + struct coresight_device *child) { int i; - struct coresight_device *child; struct coresight_connection *conn; - child = container_of(csdev->path_link.prev, - struct coresight_device, path_link); - for (i = 0; i < csdev->nr_outport; i++) { conn = &csdev->conns[i]; if (conn->child_dev == child) @@ -110,13 +128,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev) return 0; } -static int coresight_enable_sink(struct coresight_device *csdev) +static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) { int ret; if (!csdev->enable) { if (sink_ops(csdev)->enable) { - ret = sink_ops(csdev)->enable(csdev); + ret = sink_ops(csdev)->enable(csdev, mode); if (ret) return ret; } @@ -138,14 +156,19 @@ static void coresight_disable_sink(struct coresight_device *csdev) } } -static int coresight_enable_link(struct coresight_device *csdev) +static int coresight_enable_link(struct coresight_device *csdev, + struct coresight_device *parent, + struct coresight_device *child) { int ret; int link_subtype; int refport, inport, outport; - inport = coresight_find_link_inport(csdev); - outport = coresight_find_link_outport(csdev); + if (!parent || !child) + return -EINVAL; + + inport = coresight_find_link_inport(csdev, parent); + outport = coresight_find_link_outport(csdev, child); link_subtype = csdev->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) @@ -168,14 +191,19 @@ static int coresight_enable_link(struct coresight_device *csdev) return 0; } -static void coresight_disable_link(struct coresight_device *csdev) +static void coresight_disable_link(struct coresight_device *csdev, + struct coresight_device 
*parent, + struct coresight_device *child) { int i, nr_conns; int link_subtype; int refport, inport, outport; - inport = coresight_find_link_inport(csdev); - outport = coresight_find_link_outport(csdev); + if (!parent || !child) + return; + + inport = coresight_find_link_inport(csdev, parent); + outport = coresight_find_link_outport(csdev, child); link_subtype = csdev->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { @@ -201,7 +229,7 @@ static void coresight_disable_link(struct coresight_device *csdev) csdev->enable = false; } -static int coresight_enable_source(struct coresight_device *csdev) +static int coresight_enable_source(struct coresight_device *csdev, u32 mode) { int ret; @@ -213,7 +241,7 @@ static int coresight_enable_source(struct coresight_device *csdev) if (!csdev->enable) { if (source_ops(csdev)->enable) { - ret = source_ops(csdev)->enable(csdev); + ret = source_ops(csdev)->enable(csdev, NULL, mode); if (ret) return ret; } @@ -235,147 +263,343 @@ static void coresight_disable_source(struct coresight_device *csdev) } } -static int coresight_enable_path(struct list_head *path) +void coresight_disable_path(struct list_head *path) { - int ret = 0; - struct coresight_device *cd; + u32 type; + struct coresight_node *nd; + struct coresight_device *csdev, *parent, *child; - /* - * At this point we have a full @path, from source to sink. The - * sink is the first entry and the source the last one. Go through - * all the components and enable them one by one. - */ - list_for_each_entry(cd, path, path_link) { - if (cd == list_first_entry(path, struct coresight_device, - path_link)) { - ret = coresight_enable_sink(cd); - } else if (list_is_last(&cd->path_link, path)) { - /* - * Don't enable the source just yet - this needs to - * happen at the very end when all links and sink - * along the path have been configured properly. - */ - ; - } else { - ret = coresight_enable_link(cd); + list_for_each_entry(nd, path, link) { + csdev = nd->csdev; + type = csdev->type; + + /* + * ETF devices are tricky... They can be a link or a sink, + * depending on how they are configured. If an ETF has been + * "activated" it will be configured as a sink, otherwise + * go ahead with the link configuration. + */ + if (type == CORESIGHT_DEV_TYPE_LINKSINK) + type = (csdev == coresight_get_sink(path)) ? + CORESIGHT_DEV_TYPE_SINK : + CORESIGHT_DEV_TYPE_LINK; + + switch (type) { + case CORESIGHT_DEV_TYPE_SINK: + coresight_disable_sink(csdev); + break; + case CORESIGHT_DEV_TYPE_SOURCE: + /* sources are disabled from either sysFS or Perf */ + break; + case CORESIGHT_DEV_TYPE_LINK: + parent = list_prev_entry(nd, link)->csdev; + child = list_next_entry(nd, link)->csdev; + coresight_disable_link(csdev, parent, child); + break; + default: + break; } - if (ret) - goto err; } +} - return 0; -err: - list_for_each_entry_continue_reverse(cd, path, path_link) { - if (cd == list_first_entry(path, struct coresight_device, - path_link)) { - coresight_disable_sink(cd); - } else if (list_is_last(&cd->path_link, path)) { - ; - } else { - coresight_disable_link(cd); +int coresight_enable_path(struct list_head *path, u32 mode) +{ + + int ret = 0; + u32 type; + struct coresight_node *nd; + struct coresight_device *csdev, *parent, *child; + + list_for_each_entry_reverse(nd, path, link) { + csdev = nd->csdev; + type = csdev->type; + + /* + * ETF devices are tricky... They can be a link or a sink, + * depending on how they are configured. 
If an ETF has been + "activated" it will be configured as a sink, otherwise + go ahead with the link configuration. + */ + if (type == CORESIGHT_DEV_TYPE_LINKSINK) + type = (csdev == coresight_get_sink(path)) ? + CORESIGHT_DEV_TYPE_SINK : + CORESIGHT_DEV_TYPE_LINK; + + switch (type) { + case CORESIGHT_DEV_TYPE_SINK: + ret = coresight_enable_sink(csdev, mode); + if (ret) + goto err; + break; + case CORESIGHT_DEV_TYPE_SOURCE: + /* sources are enabled from either sysFS or Perf */ + break; + case CORESIGHT_DEV_TYPE_LINK: + parent = list_prev_entry(nd, link)->csdev; + child = list_next_entry(nd, link)->csdev; + ret = coresight_enable_link(csdev, parent, child); + if (ret) + goto err; + break; + default: + goto err; + } + } +out: return ret; +err: + coresight_disable_path(path); + goto out; } -static int coresight_disable_path(struct list_head *path) +struct coresight_device *coresight_get_sink(struct list_head *path) { - struct coresight_device *cd; + struct coresight_device *csdev; - list_for_each_entry_reverse(cd, path, path_link) { - if (cd == list_first_entry(path, struct coresight_device, - path_link)) { - coresight_disable_sink(cd); - } else if (list_is_last(&cd->path_link, path)) { - /* - * The source has already been stopped, no need - * to do it again here. - */ - ; - } else { - coresight_disable_link(cd); + if (!path) + return NULL; + + csdev = list_last_entry(path, struct coresight_node, link)->csdev; + if (csdev->type != CORESIGHT_DEV_TYPE_SINK && + csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) + return NULL; + + return csdev; +} + +/** + * _coresight_build_path - recursively build a path from a @csdev to a sink. + * @csdev: The device to start from. + * @sink: The name of the sink this path should connect with. + * @path: The list to add devices to. + * + * The tree of Coresight devices is traversed until an activated sink or + * the one specified by @sink is found. + * From there the sink is added to the list along with all the devices that + * led to that point - the end result is a list from source to sink. In that + * list the source is the first device and the sink the last one. + */ +static int _coresight_build_path(struct coresight_device *csdev, + const char *sink, struct list_head *path) +{ + int i; + bool found = false; + struct coresight_node *node; + + /* + * First see if we are dealing with a sink. If we have one check if + * it was selected via sysFS or the perf cmd line. + */ + if (csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + /* Activated via perf cmd line */ + if (sink && !strcmp(dev_name(&csdev->dev), sink)) + goto out; + /* Activated via sysFS */ + if (csdev->activated) + goto out; + } + + /* Not a sink - recursively explore each port found on this element */ + for (i = 0; i < csdev->nr_outport; i++) { + struct coresight_device *child_dev = csdev->conns[i].child_dev; + + if (child_dev && + _coresight_build_path(child_dev, sink, path) == 0) { + found = true; + break; } } + if (!found) + return -ENODEV; + +out: + /* + * A path from this element to a sink has been found. The elements + * leading to the sink are already enqueued, all that is left to do + * is tell the PM runtime core we need this element and add a node + * for it. 
+ */ + node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->csdev = csdev; + list_add(&node->link, path); + pm_runtime_get_sync(csdev->dev.parent); + return 0; } -static int coresight_build_paths(struct coresight_device *csdev, - struct list_head *path, - bool enable) +struct list_head *coresight_build_path(struct coresight_device *csdev, + const char *sink) { - int i, ret = -EINVAL; - struct coresight_connection *conn; + struct list_head *path; + int rc; - list_add(&csdev->path_link, path); + path = kzalloc(sizeof(struct list_head), GFP_KERNEL); + if (!path) + return NULL; - if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && - csdev->activated) { - if (enable) - ret = coresight_enable_path(path); - else - ret = coresight_disable_path(path); - } else { - for (i = 0; i < csdev->nr_outport; i++) { - conn = &csdev->conns[i]; - if (coresight_build_paths(conn->child_dev, - path, enable) == 0) - ret = 0; - } + INIT_LIST_HEAD(path); + + rc = _coresight_build_path(csdev, sink, path); + if (rc) { + kfree(path); + return ERR_PTR(rc); } - if (list_first_entry(path, struct coresight_device, path_link) != csdev) - dev_err(&csdev->dev, "wrong device in %s\n", __func__); + return path; +} - list_del(&csdev->path_link); +/** + * coresight_release_path - release a previously built path. + * @path: the path to release. + * + * Go through all the elements of a path and 1) remove it from the list and + * 2) free the memory allocated for each node. + */ +void coresight_release_path(struct list_head *path) +{ + struct coresight_device *csdev; + struct coresight_node *nd, *next; - return ret; + list_for_each_entry_safe(nd, next, path, link) { + csdev = nd->csdev; + + pm_runtime_put_sync(csdev->dev.parent); + list_del(&nd->link); + kfree(nd); + } + + kfree(path); + path = NULL; +} + +/** coresight_validate_source - make sure a source has the right credentials + * @csdev: the device structure for a source. + * @function: the function this was called from. + * + * Assumes the coresight_mutex is held. 
+ */ +static int coresight_validate_source(struct coresight_device *csdev, + const char *function) +{ + u32 type, subtype; + + type = csdev->type; + subtype = csdev->subtype.source_subtype; + + if (type != CORESIGHT_DEV_TYPE_SOURCE) { + dev_err(&csdev->dev, "wrong device type in %s\n", function); + return -EINVAL; + } + + if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC && + subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) { + dev_err(&csdev->dev, "wrong device subtype in %s\n", function); + return -EINVAL; + } + + return 0; } int coresight_enable(struct coresight_device *csdev) { - int ret = 0; - LIST_HEAD(path); + int cpu, ret = 0; + struct list_head *path; mutex_lock(&coresight_mutex); - if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { - ret = -EINVAL; - dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + + ret = coresight_validate_source(csdev, __func__); + if (ret) goto out; - } + if (csdev->enable) goto out; - if (coresight_build_paths(csdev, &path, true)) { - dev_err(&csdev->dev, "building path(s) failed\n"); + path = coresight_build_path(csdev, NULL); + if (IS_ERR(path)) { + pr_err("building path(s) failed\n"); + ret = PTR_ERR(path); goto out; } - if (coresight_enable_source(csdev)) - dev_err(&csdev->dev, "source enable failed\n"); + ret = coresight_enable_path(path, CS_MODE_SYSFS); + if (ret) + goto err_path; + + ret = coresight_enable_source(csdev, CS_MODE_SYSFS); + if (ret) + goto err_source; + + switch (csdev->subtype.source_subtype) { + case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: + /* + * When working from sysFS it is important to keep track + * of the paths that were created so that they can be + * undone in 'coresight_disable()'. Since there can only + * be a single session per tracer (when working from sysFS) + * a per-cpu variable will do just fine. 
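The per-cpu bookkeeping described in the comment above has a matching lookup in coresight_disable(): the path saved here is later retrieved from the same per-cpu slot and torn down. A minimal sketch of that save/take pattern, using hypothetical names rather than the driver's own:

/* Sketch only: one remembered path per CPU-bound tracer. */
static DEFINE_PER_CPU(struct list_head *, example_path);

static void example_save_path(int cpu, struct list_head *path)
{
	per_cpu(example_path, cpu) = path;	/* stored at enable time */
}

static struct list_head *example_take_path(int cpu)
{
	struct list_head *path = per_cpu(example_path, cpu);

	per_cpu(example_path, cpu) = NULL;	/* one sysFS session per tracer */
	return path;
}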
+ */ + cpu = source_ops(csdev)->cpu_id(csdev); + per_cpu(tracer_path, cpu) = path; + break; + case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + stm_path = path; + break; + default: + /* We can't be here */ + break; + } + out: mutex_unlock(&coresight_mutex); return ret; + +err_source: + coresight_disable_path(path); + +err_path: + coresight_release_path(path); + goto out; } EXPORT_SYMBOL_GPL(coresight_enable); void coresight_disable(struct coresight_device *csdev) { - LIST_HEAD(path); + int cpu, ret; + struct list_head *path = NULL; mutex_lock(&coresight_mutex); - if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { - dev_err(&csdev->dev, "wrong device type in %s\n", __func__); + + ret = coresight_validate_source(csdev, __func__); + if (ret) goto out; - } + if (!csdev->enable) goto out; + switch (csdev->subtype.source_subtype) { + case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: + cpu = source_ops(csdev)->cpu_id(csdev); + path = per_cpu(tracer_path, cpu); + per_cpu(tracer_path, cpu) = NULL; + break; + case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + path = stm_path; + stm_path = NULL; + break; + default: + /* We can't be here */ + break; + } + coresight_disable_source(csdev); - if (coresight_build_paths(csdev, &path, false)) - dev_err(&csdev->dev, "releasing path(s) failed\n"); + coresight_disable_path(path); + coresight_release_path(path); out: mutex_unlock(&coresight_mutex); @@ -387,7 +611,7 @@ static ssize_t enable_sink_show(struct device *dev, { struct coresight_device *csdev = to_coresight_device(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); + return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated); } static ssize_t enable_sink_store(struct device *dev, @@ -417,7 +641,7 @@ static ssize_t enable_source_show(struct device *dev, { struct coresight_device *csdev = to_coresight_device(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); + return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable); } static ssize_t enable_source_store(struct device *dev, @@ -481,6 +705,8 @@ static void coresight_device_release(struct device *dev) { struct coresight_device *csdev = to_coresight_device(dev); + kfree(csdev->conns); + kfree(csdev->refcnt); kfree(csdev); } @@ -536,7 +762,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev) * are hooked-up with each newly added component. */ bus_for_each_dev(&coresight_bustype, NULL, - csdev, coresight_orphan_match); + csdev, coresight_orphan_match); } @@ -568,6 +794,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) if (dev) { conn->child_dev = to_coresight_device(dev); + /* and put reference from 'bus_find_device()' */ + put_device(dev); } else { csdev->orphan = true; conn->child_dev = NULL; @@ -575,6 +803,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) } } +static int coresight_remove_match(struct device *dev, void *data) +{ + int i; + struct coresight_device *csdev, *iterator; + struct coresight_connection *conn; + + csdev = data; + iterator = to_coresight_device(dev); + + /* No need to check oneself */ + if (csdev == iterator) + return 0; + + /* + * Cycle through all the connections of that component. If we find + * a connection whose name matches @csdev, remove it. 
+ */ + for (i = 0; i < iterator->nr_outport; i++) { + conn = &iterator->conns[i]; + + if (conn->child_dev == NULL) + continue; + + if (!strcmp(dev_name(&csdev->dev), conn->child_name)) { + iterator->orphan = true; + conn->child_dev = NULL; + /* No need to continue */ + break; + } + } + + /* + * Returning '0' ensures that all known component on the + * bus will be checked. + */ + return 0; +} + +static void coresight_remove_conns(struct coresight_device *csdev) +{ + bus_for_each_dev(&coresight_bustype, NULL, + csdev, coresight_remove_match); +} + /** * coresight_timeout - loop until a bit has changed to a specific state. * @addr: base address of the area of interest. @@ -713,13 +985,8 @@ EXPORT_SYMBOL_GPL(coresight_register); void coresight_unregister(struct coresight_device *csdev) { - mutex_lock(&coresight_mutex); - - kfree(csdev->conns); + /* Remove references of that device in the topology */ + coresight_remove_conns(csdev); device_unregister(&csdev->dev); - - mutex_unlock(&coresight_mutex); } EXPORT_SYMBOL_GPL(coresight_unregister); - -MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index b0973617826f..b68da1888fd5 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c @@ -10,7 +10,6 @@ * GNU General Public License for more details. */ -#include <linux/module.h> #include <linux/types.h> #include <linux/err.h> #include <linux/slab.h> @@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev, return -ENOMEM; /* Children connected to this component via @outports */ - pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * + pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * sizeof(*pdata->child_names), GFP_KERNEL); if (!pdata->child_names) diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig index e7a348807f0c..847a39b35307 100644 --- a/drivers/hwtracing/stm/Kconfig +++ b/drivers/hwtracing/stm/Kconfig @@ -9,6 +9,8 @@ config STM Say Y here to enable System Trace Module device support. +if STM + config STM_DUMMY tristate "Dummy STM driver" help @@ -25,3 +27,16 @@ config STM_SOURCE_CONSOLE If you want to send kernel console messages over STM devices, say Y. + +config STM_SOURCE_HEARTBEAT + tristate "Heartbeat over STM devices" + help + This is a kernel space trace source that sends periodic + heartbeat messages to trace hosts over STM devices. It is + also useful for testing stm class drivers and the stm class + framework itself. + + If you want to send heartbeat messages over STM devices, + say Y. 
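A kernel-space source such as the heartbeat driver described above only has to fill in an stm_source_data and register it; once user space links it to an stm device it can write into its channels. A minimal sketch of that shape (illustrative names only; the real heartbeat driver appears later in this patch):

#include <linux/module.h>
#include <linux/stm.h>

static struct stm_source_data example_src = {
	.name		= "example_src",	/* name exposed by the stm class */
	.nr_chans	= 1,			/* channels requested when linked */
};

static int __init example_src_init(void)
{
	/* no parent device needed for a purely software source */
	return stm_source_register_device(NULL, &example_src);
}

static void __exit example_src_exit(void)
{
	stm_source_unregister_device(&example_src);
}

module_init(example_src_init);
module_exit(example_src_exit);
MODULE_LICENSE("GPL v2");

Writes then go through stm_source_write(&example_src, 0, buf, len), which returns -ENODEV while the source is not linked to any stm device.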
+ +endif diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile index f9312c38dd7a..a9ce3d487e57 100644 --- a/drivers/hwtracing/stm/Makefile +++ b/drivers/hwtracing/stm/Makefile @@ -5,5 +5,7 @@ stm_core-y := core.o policy.o obj-$(CONFIG_STM_DUMMY) += dummy_stm.o obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o +obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o stm_console-y := console.o +stm_heartbeat-y := heartbeat.o diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index b6445d9e5453..02095410cb33 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev, static DEVICE_ATTR_RO(channels); +static ssize_t hw_override_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct stm_device *stm = to_stm_device(dev); + int ret; + + ret = sprintf(buf, "%u\n", stm->data->hw_override); + + return ret; +} + +static DEVICE_ATTR_RO(hw_override); + static struct attribute *stm_attrs[] = { &dev_attr_masters.attr, &dev_attr_channels.attr, + &dev_attr_hw_override.attr, NULL, }; @@ -113,6 +128,7 @@ struct stm_device *stm_find_device(const char *buf) stm = to_stm_device(dev); if (!try_module_get(stm->owner)) { + /* matches class_find_device() above */ put_device(dev); return NULL; } @@ -125,7 +141,7 @@ struct stm_device *stm_find_device(const char *buf) * @stm: stm device, previously acquired by stm_find_device() * * This drops the module reference and device reference taken by - * stm_find_device(). + * stm_find_device() or stm_char_open(). */ void stm_put_device(struct stm_device *stm) { @@ -185,6 +201,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output) { struct stp_master *master = stm_master(stm, output->master); + lockdep_assert_held(&stm->mc_lock); + lockdep_assert_held(&output->lock); + if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) return; @@ -199,6 +218,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) { struct stp_master *master = stm_master(stm, output->master); + lockdep_assert_held(&stm->mc_lock); + lockdep_assert_held(&output->lock); + bitmap_release_region(&master->chan_map[0], output->channel, ilog2(output->nr_chans)); @@ -288,6 +310,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, } spin_lock(&stm->mc_lock); + spin_lock(&output->lock); /* output is already assigned -- shouldn't happen */ if (WARN_ON_ONCE(output->nr_chans)) goto unlock; @@ -304,6 +327,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, ret = 0; unlock: + spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); return ret; @@ -312,11 +336,18 @@ unlock: static void stm_output_free(struct stm_device *stm, struct stm_output *output) { spin_lock(&stm->mc_lock); + spin_lock(&output->lock); if (output->nr_chans) stm_output_disclaim(stm, output); + spin_unlock(&output->lock); spin_unlock(&stm->mc_lock); } +static void stm_output_init(struct stm_output *output) +{ + spin_lock_init(&output->lock); +} + static int major_match(struct device *dev, const void *data) { unsigned int major = *(unsigned int *)data; @@ -339,6 +370,7 @@ static int stm_char_open(struct inode *inode, struct file *file) if (!stmf) return -ENOMEM; + stm_output_init(&stmf->output); stmf->stm = to_stm_device(dev); if (!try_module_get(stmf->stm->owner)) @@ -349,6 +381,8 @@ static int stm_char_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); err_free: + /* 
matches class_find_device() above */ + put_device(dev); kfree(stmf); return err; @@ -357,9 +391,19 @@ err_free: static int stm_char_release(struct inode *inode, struct file *file) { struct stm_file *stmf = file->private_data; + struct stm_device *stm = stmf->stm; + + if (stm->data->unlink) + stm->data->unlink(stm->data, stmf->output.master, + stmf->output.channel); + + stm_output_free(stm, &stmf->output); - stm_output_free(stmf->stm, &stmf->output); - stm_put_device(stmf->stm); + /* + * matches the stm_char_open()'s + * class_find_device() + try_module_get() + */ + stm_put_device(stm); kfree(stmf); return 0; @@ -380,8 +424,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) return ret; } -static void stm_write(struct stm_data *data, unsigned int master, - unsigned int channel, const char *buf, size_t count) +static ssize_t stm_write(struct stm_data *data, unsigned int master, + unsigned int channel, const char *buf, size_t count) { unsigned int flags = STP_PACKET_TIMESTAMPED; const unsigned char *p = buf, nil = 0; @@ -393,9 +437,14 @@ static void stm_write(struct stm_data *data, unsigned int master, sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, sz, p); flags = 0; + + if (sz < 0) + break; } data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); + + return pos; } static ssize_t stm_char_write(struct file *file, const char __user *buf, @@ -406,6 +455,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, char *kbuf; int err; + if (count + 1 > PAGE_SIZE) + count = PAGE_SIZE - 1; + /* * if no m/c have been assigned to this writer up to this * point, use "default" policy entry @@ -430,8 +482,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, return -EFAULT; } - stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf, - count); + count = stm_write(stm->data, stmf->output.master, stmf->output.channel, + kbuf, count); kfree(kbuf); @@ -509,16 +561,12 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) if (ret) goto err_free; - ret = 0; - if (stm->data->link) ret = stm->data->link(stm->data, stmf->output.master, stmf->output.channel); - if (ret) { + if (ret) stm_output_free(stmf->stm, &stmf->output); - stm_put_device(stmf->stm); - } err_free: kfree(id); @@ -633,17 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->dev.parent = parent; stm->dev.release = stm_device_release; - err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); - if (err) - goto err_device; - - err = device_add(&stm->dev); - if (err) - goto err_device; - + mutex_init(&stm->link_mutex); spin_lock_init(&stm->link_lock); INIT_LIST_HEAD(&stm->link_list); + /* initialize the object before it is accessible via sysfs */ spin_lock_init(&stm->mc_lock); mutex_init(&stm->policy_mutex); stm->sw_nmasters = nmasters; @@ -651,9 +693,20 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, stm->data = stm_data; stm_data->stm = stm; + err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); + if (err) + goto err_device; + + err = device_add(&stm->dev); + if (err) + goto err_device; + return 0; err_device: + unregister_chrdev(stm->major, stm_data->name); + + /* matches device_initialize() above */ put_device(&stm->dev); err_free: kfree(stm); @@ -662,20 +715,28 @@ err_free: } EXPORT_SYMBOL_GPL(stm_register_device); -static void __stm_source_link_drop(struct stm_source_device *src, - struct stm_device *stm); +static int 
__stm_source_link_drop(struct stm_source_device *src, + struct stm_device *stm); void stm_unregister_device(struct stm_data *stm_data) { struct stm_device *stm = stm_data->stm; struct stm_source_device *src, *iter; - int i; + int i, ret; - spin_lock(&stm->link_lock); + mutex_lock(&stm->link_mutex); list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { - __stm_source_link_drop(src, stm); + ret = __stm_source_link_drop(src, stm); + /* + * src <-> stm link must not change under the same + * stm::link_mutex, so complain loudly if it has; + * also in this situation ret!=0 means this src is + * not connected to this stm and it should be otherwise + * safe to proceed with the tear-down of stm. + */ + WARN_ON_ONCE(ret); } - spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); synchronize_srcu(&stm_source_srcu); @@ -694,6 +755,17 @@ void stm_unregister_device(struct stm_data *stm_data) } EXPORT_SYMBOL_GPL(stm_unregister_device); +/* + * stm::link_list access serialization uses a spinlock and a mutex; holding + * either of them guarantees that the list is stable; modification requires + * holding both of them. + * + * Lock ordering is as follows: + * stm::link_mutex + * stm::link_lock + * src::link_lock + */ + /** * stm_source_link_add() - connect an stm_source device to an stm device * @src: stm_source device @@ -710,6 +782,7 @@ static int stm_source_link_add(struct stm_source_device *src, char *id; int err; + mutex_lock(&stm->link_mutex); spin_lock(&stm->link_lock); spin_lock(&src->link_lock); @@ -719,6 +792,7 @@ static int stm_source_link_add(struct stm_source_device *src, spin_unlock(&src->link_lock); spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); id = kstrdup(src->data->name, GFP_KERNEL); if (id) { @@ -753,9 +827,9 @@ static int stm_source_link_add(struct stm_source_device *src, fail_free_output: stm_output_free(stm, &src->output); - stm_put_device(stm); fail_detach: + mutex_lock(&stm->link_mutex); spin_lock(&stm->link_lock); spin_lock(&src->link_lock); @@ -764,6 +838,7 @@ fail_detach: spin_unlock(&src->link_lock); spin_unlock(&stm->link_lock); + mutex_unlock(&stm->link_mutex); return err; } @@ -776,28 +851,55 @@ fail_detach: * If @stm is @src::link, disconnect them from one another and put the * reference on the @stm device. * - * Caller must hold stm::link_lock. + * Caller must hold stm::link_mutex. */ -static void __stm_source_link_drop(struct stm_source_device *src, - struct stm_device *stm) +static int __stm_source_link_drop(struct stm_source_device *src, + struct stm_device *stm) { struct stm_device *link; + int ret = 0; + + lockdep_assert_held(&stm->link_mutex); + /* for stm::link_list modification, we hold both mutex and spinlock */ + spin_lock(&stm->link_lock); spin_lock(&src->link_lock); link = srcu_dereference_check(src->link, &stm_source_srcu, 1); - if (WARN_ON_ONCE(link != stm)) { - spin_unlock(&src->link_lock); - return; + + /* + * The linked device may have changed since we last looked, because + * we weren't holding the src::link_lock back then; if this is the + * case, tell the caller to retry. 
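The lock-ordering rules spelled out above are easiest to see at a single call site; a sketch (not part of the patch, hypothetical helper name) of a writer that modifies the link state takes the three locks strictly in the documented order:

/* Illustration of the documented ordering, not actual driver code. */
static void example_modify_link(struct stm_device *stm,
				struct stm_source_device *src)
{
	mutex_lock(&stm->link_mutex);	/* outermost: required to modify the list */
	spin_lock(&stm->link_lock);	/* either lock alone keeps the list stable */
	spin_lock(&src->link_lock);	/* innermost: protects src->link itself */

	/* ... update stm->link_list and src->link here ... */

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);
}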
+ */ + if (link != stm) { + ret = -EAGAIN; + goto unlock; } stm_output_free(link, &src->output); - /* caller must hold stm::link_lock */ list_del_init(&src->link_entry); /* matches stm_find_device() from stm_source_link_store() */ stm_put_device(link); rcu_assign_pointer(src->link, NULL); +unlock: spin_unlock(&src->link_lock); + spin_unlock(&stm->link_lock); + + /* + * Call the unlink callbacks for both source and stm, when we know + * that we have actually performed the unlinking. + */ + if (!ret) { + if (src->data->unlink) + src->data->unlink(src->data); + + if (stm->data->unlink) + stm->data->unlink(stm->data, src->output.master, + src->output.channel); + } + + return ret; } /** @@ -813,21 +915,29 @@ static void __stm_source_link_drop(struct stm_source_device *src, static void stm_source_link_drop(struct stm_source_device *src) { struct stm_device *stm; - int idx; + int idx, ret; +retry: idx = srcu_read_lock(&stm_source_srcu); + /* + * The stm device will be valid for the duration of this + * read section, but the link may change before we grab + * the src::link_lock in __stm_source_link_drop(). + */ stm = srcu_dereference(src->link, &stm_source_srcu); + ret = 0; if (stm) { - if (src->data->unlink) - src->data->unlink(src->data); - - spin_lock(&stm->link_lock); - __stm_source_link_drop(src, stm); - spin_unlock(&stm->link_lock); + mutex_lock(&stm->link_mutex); + ret = __stm_source_link_drop(src, stm); + mutex_unlock(&stm->link_mutex); } srcu_read_unlock(&stm_source_srcu, idx); + + /* if it did change, retry */ + if (ret == -EAGAIN) + goto retry; } static ssize_t stm_source_link_show(struct device *dev, @@ -862,8 +972,10 @@ static ssize_t stm_source_link_store(struct device *dev, return -EINVAL; err = stm_source_link_add(src, link); - if (err) + if (err) { + /* matches the stm_find_device() above */ stm_put_device(link); + } return err ? 
: count; } @@ -925,6 +1037,7 @@ int stm_source_register_device(struct device *parent, if (err) goto err; + stm_output_init(&src->output); spin_lock_init(&src->link_lock); INIT_LIST_HEAD(&src->link_entry); src->data = data; @@ -973,9 +1086,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan, stm = srcu_dereference(src->link, &stm_source_srcu); if (stm) - stm_write(stm->data, src->output.master, - src->output.channel + chan, - buf, count); + count = stm_write(stm->data, src->output.master, + src->output.channel + chan, + buf, count); else count = -ENODEV; diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 3709bef0b21f..a86612d989f9 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c @@ -40,22 +40,71 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master, return size; } -static struct stm_data dummy_stm = { - .name = "dummy_stm", - .sw_start = 0x0000, - .sw_end = 0xffff, - .sw_nchannels = 0xffff, - .packet = dummy_stm_packet, -}; +#define DUMMY_STM_MAX 32 + +static struct stm_data dummy_stm[DUMMY_STM_MAX]; + +static int nr_dummies = 4; + +module_param(nr_dummies, int, 0400); + +static unsigned int fail_mode; + +module_param(fail_mode, int, 0600); + +static int dummy_stm_link(struct stm_data *data, unsigned int master, + unsigned int channel) +{ + if (fail_mode && (channel & fail_mode)) + return -EINVAL; + + return 0; +} static int dummy_stm_init(void) { - return stm_register_device(NULL, &dummy_stm, THIS_MODULE); + int i, ret = -ENOMEM; + + if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX) + return -EINVAL; + + for (i = 0; i < nr_dummies; i++) { + dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); + if (!dummy_stm[i].name) + goto fail_unregister; + + dummy_stm[i].sw_start = 0x0000; + dummy_stm[i].sw_end = 0xffff; + dummy_stm[i].sw_nchannels = 0xffff; + dummy_stm[i].packet = dummy_stm_packet; + dummy_stm[i].link = dummy_stm_link; + + ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE); + if (ret) + goto fail_free; + } + + return 0; + +fail_unregister: + for (i--; i >= 0; i--) { + stm_unregister_device(&dummy_stm[i]); +fail_free: + kfree(dummy_stm[i].name); + } + + return ret; + } static void dummy_stm_exit(void) { - stm_unregister_device(&dummy_stm); + int i; + + for (i = 0; i < nr_dummies; i++) { + stm_unregister_device(&dummy_stm[i]); + kfree(dummy_stm[i].name); + } } module_init(dummy_stm_init); diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c new file mode 100644 index 000000000000..3da7b673aab2 --- /dev/null +++ b/drivers/hwtracing/stm/heartbeat.c @@ -0,0 +1,126 @@ +/* + * Simple heartbeat STM source driver + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * Heartbeat STM source will send repetitive messages over STM devices to a + * trace host. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hrtimer.h> +#include <linux/slab.h> +#include <linux/stm.h> + +#define STM_HEARTBEAT_MAX 32 + +static int nr_devs = 4; +static int interval_ms = 10; + +module_param(nr_devs, int, 0400); +module_param(interval_ms, int, 0600); + +static struct stm_heartbeat { + struct stm_source_data data; + struct hrtimer hrtimer; + unsigned int active; +} stm_heartbeat[STM_HEARTBEAT_MAX]; + +static const char str[] = "heartbeat stm source driver is here to serve you"; + +static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) +{ + struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat, + hrtimer); + + stm_source_write(&heartbeat->data, 0, str, sizeof str); + if (heartbeat->active) + hrtimer_forward_now(hr, ms_to_ktime(interval_ms)); + + return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART; +} + +static int stm_heartbeat_link(struct stm_source_data *data) +{ + struct stm_heartbeat *heartbeat = + container_of(data, struct stm_heartbeat, data); + + heartbeat->active = 1; + hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms), + HRTIMER_MODE_ABS); + + return 0; +} + +static void stm_heartbeat_unlink(struct stm_source_data *data) +{ + struct stm_heartbeat *heartbeat = + container_of(data, struct stm_heartbeat, data); + + heartbeat->active = 0; + hrtimer_cancel(&heartbeat->hrtimer); +} + +static int stm_heartbeat_init(void) +{ + int i, ret = -ENOMEM; + + if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) + return -EINVAL; + + for (i = 0; i < nr_devs; i++) { + stm_heartbeat[i].data.name = + kasprintf(GFP_KERNEL, "heartbeat.%d", i); + if (!stm_heartbeat[i].data.name) + goto fail_unregister; + + stm_heartbeat[i].data.nr_chans = 1; + stm_heartbeat[i].data.link = stm_heartbeat_link; + stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; + hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + stm_heartbeat[i].hrtimer.function = + stm_heartbeat_hrtimer_handler; + + ret = stm_source_register_device(NULL, &stm_heartbeat[i].data); + if (ret) + goto fail_free; + } + + return 0; + +fail_unregister: + for (i--; i >= 0; i--) { + stm_source_unregister_device(&stm_heartbeat[i].data); +fail_free: + kfree(stm_heartbeat[i].data.name); + } + + return ret; +} + +static void stm_heartbeat_exit(void) +{ + int i; + + for (i = 0; i < nr_devs; i++) { + stm_source_unregister_device(&stm_heartbeat[i].data); + kfree(stm_heartbeat[i].data.name); + } +} + +module_init(stm_heartbeat_init); +module_exit(stm_heartbeat_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("stm_heartbeat driver"); +MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>"); diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 11ab6d01adf6..1c061cb9bff0 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy) { struct stm_device *stm = policy->stm; + /* + * stp_policy_release() will not call here if the policy is already + * unbound; other users should not either, as no link exists between + * this policy and anything else in that case + */ if (WARN_ON_ONCE(!policy->stm)) return; - mutex_lock(&stm->policy_mutex); - stm->policy = NULL; - mutex_unlock(&stm->policy_mutex); + lockdep_assert_held(&stm->policy_mutex); + stm->policy = NULL; policy->stm = NULL; stm_put_device(stm); @@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy) static void 
stp_policy_release(struct config_item *item) { struct stp_policy *policy = to_stp_policy(item); + struct stm_device *stm = policy->stm; + /* a policy *can* be unbound and still exist in configfs tree */ + if (!stm) + return; + + mutex_lock(&stm->policy_mutex); stp_policy_unbind(policy); + mutex_unlock(&stm->policy_mutex); + kfree(policy); } @@ -320,16 +332,17 @@ stp_policies_make(struct config_group *group, const char *name) /* * node must look like <device_name>.<policy_name>, where - * <device_name> is the name of an existing stm device and - * <policy_name> is an arbitrary string + * <device_name> is the name of an existing stm device; may + * contain dots; + * <policy_name> is an arbitrary string; may not contain dots */ - p = strchr(devname, '.'); + p = strrchr(devname, '.'); if (!p) { kfree(devname); return ERR_PTR(-EINVAL); } - *p++ = '\0'; + *p = '\0'; stm = stm_find_device(devname); kfree(devname); diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 95ece0292c99..4e8c6926260f 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h @@ -45,6 +45,7 @@ struct stm_device { int major; unsigned int sw_nmasters; struct stm_data *data; + struct mutex link_mutex; spinlock_t link_lock; struct list_head link_list; /* master allocation */ @@ -56,6 +57,7 @@ struct stm_device { container_of((_d), struct stm_device, dev) struct stm_output { + spinlock_t lock; unsigned int master; unsigned int channel; unsigned int nr_chans; diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 11fdadc68e53..2a6eaf1122b4 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -103,6 +103,7 @@ enum ctype { CT_EXEC_USERSPACE, CT_ACCESS_USERSPACE, CT_WRITE_RO, + CT_WRITE_RO_AFTER_INIT, CT_WRITE_KERN, }; @@ -140,6 +141,7 @@ static char* cp_type[] = { "EXEC_USERSPACE", "ACCESS_USERSPACE", "WRITE_RO", + "WRITE_RO_AFTER_INIT", "WRITE_KERN", }; @@ -162,6 +164,7 @@ static DEFINE_SPINLOCK(lock_me_up); static u8 data_area[EXEC_SIZE]; static const unsigned long rodata = 0xAA55AA55; +static unsigned long ro_after_init __ro_after_init = 0x55AA5500; module_param(recur_count, int, 0644); MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test"); @@ -503,11 +506,28 @@ static void lkdtm_do_action(enum ctype which) break; } case CT_WRITE_RO: { - unsigned long *ptr; + /* Explicitly cast away "const" for the test. */ + unsigned long *ptr = (unsigned long *)&rodata; - ptr = (unsigned long *)&rodata; + pr_info("attempting bad rodata write at %p\n", ptr); + *ptr ^= 0xabcd1234; - pr_info("attempting bad write at %p\n", ptr); + break; + } + case CT_WRITE_RO_AFTER_INIT: { + unsigned long *ptr = &ro_after_init; + + /* + * Verify we were written to during init. Since an Oops + * is considered a "success", a failure is to just skip the + * real test. 
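The new WRITE_RO_AFTER_INIT case exercises the __ro_after_init annotation: data marked this way stays writable while init code runs (lkdtm_module_init() below ORs in 0xAA for exactly that reason) and is expected to be read-only afterwards, so the later write should fault. A minimal sketch of the annotation's normal use, with hypothetical names:

/* Fixed up once during boot, read-only for the rest of runtime. */
static unsigned long example_setting __ro_after_init;

static int __init example_setup(void)
{
	example_setting = 42;	/* legal: init code may still write here */
	return 0;
}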
+ */ + if ((*ptr & 0xAA) != 0xAA) { + pr_info("%p was NOT written during init!?\n", ptr); + break; + } + + pr_info("attempting bad ro_after_init write at %p\n", ptr); *ptr ^= 0xabcd1234; break; @@ -817,6 +837,9 @@ static int __init lkdtm_module_init(void) int n_debugfs_entries = 1; /* Assume only the direct entry */ int i; + /* Make sure we can write to __ro_after_init values during __init */ + ro_after_init |= 0xAA; + /* Register debugfs interface */ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); if (!lkdtm_debugfs_root) { diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 58048dd5fcd0..3319df172f6a 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -763,6 +763,16 @@ const void * __init of_flat_dt_match_machine(const void *default_match, } #ifdef CONFIG_BLK_DEV_INITRD +#ifndef __early_init_dt_declare_initrd +static void __early_init_dt_declare_initrd(unsigned long start, + unsigned long end) +{ + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(end); + initrd_below_start_ok = 1; +} +#endif + /** * early_init_dt_check_for_initrd - Decode initrd location from flat tree * @node: reference to node containing initrd location ('chosen') @@ -785,9 +795,7 @@ static void __init early_init_dt_check_for_initrd(unsigned long node) return; end = of_read_number(prop, len/4); - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; + __early_init_dt_declare_initrd(start, end); pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", (unsigned long long)start, (unsigned long long)end); @@ -979,13 +987,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, } #ifdef CONFIG_HAVE_MEMBLOCK +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif #ifndef MAX_MEMBLOCK_ADDR #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) #endif void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) { - const u64 phys_offset = __pa(PAGE_OFFSET); + const u64 phys_offset = MIN_MEMBLOCK_ADDR; if (!PAGE_ALIGNED(base)) { if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index 65bce1eecaf8..5548a31e1a39 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -116,7 +116,7 @@ static const struct spm_reg_data spm_reg_8064_cpu = { static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv); -typedef int (*idle_fn)(int); +typedef int (*idle_fn)(void); static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops); static inline void spm_register_write(struct spm_driver_data *drv, @@ -179,10 +179,10 @@ static int qcom_pm_collapse(unsigned long int unused) return -1; } -static int qcom_cpu_spc(int cpu) +static int qcom_cpu_spc(void) { int ret; - struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); + struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv); spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC); ret = cpu_suspend(0, qcom_pm_collapse); @@ -197,9 +197,9 @@ static int qcom_cpu_spc(int cpu) return ret; } -static int qcom_idle_enter(int cpu, unsigned long index) +static int qcom_idle_enter(unsigned long index) { - return per_cpu(qcom_idle_ops, cpu)[index](cpu); + return __this_cpu_read(qcom_idle_ops)[index](); } static const struct of_device_id qcom_idle_state_match[] __initconst = { diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig new file mode 100644 index 000000000000..2330a4eb4e8b --- /dev/null +++ b/drivers/tee/Kconfig @@ -0,0 +1,18 @@ +# Generic Trusted Execution Environment Configuration +config TEE + 
tristate "Trusted Execution Environment support" + select DMA_SHARED_BUFFER + select GENERIC_ALLOCATOR + help + This implements a generic interface towards a Trusted Execution + Environment (TEE). + +if TEE + +menu "TEE drivers" + +source "drivers/tee/optee/Kconfig" + +endmenu + +endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile new file mode 100644 index 000000000000..7a4e4a1ac39c --- /dev/null +++ b/drivers/tee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_TEE) += tee.o +tee-objs += tee_core.o +tee-objs += tee_shm.o +tee-objs += tee_shm_pool.o +obj-$(CONFIG_OPTEE) += optee/ diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig new file mode 100644 index 000000000000..0126de898036 --- /dev/null +++ b/drivers/tee/optee/Kconfig @@ -0,0 +1,7 @@ +# OP-TEE Trusted Execution Environment Configuration +config OPTEE + tristate "OP-TEE" + depends on HAVE_ARM_SMCCC + help + This implements the OP-TEE Trusted Execution Environment (TEE) + driver. diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile new file mode 100644 index 000000000000..92fe5789bcce --- /dev/null +++ b/drivers/tee/optee/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_OPTEE) += optee.o +optee-objs += core.o +optee-objs += call.o +optee-objs += rpc.o +optee-objs += supp.o diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c new file mode 100644 index 000000000000..f7b7b404c990 --- /dev/null +++ b/drivers/tee/optee/call.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/arm-smccc.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +static void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. 
+ */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +static void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +static void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. The thread in secure + * world that was used for this call is now available for some + * other task to use. + */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. + */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} + +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) +{ + struct optee_session *sess; + + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; + + return NULL; +} + +/** + * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Does an SMC to OP-TEE in secure world and handles eventual resulting + * Remote Procedure Calls (RPC) from OP-TEE. + * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_call_waiter w; + struct optee_rpc_param param = { }; + u32 ret; + + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(&param.a1, &param.a2, parg); + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, + param.a4, param.a5, param.a6, param.a7, + &res); + + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { + /* + * Out of threads in secure world, wait for a thread to + * become available. + */ + optee_cq_wait_for_completion(&optee->call_queue, &w); + } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { + param.a0 = res.a0; + param.a1 = res.a1; + param.a2 = res.a2; + param.a3 = res.a3; + optee_handle_rpc(ctx, &param); + } else { + ret = res.a0; + break; + } + } + + /* + * We're done with our thread in secure world, if there are any + * thread waiters, wake up one. 
+ */ + optee_cq_wait_final(&optee->call_queue, &w); + + return ret; +} + +static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, + struct optee_msg_arg **msg_arg, + phys_addr_t *msg_parg) +{ + int rc; + struct tee_shm *shm; + struct optee_msg_arg *ma; + + shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params), + TEE_SHM_MAPPED); + if (IS_ERR(shm)) + return shm; + + ma = tee_shm_get_va(shm, 0); + if (IS_ERR(ma)) { + rc = PTR_ERR(ma); + goto out; + } + + rc = tee_shm_get_pa(shm, 0, msg_parg); + if (rc) + goto out; + + memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + ma->num_params = num_params; + *msg_arg = ma; +out: + if (rc) { + tee_shm_free(shm); + return ERR_PTR(rc); + } + + return shm; +} + +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + int rc; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. + */ + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | + OPTEE_MSG_ATTR_META; + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) + goto out; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + rc = -ENOMEM; + goto out; + } + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (msg_arg->ret == TEEC_SUCCESS) { + /* A new session has been created, add it to the list. 
*/ + sess->session_id = msg_arg->session; + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + } else { + kfree(sess); + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + /* Close session again to avoid leakage */ + optee_close_session(ctx, msg_arg->session); + } else { + arg->session = msg_arg->session; + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; + } +out: + tee_shm_free(shm); + + return rc; +} + +int optee_close_session(struct tee_context *ctx, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid and remove it from the list */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + if (sess) + list_del(&sess->list_node); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + kfree(sess); + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + int rc; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, arg->session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + msg_arg->func = arg->func; + msg_arg->session = arg->session; + msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); + if (rc) + goto out; + + if (optee_do_call_with_arg(ctx, msg_parg)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { + msg_arg->ret = TEEC_ERROR_COMMUNICATION; + msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + arg->ret = msg_arg->ret; + arg->ret_origin = msg_arg->ret_origin; +out: + tee_shm_free(shm); + return rc; +} + +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_shm *shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess; + + /* Check that the session is valid */ + mutex_lock(&ctxdata->mutex); + sess = find_session(ctxdata, session); + mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; + msg_arg->session = session; + msg_arg->cancel_id = cancel_id; + optee_do_call_with_arg(ctx, msg_parg); + + tee_shm_free(shm); + return 0; +} + +/** + * optee_enable_shm_cache() - Enables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_enable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. 
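+ * OPTEE_SMC_ENABLE_SHM_CACHE returns OPTEE_SMC_RETURN_EBUSY for as
+ * long as secure world isn't idle, so the loop below waits for an
+ * ongoing call to finish and then tries again until the call returns
+ * OPTEE_SMC_RETURN_OK.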
*/ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + struct arm_smccc_res res; + + optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res); + if (res.a0 == OPTEE_SMC_RETURN_OK) + break; + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + optee_cq_wait_final(&optee->call_queue, &w); +} + +/** + * optee_disable_shm_cache() - Disables caching of some shared memory allocation + * in OP-TEE + * @optee: main service struct + */ +void optee_disable_shm_cache(struct optee *optee) +{ + struct optee_call_waiter w; + + /* We need to retry until secure world isn't busy. */ + optee_cq_wait_init(&optee->call_queue, &w); + while (true) { + union { + struct arm_smccc_res smccc; + struct optee_smc_disable_shm_cache_result result; + } res; + + optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0, + 0, &res.smccc); + if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL) + break; /* All shm's freed */ + if (res.result.status == OPTEE_SMC_RETURN_OK) { + struct tee_shm *shm; + + shm = reg_pair_to_ptr(res.result.shm_upper32, + res.result.shm_lower32); + tee_shm_free(shm); + } else { + optee_cq_wait_for_completion(&optee->call_queue, &w); + } + } + optee_cq_wait_final(&optee->call_queue, &w); +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c new file mode 100644 index 000000000000..58169e519422 --- /dev/null +++ b/drivers/tee/optee/core.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/arm-smccc.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include "optee_private.h" +#include "optee_smc.h" + +#define DRIVER_NAME "optee" + +#define OPTEE_SHM_NUM_PRIV_PAGES 1 + +/** + * optee_from_msg_param() - convert from OPTEE_MSG parameters to + * struct tee_param + * @params: subsystem internal parameter representation + * @num_params: number of elements in the parameter arrays + * @msg_params: OPTEE_MSG parameters + * Returns 0 on success or <0 on failure + */ +int optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params) +{ + int rc; + size_t n; + struct tee_shm *shm; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + const struct optee_msg_param *mp = msg_params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NONE: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&p->u, 0, sizeof(p->u)); + break; + case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + p->u.value.a = mp->u.value.a; + p->u.value.b = mp->u.value.b; + p->u.value.c = mp->u.value.c; + break; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + p->u.memref.size = mp->u.tmem.size; + shm = (struct tee_shm *)(unsigned long) + mp->u.tmem.shm_ref; + if (!shm) { + p->u.memref.shm_offs = 0; + p->u.memref.shm = NULL; + break; + } + rc = tee_shm_get_pa(shm, 0, &pa); + if (rc) + return rc; + p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; + p->u.memref.shm = shm; + + /* Check that the memref is covered by the shm object */ + if (p->u.memref.size) { + size_t o = p->u.memref.shm_offs + + p->u.memref.size - 1; + + rc = tee_shm_get_pa(shm, o, NULL); + if (rc) + return rc; + } + break; + default: + return -EINVAL; + } + } + return 0; +} + +/** + * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters + * @msg_params: OPTEE_MSG parameters + * @num_params: number of elements in the parameter arrays + * @params: subsystem itnernal parameter representation + * Returns 0 on success or <0 on failure + */ +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params) +{ + int rc; + size_t n; + phys_addr_t pa; + + for (n = 0; n < num_params; n++) { + const struct tee_param *p = params + n; + struct optee_msg_param *mp = msg_params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE; + memset(&mp->u, 0, sizeof(mp->u)); + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + mp->u.value.a = p->u.value.a; + mp->u.value.b = p->u.value.b; + mp->u.value.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case 
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + + p->attr - + TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm; + mp->u.tmem.size = p->u.memref.size; + if (!p->u.memref.shm) { + mp->u.tmem.buf_ptr = 0; + break; + } + rc = tee_shm_get_pa(p->u.memref.shm, + p->u.memref.shm_offs, &pa); + if (rc) + return rc; + mp->u.tmem.buf_ptr = pa; + mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED << + OPTEE_MSG_ATTR_CACHE_SHIFT; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static void optee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_OPTEE, + .impl_caps = TEE_OPTEE_CAP_TZ, + .gen_caps = TEE_GEN_CAP_GP, + }; + *vers = v; +} + +static int optee_open(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + if (teedev == optee->supp_teedev) { + bool busy = true; + + mutex_lock(&optee->supp.ctx_mutex); + if (!optee->supp.ctx) { + busy = false; + optee->supp.ctx = ctx; + } + mutex_unlock(&optee->supp.ctx_mutex); + if (busy) { + kfree(ctxdata); + return -EBUSY; + } + } + + mutex_init(&ctxdata->mutex); + INIT_LIST_HEAD(&ctxdata->sess_list); + + ctx->data = ctxdata; + return 0; +} + +static void optee_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + struct optee_msg_arg *arg = NULL; + phys_addr_t parg; + struct optee_session *sess; + struct optee_session *sess_tmp; + + if (!ctxdata) + return; + + shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED); + if (!IS_ERR(shm)) { + arg = tee_shm_get_va(shm, 0); + /* + * If va2pa fails for some reason, we can't call + * optee_close_session(), only free the memory. Secure OS + * will leak sessions and finally refuse more sessions, but + * we will at least let normal world reclaim its memory. 
+ */ + if (!IS_ERR(arg)) + tee_shm_va2pa(shm, arg, &parg); + } + + list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list, + list_node) { + list_del(&sess->list_node); + if (!IS_ERR_OR_NULL(arg)) { + memset(arg, 0, sizeof(*arg)); + arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + arg->session = sess->session_id; + optee_do_call_with_arg(ctx, parg); + } + kfree(sess); + } + kfree(ctxdata); + + if (!IS_ERR(shm)) + tee_shm_free(shm); + + ctx->data = NULL; + + if (teedev == optee->supp_teedev) { + mutex_lock(&optee->supp.ctx_mutex); + optee->supp.ctx = NULL; + mutex_unlock(&optee->supp.ctx_mutex); + } +} + +static struct tee_driver_ops optee_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .open_session = optee_open_session, + .close_session = optee_close_session, + .invoke_func = optee_invoke_func, + .cancel_req = optee_cancel_req, +}; + +static struct tee_desc optee_desc = { + .name = DRIVER_NAME "-clnt", + .ops = &optee_ops, + .owner = THIS_MODULE, +}; + +static struct tee_driver_ops optee_supp_ops = { + .get_version = optee_get_version, + .open = optee_open, + .release = optee_release, + .supp_recv = optee_supp_recv, + .supp_send = optee_supp_send, +}; + +static struct tee_desc optee_supp_desc = { + .name = DRIVER_NAME "-supp", + .ops = &optee_supp_ops, + .owner = THIS_MODULE, + .flags = TEE_DESC_PRIVILEGED, +}; + +static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) +{ + struct arm_smccc_res res; + + invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res); + + if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 && + res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3) + return true; + return false; +} + +static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_calls_revision_result result; + } res; + + invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + + if (res.result.major == OPTEE_MSG_REVISION_MAJOR && + (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR) + return true; + return false; +} + +static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, + u32 *sec_caps) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_exchange_capabilities_result result; + } res; + u32 a1 = 0; + + /* + * TODO This isn't enough to tell if it's UP system (from kernel + * point of view) or not, is_smp() returns the the information + * needed, but can't be called directly from here. 
+ */ + if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1) + a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR; + + invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0, + &res.smccc); + + if (res.result.status != OPTEE_SMC_RETURN_OK) + return false; + + *sec_caps = res.result.capabilities; + return true; +} + +static struct tee_shm_pool * +optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm) +{ + union { + struct arm_smccc_res smccc; + struct optee_smc_get_shm_config_result result; + } res; + struct tee_shm_pool *pool; + unsigned long vaddr; + phys_addr_t paddr; + size_t size; + phys_addr_t begin; + phys_addr_t end; + void *va; + struct tee_shm_pool_mem_info priv_info; + struct tee_shm_pool_mem_info dmabuf_info; + + invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) { + pr_info("shm service not available\n"); + return ERR_PTR(-ENOENT); + } + + if (res.result.settings != OPTEE_SMC_SHM_CACHED) { + pr_err("only normal cached shared memory supported\n"); + return ERR_PTR(-EINVAL); + } + + begin = roundup(res.result.start, PAGE_SIZE); + end = rounddown(res.result.start + res.result.size, PAGE_SIZE); + paddr = begin; + size = end - begin; + + if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) { + pr_err("too small shared memory area\n"); + return ERR_PTR(-EINVAL); + } + + va = memremap(paddr, size, MEMREMAP_WB); + if (!va) { + pr_err("shared memory ioremap failed\n"); + return ERR_PTR(-EINVAL); + } + vaddr = (unsigned long)va; + + priv_info.vaddr = vaddr; + priv_info.paddr = paddr; + priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE; + + pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info); + if (IS_ERR(pool)) { + memunmap(va); + goto out; + } + + *memremaped_shm = va; +out: + return pool; +} + +/* Simple wrapper functions to be able to use a function pointer */ +static void optee_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static void optee_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *res) +{ + arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res); +} + +static optee_invoke_fn *get_invoke_func(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return ERR_PTR(-ENXIO); + } + + if (!strcmp("hvc", method)) + return optee_smccc_hvc; + else if (!strcmp("smc", method)) + return optee_smccc_smc; + + pr_warn("invalid \"method\" property: %s\n", method); + return ERR_PTR(-EINVAL); +} + +static struct optee *optee_probe(struct device_node *np) +{ + optee_invoke_fn *invoke_fn; + struct tee_shm_pool *pool; + struct optee *optee = NULL; + void *memremaped_shm = NULL; + struct tee_device *teedev; + u32 sec_caps; + int rc; + + invoke_fn = get_invoke_func(np); + if (IS_ERR(invoke_fn)) + return (void *)invoke_fn; + + if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { + pr_warn("api uid mismatch\n"); + return ERR_PTR(-EINVAL); 
+ } + + if (!optee_msg_api_revision_is_compatible(invoke_fn)) { + pr_warn("api revision mismatch\n"); + return ERR_PTR(-EINVAL); + } + + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { + pr_warn("capabilities mismatch\n"); + return ERR_PTR(-EINVAL); + } + + /* + * We have no other option for shared memory, if secure world + * doesn't have any reserved memory we can use we can't continue. + */ + if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + return ERR_PTR(-EINVAL); + + pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); + if (IS_ERR(pool)) + return (void *)pool; + + optee = kzalloc(sizeof(*optee), GFP_KERNEL); + if (!optee) { + rc = -ENOMEM; + goto err; + } + + optee->invoke_fn = invoke_fn; + + teedev = tee_device_alloc(&optee_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->teedev = teedev; + + teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); + if (IS_ERR(teedev)) { + rc = PTR_ERR(teedev); + goto err; + } + optee->supp_teedev = teedev; + + rc = tee_device_register(optee->teedev); + if (rc) + goto err; + + rc = tee_device_register(optee->supp_teedev); + if (rc) + goto err; + + mutex_init(&optee->call_queue.mutex); + INIT_LIST_HEAD(&optee->call_queue.waiters); + optee_wait_queue_init(&optee->wait_queue); + optee_supp_init(&optee->supp); + optee->memremaped_shm = memremaped_shm; + optee->pool = pool; + + optee_enable_shm_cache(optee); + + pr_info("initialized driver\n"); + return optee; +err: + if (optee) { + /* + * tee_device_unregister() is safe to call even if the + * devices hasn't been registered with + * tee_device_register() yet. + */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + kfree(optee); + } + if (pool) + tee_shm_pool_free(pool); + if (memremaped_shm) + memunmap(memremaped_shm); + return ERR_PTR(rc); +} + +static void optee_remove(struct optee *optee) +{ + /* + * Ask OP-TEE to free all cached shared memory objects to decrease + * reference counters and also avoid wild pointers in secure world + * into the old shared memory range. + */ + optee_disable_shm_cache(optee); + + /* + * The two devices has to be unregistered before we can free the + * other resources. 
+ */ + tee_device_unregister(optee->supp_teedev); + tee_device_unregister(optee->teedev); + + tee_shm_pool_free(optee->pool); + if (optee->memremaped_shm) + memunmap(optee->memremaped_shm); + optee_wait_queue_exit(&optee->wait_queue); + optee_supp_uninit(&optee->supp); + mutex_destroy(&optee->call_queue.mutex); + + kfree(optee); +} + +static const struct of_device_id optee_match[] = { + { .compatible = "linaro,optee-tz" }, + {}, +}; + +static struct optee *optee_svc; + +static int __init optee_driver_init(void) +{ + struct device_node *fw_np; + struct device_node *np; + struct optee *optee; + + /* Node is supposed to be below /firmware */ + fw_np = of_find_node_by_name(NULL, "firmware"); + if (!fw_np) + return -ENODEV; + + np = of_find_matching_node(fw_np, optee_match); + of_node_put(fw_np); + if (!np) + return -ENODEV; + + optee = optee_probe(np); + of_node_put(np); + + if (IS_ERR(optee)) + return PTR_ERR(optee); + + optee_svc = optee; + + return 0; +} +module_init(optee_driver_init); + +static void __exit optee_driver_exit(void) +{ + struct optee *optee = optee_svc; + + optee_svc = NULL; + if (optee) + optee_remove(optee); +} +module_exit(optee_driver_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("OP-TEE driver"); +MODULE_SUPPORTED_DEVICE(""); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h new file mode 100644 index 000000000000..dd7a06ee0462 --- /dev/null +++ b/drivers/tee/optee/optee_msg.h @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _OPTEE_MSG_H +#define _OPTEE_MSG_H + +#include <linux/bitops.h> +#include <linux/types.h> + +/* + * This file defines the OP-TEE message protocol used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into three sections. + * 1. Formatting of messages. + * 2. Requests from normal world + * 3. Requests from secure world, Remote Procedure Call (RPC), handled by + * tee-supplicant. 
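+ *
+ * As a rough illustration (not a normative part of the protocol), a
+ * normal world client invoking a Trusted Application function with a
+ * single input value parameter fills in a struct optee_msg_arg in
+ * shared memory along these lines:
+ *
+ *	arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+ *	arg->func = <TA specific function id>;
+ *	arg->session = <session id from OPTEE_MSG_CMD_OPEN_SESSION>;
+ *	arg->num_params = 1;
+ *	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ *	arg->params[0].u.value.a = <first value>;
+ *
+ * and then passes the physical address of the argument struct to
+ * secure world, see part 2 below.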
+ */ + +/***************************************************************************** + * Part 1 - formatting of messages + *****************************************************************************/ + +#define OPTEE_MSG_ATTR_TYPE_NONE 0x0 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1 +#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2 +#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5 +#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6 +#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7 +#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 +#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa +#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb + +#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0) + +/* + * Meta parameter to be absorbed by the Secure OS and not passed + * to the Trusted Application. + * + * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION. + */ +#define OPTEE_MSG_ATTR_META BIT(8) + +/* + * The temporary shared memory object is not physically contigous and this + * temp memref is followed by another fragment until the last temp memref + * that doesn't have this bit set. + */ +#define OPTEE_MSG_ATTR_FRAGMENT BIT(9) + +/* + * Memory attributes for caching passed with temp memrefs. The actual value + * used is defined outside the message protocol with the exception of + * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already + * defined for the memory range should be used. If optee_smc.h is used as + * bearer of this protocol OPTEE_SMC_SHM_* is used for values. + */ +#define OPTEE_MSG_ATTR_CACHE_SHIFT 16 +#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0) +#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0 + +/* + * Same values as TEE_LOGIN_* from TEE Internal API + */ +#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000 +#define OPTEE_MSG_LOGIN_USER 0x00000001 +#define OPTEE_MSG_LOGIN_GROUP 0x00000002 +#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004 +#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005 +#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006 + +/** + * struct optee_msg_param_tmem - temporary memory reference parameter + * @buf_ptr: Address of the buffer + * @size: Size of the buffer + * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm + * + * Secure and normal world communicates pointers as physical address + * instead of the virtual address. This is because secure and normal world + * have completely independent memory mapping. Normal world can even have a + * hypervisor which need to translate the guest physical address (AKA IPA + * in ARM documentation) to a real physical address before passing the + * structure to secure world. + */ +struct optee_msg_param_tmem { + u64 buf_ptr; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_rmem - registered memory reference parameter + * @offs: Offset into shared memory reference + * @size: Size of the buffer + * @shm_ref: Shared memory reference, pointer to a struct tee_shm + */ +struct optee_msg_param_rmem { + u64 offs; + u64 size; + u64 shm_ref; +}; + +/** + * struct optee_msg_param_value - opaque value parameter + * + * Value parameters are passed unchecked between normal and secure world. 
+ */ +struct optee_msg_param_value { + u64 a; + u64 b; + u64 c; +}; + +/** + * struct optee_msg_param - parameter used together with struct optee_msg_arg + * @attr: attributes + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in + * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem. + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. + */ +struct optee_msg_param { + u64 attr; + union { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; + } u; +}; + +/** + * struct optee_msg_arg - call argument + * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_* + * @func: Trusted Application function, specific to the Trusted Application, + * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND + * @session: In parameter for all OPTEE_MSG_CMD_* except + * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead + * @cancel_id: Cancellation id, a unique value to identify this request + * @ret: return value + * @ret_origin: origin of the return value + * @num_params: number of parameters supplied to the OS Command + * @params: the parameters supplied to the OS Command + * + * All normal calls to Trusted OS uses this struct. If cmd requires further + * information than what these field holds it can be passed as a parameter + * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in corresponding + * attrs field). All parameters tagged as meta has to come first. + * + * Temp memref parameters can be fragmented if supported by the Trusted OS + * (when optee_smc.h is bearer of this protocol this is indicated with + * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is + * fragmented then has all but the last fragment the + * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented + * it will still be presented as a single logical memref to the Trusted + * Application. + */ +struct optee_msg_arg { + u32 cmd; + u32 func; + u32 session; + u32 cancel_id; + u32 pad; + u32 ret; + u32 ret_origin; + u32 num_params; + + /* num_params tells the actual number of element in params */ + struct optee_msg_param params[0]; +}; + +/** + * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg + * + * @num_params: Number of parameters embedded in the struct optee_msg_arg + * + * Returns the size of the struct optee_msg_arg together with the number + * of embedded parameters. + */ +#define OPTEE_MSG_GET_ARG_SIZE(num_params) \ + (sizeof(struct optee_msg_arg) + \ + sizeof(struct optee_msg_param) * (num_params)) + +/***************************************************************************** + * Part 2 - requests from normal world + *****************************************************************************/ + +/* + * Return the following UID if using API specified in this file without + * further extensions: + * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b. + * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1, + * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3. 
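+ * That is, OPTEE_MSG_UID_0 carries the first 32 bits of the UUID
+ * (0x384fb3e0), OPTEE_MSG_UID_1 the next 32 bits, and so on through
+ * OPTEE_MSG_UID_3 (0xa5d5c51b).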
+ */ +#define OPTEE_MSG_UID_0 0x384fb3e0 +#define OPTEE_MSG_UID_1 0xe7f811e3 +#define OPTEE_MSG_UID_2 0xaf630002 +#define OPTEE_MSG_UID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01 + +/* + * Returns 2.0 if using API specified in this file without further + * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR + * and OPTEE_MSG_REVISION_MINOR + */ +#define OPTEE_MSG_REVISION_MAJOR 2 +#define OPTEE_MSG_REVISION_MINOR 0 +#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03 + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in 4 32-bit words in the same way as + * OPTEE_MSG_FUNCID_CALLS_UID described above. + */ +#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0 +#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3 +#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002 +#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b +#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000 + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in 2 32-bit words in the same way as + * OPTEE_MSG_CALLS_REVISION described above. + */ +#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 + +/* + * Do a secure call with struct optee_msg_arg as argument + * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd + * + * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application. + * The first two parameters are tagged as meta, holding two value + * parameters to pass the following information: + * param[0].u.value.a-b uuid of Trusted Application + * param[1].u.value.a-b uuid of Client + * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_* + * + * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command a previously opened + * session to a Trusted Application. struct optee_msg_arg::func is Trusted + * Application function, specific to the Trusted Application. + * + * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to + * Trusted Application. + * + * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command. + * + * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The + * information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + * [| OPTEE_MSG_ATTR_FRAGMENT] + * [in] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [in] param[0].u.tmem.size size (of first fragment) + * [in] param[0].u.tmem.shm_ref holds shared memory reference + * ... + * The shared memory can optionally be fragmented, temp memrefs can follow + * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set. + * + * OPTEE_MSG_CMD_UNREGISTER_SHM unregisteres a previously registered shared + * memory reference. 
The information is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + * [in] param[0].u.rmem.shm_ref holds shared memory reference + * [in] param[0].u.rmem.offs 0 + * [in] param[0].u.rmem.size 0 + */ +#define OPTEE_MSG_CMD_OPEN_SESSION 0 +#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 +#define OPTEE_MSG_CMD_CLOSE_SESSION 2 +#define OPTEE_MSG_CMD_CANCEL 3 +#define OPTEE_MSG_CMD_REGISTER_SHM 4 +#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 + +/***************************************************************************** + * Part 3 - Requests from secure world, RPC + *****************************************************************************/ + +/* + * All RPC is done with a struct optee_msg_arg as bearer of information, + * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below + * + * RPC communication with tee-supplicant is reversed compared to normal + * client communication desribed above. The supplicant receives requests + * and sends responses. + */ + +/* + * Load a TA into memory, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_LOAD_TA 0 + +/* + * Reserved + */ +#define OPTEE_MSG_RPC_CMD_RPMB 1 + +/* + * File system access, defined in tee-supplicant + */ +#define OPTEE_MSG_RPC_CMD_FS 2 + +/* + * Get time + * + * Returns number of seconds and nano seconds since the Epoch, + * 1970-01-01 00:00:00 +0000 (UTC). + * + * [out] param[0].u.value.a Number of seconds + * [out] param[0].u.value.b Number of nano seconds. + */ +#define OPTEE_MSG_RPC_CMD_GET_TIME 3 + +/* + * Wait queue primitive, helper for secure world to implement a wait queue. + * + * If secure world need to wait for a secure world mutex it issues a sleep + * request instead of spinning in secure world. Conversely is a wakeup + * request issued when a secure world mutex with a thread waiting thread is + * unlocked. + * + * Waiting on a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP + * [in] param[0].u.value.b wait key + * + * Waking up a key + * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP + * [in] param[0].u.value.b wakeup key + */ +#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4 +#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0 +#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1 + +/* + * Suspend execution + * + * [in] param[0].value .a number of milliseconds to suspend + */ +#define OPTEE_MSG_RPC_CMD_SUSPEND 5 + +/* + * Allocate a piece of shared memory + * + * Shared memory can optionally be fragmented, to support that additional + * spare param entries are allocated to make room for eventual fragments. + * The spare param entries has .attr = OPTEE_MSG_ATTR_TYPE_NONE when + * unused. All returned temp memrefs except the last should have the + * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field. + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* below + * [in] param[0].u.value.b requested size + * [in] param[0].u.value.c required alignment + * + * [out] param[0].u.tmem.buf_ptr physical address (of first fragment) + * [out] param[0].u.tmem.size size (of first fragment) + * [out] param[0].u.tmem.shm_ref shared memory reference + * ... 
+ * [out] param[n].u.tmem.buf_ptr physical address + * [out] param[n].u.tmem.size size + * [out] param[n].u.tmem.shm_ref shared memory reference (same value + * as in param[n-1].u.tmem.shm_ref) + */ +#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 +/* Memory that can be shared with a non-secure user space application */ +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +/* Memory only shared with non-secure kernel */ +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 + +/* + * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC + * + * [in] param[0].u.value.a type of memory one of + * OPTEE_MSG_RPC_SHM_TYPE_* above + * [in] param[0].u.value.b value of shared memory reference + * returned in param[0].u.tmem.shm_ref + * above + */ +#define OPTEE_MSG_RPC_CMD_SHM_FREE 7 + +#endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h new file mode 100644 index 000000000000..c374cd594314 --- /dev/null +++ b/drivers/tee/optee/optee_private.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef OPTEE_PRIVATE_H +#define OPTEE_PRIVATE_H + +#include <linux/arm-smccc.h> +#include <linux/semaphore.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include "optee_msg.h" + +#define OPTEE_MAX_ARG_SIZE 1024 + +/* Some Global Platform error codes used in this driver */ +#define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_COMMUNICATION 0xFFFF000E +#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C + +#define TEEC_ORIGIN_COMMS 0x00000002 + +typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, + struct arm_smccc_res *); + +struct optee_call_queue { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head waiters; +}; + +struct optee_wait_queue { + /* Serializes access to this struct */ + struct mutex mu; + struct list_head db; +}; + +/** + * struct optee_supp - supplicant synchronization struct + * @ctx the context of current connected supplicant. 
+ * if !NULL the supplicant device is available for use, + * else busy + * @ctx_mutex: held while accessing @ctx + * @func: supplicant function id to call + * @ret: call return value + * @num_params: number of elements in @param + * @param: parameters for @func + * @req_posted: if true, a request has been posted to the supplicant + * @supp_next_send: if true, next step is for supplicant to send response + * @thrd_mutex: held by the thread doing a request to supplicant + * @supp_mutex: held by supplicant while operating on this struct + * @data_to_supp: supplicant is waiting on this for next request + * @data_from_supp: requesting thread is waiting on this to get the result + */ +struct optee_supp { + struct tee_context *ctx; + /* Serializes access of ctx */ + struct mutex ctx_mutex; + + u32 func; + u32 ret; + size_t num_params; + struct tee_param *param; + + bool req_posted; + bool supp_next_send; + /* Serializes access to this struct for requesting thread */ + struct mutex thrd_mutex; + /* Serializes access to this struct for supplicant threads */ + struct mutex supp_mutex; + struct completion data_to_supp; + struct completion data_from_supp; +}; + +/** + * struct optee - main service struct + * @supp_teedev: supplicant device + * @teedev: client device + * @invoke_fn: function to issue smc or hvc + * @call_queue: queue of threads waiting to call @invoke_fn + * @wait_queue: queue of threads from secure world waiting for a + * secure world sync object + * @supp: supplicant synchronization struct for RPC to supplicant + * @pool: shared memory pool + * @memremaped_shm virtual address of memory in shared memory pool + */ +struct optee { + struct tee_device *supp_teedev; + struct tee_device *teedev; + optee_invoke_fn *invoke_fn; + struct optee_call_queue call_queue; + struct optee_wait_queue wait_queue; + struct optee_supp supp; + struct tee_shm_pool *pool; + void *memremaped_shm; +}; + +struct optee_session { + struct list_head list_node; + u32 session_id; +}; + +struct optee_context_data { + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head sess_list; +}; + +struct optee_rpc_param { + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; +}; + +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param); + +void optee_wait_queue_init(struct optee_wait_queue *wq); +void optee_wait_queue_exit(struct optee_wait_queue *wq); + +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param); + +int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); +int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); +void optee_supp_init(struct optee_supp *supp); +void optee_supp_uninit(struct optee_supp *supp); + +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); +int optee_close_session(struct tee_context *ctx, u32 session); +int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); +int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); + +void optee_enable_shm_cache(struct optee *optee); +void optee_disable_shm_cache(struct optee *optee); + +int 
optee_from_msg_param(struct tee_param *params, size_t num_params, + const struct optee_msg_param *msg_params); +int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, + const struct tee_param *params); + +/* + * Small helpers + */ + +static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1) +{ + return (void *)(unsigned long)(((u64)reg0 << 32) | reg1); +} + +static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val) +{ + *reg0 = val >> 32; + *reg1 = val; +} + +#endif /*OPTEE_PRIVATE_H*/ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h new file mode 100644 index 000000000000..13b7c98cdf25 --- /dev/null +++ b/drivers/tee/optee/optee_smc.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef OPTEE_SMC_H +#define OPTEE_SMC_H + +#include <linux/arm-smccc.h> +#include <linux/bitops.h> + +#define OPTEE_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) +#define OPTEE_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS, (func_num)) + +/* + * Function specified by SMC Calling convention. + */ +#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00 +#define OPTEE_SMC_CALLS_COUNT \ + ARM_SMCCC_CALL_VAL(OPTEE_SMC_FAST_CALL, SMCCC_SMC_32, \ + SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_COUNT) + +/* + * Normal cached memory (write-back), shareable for SMP systems and not + * shareable for UP systems. + */ +#define OPTEE_SMC_SHM_CACHED 1 + +/* + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's + * 32-bit registers. 
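+ *
+ * 64-bit quantities such as physical pointers and shared memory
+ * cookies are therefore passed as a pair of registers, with the upper
+ * 32 bits in the lower-numbered register; reg_pair_from_64() and
+ * reg_pair_to_ptr() in optee_private.h are small helpers for this.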
+ */ + +/* + * Function specified by SMC Calling convention + * + * Return one of the following UIDs if using API specified in this file + * without further extentions: + * 65cb6b93-af0c-4617-8ed6-644a8d1140f8 + * see also OPTEE_SMC_UID_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID +#define OPTEE_SMC_CALLS_UID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_UID) + +/* + * Function specified by SMC Calling convention + * + * Returns 2.0 if using API specified in this file without further extentions. + * see also OPTEE_MSG_REVISION_* in optee_msg.h + */ +#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION +#define OPTEE_SMC_CALLS_REVISION \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_TRUSTED_OS_END, \ + OPTEE_SMC_FUNCID_CALLS_REVISION) + +struct optee_smc_calls_revision_result { + unsigned long major; + unsigned long minor; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Get UUID of Trusted OS. + * + * Used by non-secure world to figure out which Trusted OS is installed. + * Note that returned UUID is the UUID of the Trusted OS, not of the API. + * + * Returns UUID in a0-4 in the same way as OPTEE_SMC_CALLS_UID + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID +#define OPTEE_SMC_CALL_GET_OS_UUID \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID) + +/* + * Get revision of Trusted OS. + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION + * described above. + */ +#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION +#define OPTEE_SMC_CALL_GET_OS_REVISION \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION) + +/* + * Call with struct optee_msg_arg as argument + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg + * a3 Cache settings, not used if physical pointer is in a predefined shared + * memory area else per OPTEE_SMC_SHM_* + * a4-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 Return value, OPTEE_SMC_RETURN_* + * a1-3 Not used + * a4-7 Preserved + * + * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage: + * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT + * a1-3 Preserved + * a4-7 Preserved + * + * RPC return register usage: + * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val) + * a1-2 RPC parameters + * a3-7 Resume information, must be preserved + * + * Possible return values: + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Call completed, result updated in + * the previously supplied struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded, + * try again later. + * OPTEE_SMC_RETURN_EBADADDR Bad physcial pointer to struct + * optee_msg_arg. + * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg + * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal + * world. 
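+ *
+ * When an OPTEE_SMC_RETURN_IS_RPC() value is returned, normal world
+ * services the request indicated by a0 (with parameters in a1-2) and
+ * resumes the call with OPTEE_SMC_CALL_RETURN_FROM_RPC described
+ * further down, keeping the resume information intact; compare
+ * optee_do_call_with_arg() and optee_handle_rpc() in the driver.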
+ */ +#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG +#define OPTEE_SMC_CALL_WITH_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) + +/* + * Get Shared Memory Config + * + * Returns the Secure/Non-secure shared memory config. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of SHM + * a2 Size of of SHM + * a3 Cache settings of memory, as defined by the + * OPTEE_SMC_SHM_* values above + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7 +#define OPTEE_SMC_GET_SHM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG) + +struct optee_smc_get_shm_config_result { + unsigned long status; + unsigned long start; + unsigned long size; + unsigned long settings; +}; + +/* + * Exchanges capabilities between normal world and secure world + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES + * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_* + * a2-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + * + * Error return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world + * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* + * a2-7 Preserved + */ +/* Normal world works as a uniprocessor system */ +#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0) +/* Secure world has reserved shared memory for normal world to use */ +#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) +/* Secure world can communicate via previously unregistered shared memory */ +#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) +#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 +#define OPTEE_SMC_EXCHANGE_CAPABILITIES \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) + +struct optee_smc_exchange_capabilities_result { + unsigned long status; + unsigned long capabilities; + unsigned long reserved0; + unsigned long reserved1; +}; + +/* + * Disable and empties cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns one shared memory reference to free. To disable the + * cache and free all cached objects this function has to be called until + * it returns OPTEE_SMC_RETURN_ENOTAVAIL. 
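+ * A typical caller therefore loops: on OPTEE_SMC_RETURN_OK it frees
+ * the shared memory object named by the returned cookie and calls
+ * again, on OPTEE_SMC_RETURN_EBUSY it waits and retries, and it stops
+ * once OPTEE_SMC_RETURN_ENOTAVAIL is returned; compare
+ * optee_disable_shm_cache() in call.c.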
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Upper 32bit of a 64bit Shared memory cookie + * a2 Lower 32bit of a 64bit Shared memory cookie + * a3-7 Preserved + * + * Cache empty return register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10 +#define OPTEE_SMC_DISABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE) + +struct optee_smc_disable_shm_cache_result { + unsigned long status; + unsigned long shm_upper32; + unsigned long shm_lower32; + unsigned long reserved0; +}; + +/* + * Enable cache of shared memory objects + * + * Secure world can cache frequently used shared memory objects, for + * example objects used as RPC arguments. When secure world is idle this + * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If + * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned. + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE + * a1-6 Not used + * a7 Hypervisor Client ID register + * + * Normal return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1-7 Preserved + * + * Not idle return register usage: + * a0 OPTEE_SMC_RETURN_EBUSY + * a1-7 Preserved + */ +#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11 +#define OPTEE_SMC_ENABLE_SHM_CACHE \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE) + +/* + * Resume from RPC (for example after processing an IRQ) + * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC + * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned + * OPTEE_SMC_RETURN_RPC in a0 + * + * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above. + * + * Possible return values + * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this + * function. + * OPTEE_SMC_RETURN_OK Original call completed, result + * updated in the previously supplied. + * struct optee_msg_arg + * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal + * world. + * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume + * information was corrupt. + */ +#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3 +#define OPTEE_SMC_CALL_RETURN_FROM_RPC \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC) + +#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000 +#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF + +#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \ + ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK) + +#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX) + +/* + * Allocate memory for RPC parameter passing. The memory is used to hold a + * struct optee_msg_arg. + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC + * a1 Size in bytes of required argument memory + * a2 Not used + * a3 Resume information, must be preserved + * a4-5 Not used + * a6-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1 Upper 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated. 
+ * a2 Lower 32bits of 64bit physical pointer to allocated + * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't + * be allocated + * a3 Preserved + * a4 Upper 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a5 Lower 32bits of 64bit Shared memory cookie used when freeing + * the memory or doing an RPC + * a6-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_ALLOC 0 +#define OPTEE_SMC_RETURN_RPC_ALLOC \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC) + +/* + * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC + * + * "Call" register usage: + * a0 This value, OPTEE_SMC_RETURN_RPC_FREE + * a1 Upper 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a2 Lower 32bits of 64bit shared memory cookie belonging to this + * argument memory + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_FREE 2 +#define OPTEE_SMC_RETURN_RPC_FREE \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE) + +/* + * Deliver an IRQ in normal world. + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_IRQ + * a1-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. + * a1-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_IRQ 4 +#define OPTEE_SMC_RETURN_RPC_IRQ \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ) + +/* + * Do an RPC request. The supplied struct optee_msg_arg tells which + * request to do and the parameters for the request. The following fields + * are used (the rest are unused): + * - cmd the Request ID + * - ret return value of the request, filled in by normal world + * - num_params number of parameters for the request + * - params the parameters + * - param_attrs attributes of the parameters + * + * "Call" register usage: + * a0 OPTEE_SMC_RETURN_RPC_CMD + * a1 Upper 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a2 Lower 32bit of a 64bit Shared memory cookie holding a + * struct optee_msg_arg, must be preserved, only the data should + * be updated + * a3-7 Resume information, must be preserved + * + * "Return" register usage: + * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC. 
+ * a1-2 Not used + * a3-7 Preserved + */ +#define OPTEE_SMC_RPC_FUNC_CMD 5 +#define OPTEE_SMC_RETURN_RPC_CMD \ + OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD) + +/* Returned in a0 */ +#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF + +/* Returned in a0 only from Trusted OS functions */ +#define OPTEE_SMC_RETURN_OK 0x0 +#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1 +#define OPTEE_SMC_RETURN_EBUSY 0x2 +#define OPTEE_SMC_RETURN_ERESUME 0x3 +#define OPTEE_SMC_RETURN_EBADADDR 0x4 +#define OPTEE_SMC_RETURN_EBADCMD 0x5 +#define OPTEE_SMC_RETURN_ENOMEM 0x6 +#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7 +#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret)) + +static inline bool __optee_smc_return_is_rpc(u32 ret) +{ + return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION && + (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) == + OPTEE_SMC_RETURN_RPC_PREFIX; +} + +#endif /* OPTEE_SMC_H */ diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c new file mode 100644 index 000000000000..8814eca06021 --- /dev/null +++ b/drivers/tee/optee/rpc.c @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "optee_private.h" +#include "optee_smc.h" + +struct wq_entry { + struct list_head link; + struct completion c; + u32 key; +}; + +void optee_wait_queue_init(struct optee_wait_queue *priv) +{ + mutex_init(&priv->mu); + INIT_LIST_HEAD(&priv->db); +} + +void optee_wait_queue_exit(struct optee_wait_queue *priv) +{ + mutex_destroy(&priv->mu); +} + +static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) +{ + struct timespec64 ts; + + if (arg->num_params != 1) + goto bad; + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT) + goto bad; + + getnstimeofday64(&ts); + arg->params[0].u.value.a = ts.tv_sec; + arg->params[0].u.value.b = ts.tv_nsec; + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w; + + mutex_lock(&wq->mu); + + list_for_each_entry(w, &wq->db, link) + if (w->key == key) + goto out; + + w = kmalloc(sizeof(*w), GFP_KERNEL); + if (w) { + init_completion(&w->c); + w->key = key; + list_add_tail(&w->link, &wq->db); + } +out: + mutex_unlock(&wq->mu); + return w; +} + +static void wq_sleep(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) { + wait_for_completion(&w->c); + mutex_lock(&wq->mu); + list_del(&w->link); + mutex_unlock(&wq->mu); + kfree(w); + } +} + +static void wq_wakeup(struct optee_wait_queue *wq, u32 key) +{ + struct wq_entry *w = wq_entry_get(wq, key); + + if (w) + complete(&w->c); +} + +static void handle_rpc_func_cmd_wq(struct optee *optee, + struct optee_msg_arg *arg) +{ + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + 
switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP: + wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); + break; + case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP: + wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg) +{ + u32 msec_to_wait; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + msec_to_wait = arg->params[0].u.value.a; + + /* set task's state to interruptible sleep */ + set_current_state(TASK_INTERRUPTIBLE); + + /* take a nap */ + msleep(msec_to_wait); + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + +static void handle_rpc_supp_cmd(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_param *params; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + params = kmalloc_array(arg->num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (optee_from_msg_param(params, arg->num_params, arg->params)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto out; + } + + arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params); + + if (optee_to_msg_param(arg->params, arg->num_params, params)) + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +out: + kfree(params); +} + +static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) +{ + u32 ret; + struct tee_param param; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct tee_shm *shm; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = sz; + param.u.value.c = 0; + + ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, ¶m); + if (ret) + return ERR_PTR(-ENOMEM); + + mutex_lock(&optee->supp.ctx_mutex); + /* Increases count as secure world doesn't have a reference */ + shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); + mutex_unlock(&optee->supp.ctx_mutex); + return shm; +} + +static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + phys_addr_t pa; + struct tee_shm *shm; + size_t sz; + size_t n; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + for (n = 1; n < arg->num_params; n++) { + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + } + + sz = arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + shm = cmd_alloc_suppl(ctx, sz); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + if (IS_ERR(shm)) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + return; + } + + if (tee_shm_get_pa(shm, 0, &pa)) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + goto bad; + } + + arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + arg->params[0].u.tmem.buf_ptr = pa; + arg->params[0].u.tmem.size = sz; + arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + arg->ret = TEEC_SUCCESS; + return; +bad: + tee_shm_free(shm); +} + +static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm) +{ + struct tee_param 
param; + + param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL; + param.u.value.b = tee_shm_get_id(shm); + param.u.value.c = 0; + + /* + * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure + * world has released its reference. + * + * It's better to do this before sending the request to supplicant + * as we'd like to let the process doing the initial allocation to + * do release the last reference too in order to avoid stacking + * many pending fput() on the client process. This could otherwise + * happen if secure world does many allocate and free in a single + * invoke. + */ + tee_shm_put(shm); + + optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, ¶m); +} + +static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, + struct optee_msg_arg *arg) +{ + struct tee_shm *shm; + + arg->ret_origin = TEEC_ORIGIN_COMMS; + + if (arg->num_params != 1 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) { + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + return; + } + + shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b; + switch (arg->params[0].u.value.a) { + case OPTEE_MSG_RPC_SHM_TYPE_APPL: + cmd_free_suppl(ctx, shm); + break; + case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + tee_shm_free(shm); + break; + default: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; + } + arg->ret = TEEC_SUCCESS; +} + +static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, + struct tee_shm *shm) +{ + struct optee_msg_arg *arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); + return; + } + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_GET_TIME: + handle_rpc_func_cmd_get_time(arg); + break; + case OPTEE_MSG_RPC_CMD_WAIT_QUEUE: + handle_rpc_func_cmd_wq(optee, arg); + break; + case OPTEE_MSG_RPC_CMD_SUSPEND: + handle_rpc_func_cmd_wait(arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + handle_rpc_func_cmd_shm_alloc(ctx, arg); + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + handle_rpc_func_cmd_shm_free(ctx, arg); + break; + default: + handle_rpc_supp_cmd(ctx, arg); + } +} + +/** + * optee_handle_rpc() - handle RPC from secure world + * @ctx: context doing the RPC + * @param: value of registers for the RPC + * + * Result of RPC is written back into @param. + */ +void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct tee_shm *shm; + phys_addr_t pa; + + switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { + case OPTEE_SMC_RPC_FUNC_ALLOC: + shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED); + if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { + reg_pair_from_64(¶m->a1, ¶m->a2, pa); + reg_pair_from_64(¶m->a4, ¶m->a5, + (unsigned long)shm); + } else { + param->a1 = 0; + param->a2 = 0; + param->a4 = 0; + param->a5 = 0; + } + break; + case OPTEE_SMC_RPC_FUNC_FREE: + shm = reg_pair_to_ptr(param->a1, param->a2); + tee_shm_free(shm); + break; + case OPTEE_SMC_RPC_FUNC_IRQ: + /* + * An IRQ was raised while secure world was executing, + * since all IRQs are handled in Linux a dummy RPC is + * performed to let Linux take the IRQ through the normal + * vector. 
+ */ + break; + case OPTEE_SMC_RPC_FUNC_CMD: + shm = reg_pair_to_ptr(param->a1, param->a2); + handle_rpc_func_cmd(ctx, optee, shm); + break; + default: + pr_warn("Unknown RPC func 0x%x\n", + (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)); + break; + } + + param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; +} diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c new file mode 100644 index 000000000000..b4ea0678a436 --- /dev/null +++ b/drivers/tee/optee/supp.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include "optee_private.h" + +void optee_supp_init(struct optee_supp *supp) +{ + memset(supp, 0, sizeof(*supp)); + mutex_init(&supp->ctx_mutex); + mutex_init(&supp->thrd_mutex); + mutex_init(&supp->supp_mutex); + init_completion(&supp->data_to_supp); + init_completion(&supp->data_from_supp); +} + +void optee_supp_uninit(struct optee_supp *supp) +{ + mutex_destroy(&supp->ctx_mutex); + mutex_destroy(&supp->thrd_mutex); + mutex_destroy(&supp->supp_mutex); +} + +/** + * optee_supp_thrd_req() - request service from supplicant + * @ctx: context doing the request + * @func: function requested + * @num_params: number of elements in @param array + * @param: parameters for function + * + * Returns result of operation to be passed to secure world + */ +u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, + struct tee_param *param) +{ + bool interruptable; + struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_supp *supp = &optee->supp; + u32 ret; + + /* + * Other threads blocks here until we've copied our answer from + * supplicant. + */ + while (mutex_lock_interruptible(&supp->thrd_mutex)) { + /* See comment below on when the RPC can be interrupted. */ + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + return TEEC_ERROR_COMMUNICATION; + } + + /* + * We have exclusive access now since the supplicant at this + * point is either doing a + * wait_for_completion_interruptible(&supp->data_to_supp) or is in + * userspace still about to do the ioctl() to enter + * optee_supp_recv() below. + */ + + supp->func = func; + supp->num_params = num_params; + supp->param = param; + supp->req_posted = true; + + /* Let supplicant get the data */ + complete(&supp->data_to_supp); + + /* + * Wait for supplicant to process and return result, once we've + * returned from wait_for_completion(data_from_supp) we have + * exclusive access again. + */ + while (wait_for_completion_interruptible(&supp->data_from_supp)) { + mutex_lock(&supp->ctx_mutex); + interruptable = !supp->ctx; + if (interruptable) { + /* + * There's no supplicant available and since the + * supp->ctx_mutex currently is held none can + * become available until the mutex released + * again. + * + * Interrupting an RPC to supplicant is only + * allowed as a way of slightly improving the user + * experience in case the supplicant hasn't been + * started yet. 
During normal operation the supplicant + * will serve all requests in a timely manner and + * interrupting then wouldn't make sense. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + init_completion(&supp->data_to_supp); + } + mutex_unlock(&supp->ctx_mutex); + if (interruptable) + break; + } + + ret = supp->ret; + supp->param = NULL; + supp->req_posted = false; + + /* We're done, let someone else talk to the supplicant now. */ + mutex_unlock(&supp->thrd_mutex); + + return ret; +} + +/** + * optee_supp_recv() - receive request for supplicant + * @ctx: context receiving the request + * @func: requested function in supplicant + * @num_params: number of elements allocated in @param, updated with number + * used elements + * @param: space for parameters for @func + * + * Returns 0 on success or <0 on failure + */ +int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + int rc; + + /* + * In case two threads in one supplicant is calling this function + * simultaneously we need to protect the data with a mutex which + * we'll release before returning. + */ + mutex_lock(&supp->supp_mutex); + + if (supp->supp_next_send) { + /* + * optee_supp_recv() has been called again without + * a optee_supp_send() in between. Supplicant has + * probably been restarted before it was able to + * write back last result. Abort last request and + * wait for a new. + */ + if (supp->req_posted) { + supp->ret = TEEC_ERROR_COMMUNICATION; + supp->supp_next_send = false; + complete(&supp->data_from_supp); + } + } + + /* + * This is where supplicant will be hanging most of the + * time, let's make this interruptable so we can easily + * restart supplicant if needed. + */ + if (wait_for_completion_interruptible(&supp->data_to_supp)) { + rc = -ERESTARTSYS; + goto out; + } + + /* We have exlusive access to the data */ + + if (*num_params < supp->num_params) { + /* + * Not enough room for parameters, tell supplicant + * it failed and abort last request. + */ + supp->ret = TEEC_ERROR_COMMUNICATION; + rc = -EINVAL; + complete(&supp->data_from_supp); + goto out; + } + + *func = supp->func; + *num_params = supp->num_params; + memcpy(param, supp->param, + sizeof(struct tee_param) * supp->num_params); + + /* Allow optee_supp_send() below to do its work */ + supp->supp_next_send = true; + + rc = 0; +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} + +/** + * optee_supp_send() - send result of request from supplicant + * @ctx: context sending result + * @ret: return value of request + * @num_params: number of parameters returned + * @param: returned parameters + * + * Returns 0 on success or <0 on failure. + */ +int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param) +{ + struct tee_device *teedev = ctx->teedev; + struct optee *optee = tee_get_drvdata(teedev); + struct optee_supp *supp = &optee->supp; + size_t n; + int rc = 0; + + /* + * We still have exclusive access to the data since that's how we + * left it when returning from optee_supp_read(). + */ + + /* See comment on mutex in optee_supp_read() above */ + mutex_lock(&supp->supp_mutex); + + if (!supp->supp_next_send) { + /* + * Something strange is going on, supplicant shouldn't + * enter optee_supp_send() in this state + */ + rc = -ENOENT; + goto out; + } + + if (num_params != supp->num_params) { + /* + * Something is wrong, let supplicant restart. 
Next call to + * optee_supp_recv() will give an error to the requesting + * thread and release it. + */ + rc = -EINVAL; + goto out; + } + + /* Update out and in/out parameters */ + for (n = 0; n < num_params; n++) { + struct tee_param *p = supp->param + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + p->u.value.a = param[n].u.value.a; + p->u.value.b = param[n].u.value.b; + p->u.value.c = param[n].u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + p->u.memref.size = param[n].u.memref.size; + break; + default: + break; + } + } + supp->ret = ret; + + /* Allow optee_supp_recv() above to do its work */ + supp->supp_next_send = false; + + /* Let the requesting thread continue */ + complete(&supp->data_from_supp); +out: + mutex_unlock(&supp->supp_mutex); + return rc; +} diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c new file mode 100644 index 000000000000..5c60bf4423e6 --- /dev/null +++ b/drivers/tee/tee_core.c @@ -0,0 +1,893 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include <linux/uaccess.h> +#include "tee_private.h" + +#define TEE_NUM_DEVICES 32 + +#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) + +/* + * Unprivileged devices in the lower half range and privileged devices in + * the upper half range. 
+ */ +static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES); +static DEFINE_SPINLOCK(driver_lock); + +static struct class *tee_class; +static dev_t tee_devt; + +static int tee_open(struct inode *inode, struct file *filp) +{ + int rc; + struct tee_device *teedev; + struct tee_context *ctx; + + teedev = container_of(inode->i_cdev, struct tee_device, cdev); + if (!tee_device_get(teedev)) + return -EINVAL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto err; + } + + ctx->teedev = teedev; + INIT_LIST_HEAD(&ctx->list_shm); + filp->private_data = ctx; + rc = teedev->desc->ops->open(ctx); + if (rc) + goto err; + + return 0; +err: + kfree(ctx); + tee_device_put(teedev); + return rc; +} + +static int tee_release(struct inode *inode, struct file *filp) +{ + struct tee_context *ctx = filp->private_data; + struct tee_device *teedev = ctx->teedev; + struct tee_shm *shm; + + ctx->teedev->desc->ops->release(ctx); + mutex_lock(&ctx->teedev->mutex); + list_for_each_entry(shm, &ctx->list_shm, link) + shm->ctx = NULL; + mutex_unlock(&ctx->teedev->mutex); + kfree(ctx); + tee_device_put(teedev); + return 0; +} + +static int tee_ioctl_version(struct tee_context *ctx, + struct tee_ioctl_version_data __user *uvers) +{ + struct tee_ioctl_version_data vers; + + ctx->teedev->desc->ops->get_version(ctx->teedev, &vers); + if (copy_to_user(uvers, &vers, sizeof(vers))) + return -EFAULT; + return 0; +} + +static int tee_ioctl_shm_alloc(struct tee_context *ctx, + struct tee_ioctl_shm_alloc_data __user *udata) +{ + long ret; + struct tee_ioctl_shm_alloc_data data; + struct tee_shm *shm; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + data.id = -1; + + shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + +static int params_from_user(struct tee_context *ctx, struct tee_param *params, + size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + params[n].attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + params[n].u.value.a = ip.a; + params[n].u.value.b = ip.b; + params[n].u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * If we fail to get a pointer to a shared memory + * object (and increase the ref count) from an + * identifier we return an error. All pointers that + * has been added in params have an increased ref + * count. It's the callers responibility to do + * tee_shm_put() on all resolved pointers. 
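From user space, the two ioctls above are enough to query a device and obtain a mappable shared-memory buffer. A minimal sketch, assuming the uapi header added by this series is installed as <linux/tee.h> and that udev created /dev/tee0 for the first unprivileged device:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_version_data vers;
	struct tee_ioctl_shm_alloc_data shm;
	void *p;
	int fd, shm_fd;

	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0)
		return 1;

	if (ioctl(fd, TEE_IOC_VERSION, &vers))
		return 1;
	printf("implementation id %u\n", vers.impl_id);

	memset(&shm, 0, sizeof(shm));	/* input flags must be zero */
	shm.size = 4096;
	shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &shm);	/* returns a dma-buf fd */
	if (shm_fd < 0)
		return 1;

	/* Mapping the fd gives access to the buffer identified towards
	 * the TEE by shm.id */
	p = mmap(NULL, shm.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 shm_fd, 0);
	if (p != MAP_FAILED)
		munmap(p, shm.size);

	close(shm_fd);
	close(fd);
	return 0;
}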
+ */ + shm = tee_shm_get_from_id(ctx, ip.c); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + params[n].u.memref.shm_offs = ip.a; + params[n].u.memref.size = ip.b; + params[n].u.memref.shm = shm; + break; + default: + /* Unknown attribute */ + return -EINVAL; + } + } + return 0; +} + +static int params_to_user(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param __user *up = uparams + n; + struct tee_param *p = params + n; + + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + if (put_user(p->u.value.a, &up->a) || + put_user(p->u.value.b, &up->b) || + put_user(p->u.value.c, &up->c)) + return -EFAULT; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (put_user((u64)p->u.memref.size, &up->b)) + return -EFAULT; + default: + break; + } + } + return 0; +} + +static bool param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +static int tee_ioctl_open_session(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_open_session_arg __user *uarg; + struct tee_ioctl_open_session_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + bool have_session = false; + + if (!ctx->teedev->desc->ops->open_session) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_open_session_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params); + if (rc) + goto out; + have_session = true; + + if (put_user(arg.session, &uarg->session) || + put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + /* + * If we've succeeded to open the session but failed to communicate + * it back to user space, close the session again to avoid leakage. 
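The buffer-based ioctls pack the argument struct and its trailing parameter array into one contiguous allocation described by struct tee_ioctl_buf_data. A hedged user-space sketch of TEE_IOC_OPEN_SESSION with a single value parameter; field names beyond those visible in tee_ioctl_open_session() follow the uapi header added by this series, and the UUID selecting the trusted application is only hinted at:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

static int example_open_session(int fd, uint32_t *session)
{
	size_t len = sizeof(struct tee_ioctl_open_session_arg) +
		     sizeof(struct tee_ioctl_param);
	struct tee_ioctl_open_session_arg *arg = calloc(1, len);
	struct tee_ioctl_buf_data data;
	int rc = -1;

	if (!arg)
		return -1;

	/* arg->uuid (not shown) would identify the trusted application */
	arg->num_params = 1;
	arg->params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].a = 42;		/* some value the TA expects */

	data.buf_ptr = (uintptr_t)arg;
	data.buf_len = len;	/* must match sizeof(*arg) + params size */

	if (!ioctl(fd, TEE_IOC_OPEN_SESSION, &data) && !arg->ret) {
		*session = arg->session; /* handle for later TEE_IOC_INVOKE */
		rc = 0;
	}
	free(arg);
	return rc;
}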
+ */ + if (rc && have_session && ctx->teedev->desc->ops->close_session) + ctx->teedev->desc->ops->close_session(ctx, arg.session); + + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + + return rc; +} + +static int tee_ioctl_invoke(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + size_t n; + struct tee_ioctl_buf_data buf; + struct tee_ioctl_invoke_arg __user *uarg; + struct tee_ioctl_invoke_arg arg; + struct tee_ioctl_param __user *uparams = NULL; + struct tee_param *params = NULL; + + if (!ctx->teedev->desc->ops->invoke_func) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_ioctl_invoke_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + return -EINVAL; + + if (arg.num_params) { + params = kcalloc(arg.num_params, sizeof(struct tee_param), + GFP_KERNEL); + if (!params) + return -ENOMEM; + uparams = uarg->params; + rc = params_from_user(ctx, params, arg.num_params, uparams); + if (rc) + goto out; + } + + rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params); + if (rc) + goto out; + + if (put_user(arg.ret, &uarg->ret) || + put_user(arg.ret_origin, &uarg->ret_origin)) { + rc = -EFAULT; + goto out; + } + rc = params_to_user(uparams, arg.num_params, params); +out: + if (params) { + /* Decrease ref count for all valid shared memory pointers */ + for (n = 0; n < arg.num_params; n++) + if (param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); + kfree(params); + } + return rc; +} + +static int tee_ioctl_cancel(struct tee_context *ctx, + struct tee_ioctl_cancel_arg __user *uarg) +{ + struct tee_ioctl_cancel_arg arg; + + if (!ctx->teedev->desc->ops->cancel_req) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id, + arg.session); +} + +static int +tee_ioctl_close_session(struct tee_context *ctx, + struct tee_ioctl_close_session_arg __user *uarg) +{ + struct tee_ioctl_close_session_arg arg; + + if (!ctx->teedev->desc->ops->close_session) + return -EINVAL; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + return ctx->teedev->desc->ops->close_session(ctx, arg.session); +} + +static int params_to_supp(struct tee_context *ctx, + struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param ip; + struct tee_param *p = params + n; + + ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + switch (p->attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + ip.a = p->u.value.a; + ip.b = p->u.value.b; + ip.c = p->u.value.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + ip.b = p->u.memref.size; + if (!p->u.memref.shm) { + ip.a = 0; + ip.c = (u64)-1; /* invalid shm id */ + break; + } + ip.a = p->u.memref.shm_offs; + ip.c = p->u.memref.shm->id; + break; + default: + ip.a = 0; + ip.b = 0; + ip.c = 0; + break; + } + + 
if (copy_to_user(uparams + n, &ip, sizeof(ip))) + return -EFAULT; + } + + return 0; +} + +static int tee_ioctl_supp_recv(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + int rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_recv_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 func; + + if (!ctx->teedev->desc->ops->supp_recv) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); + if (rc) + goto out; + + if (put_user(func, &uarg->func) || + put_user(num_params, &uarg->num_params)) { + rc = -EFAULT; + goto out; + } + + rc = params_to_supp(ctx, uarg->params, num_params, params); +out: + kfree(params); + return rc; +} + +static int params_from_supp(struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_param *p = params + n; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) + return -EINVAL; + + p->attr = ip.attr; + switch (ip.attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + /* Only out and in/out values can be updated */ + p->u.value.a = ip.a; + p->u.value.b = ip.b; + p->u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + /* + * Only the size of the memref can be updated. + * Since we don't have access to the original + * parameters here, only store the supplied size. + * The driver will copy the updated size into the + * original parameters. 
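The two supplicant ioctls are meant to be driven from a dedicated user-space daemon that blocks in TEE_IOC_SUPPL_RECV, services the request, and answers with TEE_IOC_SUPPL_SEND. A sketch of that loop, assuming the privileged /dev/teepriv0 node, a caller-supplied request handler, and that the recv/send argument structs share the num_params/params layout of the uapi header added by this series:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

#define SUPP_NUM_PARAMS 4

static void example_supplicant_loop(int fd,
		void (*handle_request)(struct tee_iocl_supp_recv_arg *req))
{
	size_t len = sizeof(struct tee_iocl_supp_recv_arg) +
		     SUPP_NUM_PARAMS * sizeof(struct tee_ioctl_param);
	struct tee_iocl_supp_recv_arg *recv = calloc(1, len);
	struct tee_iocl_supp_send_arg *send = (void *)recv;
	struct tee_ioctl_buf_data data = {
		.buf_ptr = (uintptr_t)recv,
		.buf_len = len,
	};

	if (!recv)
		return;

	for (;;) {
		uint32_t num_params;

		recv->num_params = SUPP_NUM_PARAMS;
		if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
			break;	/* e.g. interrupted; a real daemon retries */

		/* recv->func selects the request, recv->params[] carries
		 * recv->num_params parameters to act on */
		num_params = recv->num_params;
		handle_request(recv);

		/* Answer in place and hand the parameters back */
		send->ret = 0;			/* TEEC_SUCCESS */
		send->num_params = num_params;
		if (ioctl(fd, TEE_IOC_SUPPL_SEND, &data))
			break;
	}
	free(recv);
}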
+ */ + p->u.memref.shm = NULL; + p->u.memref.shm_offs = 0; + p->u.memref.size = ip.b; + break; + default: + memset(&p->u, 0, sizeof(p->u)); + break; + } + } + return 0; +} + +static int tee_ioctl_supp_send(struct tee_context *ctx, + struct tee_ioctl_buf_data __user *ubuf) +{ + long rc; + struct tee_ioctl_buf_data buf; + struct tee_iocl_supp_send_arg __user *uarg; + struct tee_param *params; + u32 num_params; + u32 ret; + + /* Not valid for this driver */ + if (!ctx->teedev->desc->ops->supp_send) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, sizeof(buf))) + return -EFAULT; + + if (buf.buf_len > TEE_MAX_ARG_SIZE || + buf.buf_len < sizeof(struct tee_iocl_supp_send_arg)) + return -EINVAL; + + uarg = u64_to_user_ptr(buf.buf_ptr); + if (get_user(ret, &uarg->ret) || + get_user(num_params, &uarg->num_params)) + return -EFAULT; + + if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + return -EINVAL; + + params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); + if (!params) + return -ENOMEM; + + rc = params_from_supp(params, num_params, uarg->params); + if (rc) + goto out; + + rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params); +out: + kfree(params); + return rc; +} + +static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct tee_context *ctx = filp->private_data; + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case TEE_IOC_VERSION: + return tee_ioctl_version(ctx, uarg); + case TEE_IOC_SHM_ALLOC: + return tee_ioctl_shm_alloc(ctx, uarg); + case TEE_IOC_OPEN_SESSION: + return tee_ioctl_open_session(ctx, uarg); + case TEE_IOC_INVOKE: + return tee_ioctl_invoke(ctx, uarg); + case TEE_IOC_CANCEL: + return tee_ioctl_cancel(ctx, uarg); + case TEE_IOC_CLOSE_SESSION: + return tee_ioctl_close_session(ctx, uarg); + case TEE_IOC_SUPPL_RECV: + return tee_ioctl_supp_recv(ctx, uarg); + case TEE_IOC_SUPPL_SEND: + return tee_ioctl_supp_send(ctx, uarg); + default: + return -EINVAL; + } +} + +static const struct file_operations tee_fops = { + .owner = THIS_MODULE, + .open = tee_open, + .release = tee_release, + .unlocked_ioctl = tee_ioctl, + .compat_ioctl = tee_ioctl, +}; + +static void tee_release_device(struct device *dev) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + mutex_destroy(&teedev->mutex); + idr_destroy(&teedev->idr); + kfree(teedev); +} + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). 
+ * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data) +{ + struct tee_device *teedev; + void *ret; + int rc; + int offs = 0; + + if (!teedesc || !teedesc->name || !teedesc->ops || + !teedesc->ops->get_version || !teedesc->ops->open || + !teedesc->ops->release || !pool) + return ERR_PTR(-EINVAL); + + teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); + if (!teedev) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + if (teedesc->flags & TEE_DESC_PRIVILEGED) + offs = TEE_NUM_DEVICES / 2; + + spin_lock(&driver_lock); + teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs); + if (teedev->id < TEE_NUM_DEVICES) + set_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + + if (teedev->id >= TEE_NUM_DEVICES) { + ret = ERR_PTR(-ENOMEM); + goto err; + } + + snprintf(teedev->name, sizeof(teedev->name), "tee%s%d", + teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "", + teedev->id - offs); + + teedev->dev.class = tee_class; + teedev->dev.release = tee_release_device; + teedev->dev.parent = dev; + + teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id); + + rc = dev_set_name(&teedev->dev, "%s", teedev->name); + if (rc) { + ret = ERR_PTR(rc); + goto err_devt; + } + + cdev_init(&teedev->cdev, &tee_fops); + teedev->cdev.owner = teedesc->owner; + teedev->cdev.kobj.parent = &teedev->dev.kobj; + + dev_set_drvdata(&teedev->dev, driver_data); + device_initialize(&teedev->dev); + + /* 1 as tee_device_unregister() does one final tee_device_put() */ + teedev->num_users = 1; + init_completion(&teedev->c_no_users); + mutex_init(&teedev->mutex); + idr_init(&teedev->idr); + + teedev->desc = teedesc; + teedev->pool = pool; + + return teedev; +err_devt: + unregister_chrdev_region(teedev->dev.devt, 1); +err: + pr_err("could not register %s driver\n", + teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client"); + if (teedev && teedev->id < TEE_NUM_DEVICES) { + spin_lock(&driver_lock); + clear_bit(teedev->id, dev_mask); + spin_unlock(&driver_lock); + } + kfree(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_device_alloc); + +static ssize_t implementation_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tee_device *teedev = container_of(dev, struct tee_device, dev); + struct tee_ioctl_version_data vers; + + teedev->desc->ops->get_version(teedev, &vers); + return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); +} +static DEVICE_ATTR_RO(implementation_id); + +static struct attribute *tee_dev_attrs[] = { + &dev_attr_implementation_id.attr, + NULL +}; + +static const struct attribute_group tee_dev_group = { + .attrs = tee_dev_attrs, +}; + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. 
+ * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev) +{ + int rc; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + dev_err(&teedev->dev, "attempt to register twice\n"); + return -EINVAL; + } + + rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1); + if (rc) { + dev_err(&teedev->dev, + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + return rc; + } + + rc = device_add(&teedev->dev); + if (rc) { + dev_err(&teedev->dev, + "unable to device_add() %s, major %d, minor %d, err=%d\n", + teedev->name, MAJOR(teedev->dev.devt), + MINOR(teedev->dev.devt), rc); + goto err_device_add; + } + + rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group); + if (rc) { + dev_err(&teedev->dev, + "failed to create sysfs attributes, err=%d\n", rc); + goto err_sysfs_create_group; + } + + teedev->flags |= TEE_DEVICE_FLAG_REGISTERED; + return 0; + +err_sysfs_create_group: + device_del(&teedev->dev); +err_device_add: + cdev_del(&teedev->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_device_register); + +void tee_device_put(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + /* Shouldn't put in this state */ + if (!WARN_ON(!teedev->desc)) { + teedev->num_users--; + if (!teedev->num_users) { + teedev->desc = NULL; + complete(&teedev->c_no_users); + } + } + mutex_unlock(&teedev->mutex); +} + +bool tee_device_get(struct tee_device *teedev) +{ + mutex_lock(&teedev->mutex); + if (!teedev->desc) { + mutex_unlock(&teedev->mutex); + return false; + } + teedev->num_users++; + mutex_unlock(&teedev->mutex); + return true; +} + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev) +{ + if (!teedev) + return; + + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) { + sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group); + cdev_del(&teedev->cdev); + device_del(&teedev->dev); + } + + tee_device_put(teedev); + wait_for_completion(&teedev->c_no_users); + + /* + * No need to take a mutex any longer now since teedev->desc was + * set to NULL before teedev->c_no_users was completed. + */ + + teedev->pool = NULL; + + put_device(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_device_unregister); + +/** + * tee_get_drvdata() - Return driver_data pointer + * @teedev: Device containing the driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). 
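Putting the allocation and registration calls together, a TEE driver (such as the OP-TEE driver later in this series) hooks into the subsystem roughly as follows. The ops structure name comes from <linux/tee_drv.h> added by this series, and the my_* callbacks are placeholders; only get_version, open and release are mandatory per tee_device_alloc() above:

static void my_get_version(struct tee_device *teedev,
			   struct tee_ioctl_version_data *vers)
{
	*vers = (struct tee_ioctl_version_data){ .impl_id = 42 };
}

static int my_open(struct tee_context *ctx)
{
	return 0;
}

static void my_release(struct tee_context *ctx)
{
}

static const struct tee_driver_ops my_tee_ops = {
	.get_version = my_get_version,
	.open = my_open,
	.release = my_release,
	/* open_session, invoke_func, ... added by a full driver */
};

static const struct tee_desc my_tee_desc = {
	.name = "mytee-clnt",
	.ops = &my_tee_ops,
	.owner = THIS_MODULE,
};

static int my_register_tee(struct device *dev, struct tee_shm_pool *pool,
			   void *driver_data)
{
	struct tee_device *teedev;
	int rc;

	teedev = tee_device_alloc(&my_tee_desc, dev, pool, driver_data);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	rc = tee_device_register(teedev);
	if (rc)
		tee_device_unregister(teedev); /* needed even after failure */
	return rc;
}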
+ */ +void *tee_get_drvdata(struct tee_device *teedev) +{ + return dev_get_drvdata(&teedev->dev); +} +EXPORT_SYMBOL_GPL(tee_get_drvdata); + +static int __init tee_init(void) +{ + int rc; + + tee_class = class_create(THIS_MODULE, "tee"); + if (IS_ERR(tee_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(tee_class); + } + + rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee"); + if (rc) { + pr_err("failed to allocate char dev region\n"); + class_destroy(tee_class); + tee_class = NULL; + } + + return rc; +} + +static void __exit tee_exit(void) +{ + class_destroy(tee_class); + tee_class = NULL; + unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES); +} + +subsys_initcall(tee_init); +module_exit(tee_exit); + +MODULE_AUTHOR("Linaro"); +MODULE_DESCRIPTION("TEE Driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h new file mode 100644 index 000000000000..21cb6be8bce9 --- /dev/null +++ b/drivers/tee/tee_private.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef TEE_PRIVATE_H +#define TEE_PRIVATE_H + +#include <linux/cdev.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/types.h> + +struct tee_device; + +/** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +struct tee_shm_pool_mgr; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool - shared memory pool + * @private_mgr: pool manager for shared memory only between kernel + * and secure world + * @dma_buf_mgr: pool manager for shared memory exported to user space + * @destroy: called when destroying the pool + * @private_data: private data for the pool + */ +struct tee_shm_pool { + struct tee_shm_pool_mgr private_mgr; + struct tee_shm_pool_mgr dma_buf_mgr; + void (*destroy)(struct 
tee_shm_pool *pool); + void *private_data; +}; + +#define TEE_DEVICE_FLAG_REGISTERED 0x1 +#define TEE_MAX_DEV_NAME_LEN 32 + +/** + * struct tee_device - TEE Device representation + * @name: name of device + * @desc: description of device + * @id: unique id of device + * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above + * @dev: embedded basic device structure + * @cdev: embedded cdev + * @num_users: number of active users of this device + * @c_no_user: completion used when unregistering the device + * @mutex: mutex protecting @num_users and @idr + * @idr: register of shared memory object allocated on this device + * @pool: shared memory pool + */ +struct tee_device { + char name[TEE_MAX_DEV_NAME_LEN]; + const struct tee_desc *desc; + int id; + unsigned int flags; + + struct device dev; + struct cdev cdev; + + size_t num_users; + struct completion c_no_users; + struct mutex mutex; /* protects num_users and idr */ + + struct idr idr; + struct tee_shm_pool *pool; +}; + +int tee_shm_init(void); + +int tee_shm_get_fd(struct tee_shm *shm); + +bool tee_device_get(struct tee_device *teedev); +void tee_device_put(struct tee_device *teedev); + +#endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c new file mode 100644 index 000000000000..0be1e3e93bee --- /dev/null +++ b/drivers/tee/tee_shm.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2015-2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/fdtable.h> +#include <linux/idr.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static void tee_shm_release(struct tee_shm *shm) +{ + struct tee_device *teedev = shm->teedev; + struct tee_shm_pool_mgr *poolm; + + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + if (shm->ctx) + list_del(&shm->link); + mutex_unlock(&teedev->mutex); + + if (shm->flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + poolm->ops->free(poolm, shm); + kfree(shm); + + tee_device_put(teedev); +} + +static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment + *attach, enum dma_data_direction dir) +{ + return NULL; +} + +static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ +} + +static void tee_shm_op_release(struct dma_buf *dmabuf) +{ + struct tee_shm *shm = dmabuf->priv; + + tee_shm_release(shm); +} + +static void *tee_shm_op_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static void *tee_shm_op_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + return NULL; +} + +static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct tee_shm *shm = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, + size, vma->vm_page_prot); +} + +static struct dma_buf_ops tee_shm_dma_buf_ops = { + .map_dma_buf = tee_shm_op_map_dma_buf, + .unmap_dma_buf = tee_shm_op_unmap_dma_buf, + .release = tee_shm_op_release, + .kmap_atomic = tee_shm_op_kmap_atomic, + .kmap = tee_shm_op_kmap, + .mmap = tee_shm_op_mmap, +}; + +/** + * tee_shm_alloc() - Allocate shared memory + * @ctx: Context that allocates the shared memory + * @size: Requested size of shared memory + * @flags: Flags setting properties for the requested shared memory. + * + * Memory allocated as global shared memory is automatically freed when the + * TEE file pointer is closed. The @flags field uses the bits defined by + * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be + * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and + * associated with a dma-buf handle, else driver private memory. 
+ */ +struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags) +{ + struct tee_device *teedev = ctx->teedev; + struct tee_shm_pool_mgr *poolm = NULL; + struct tee_shm *shm; + void *ret; + int rc; + + if (!(flags & TEE_SHM_MAPPED)) { + dev_err(teedev->dev.parent, + "only mapped allocations supported\n"); + return ERR_PTR(-EINVAL); + } + + if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) { + dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags); + return ERR_PTR(-EINVAL); + } + + if (!tee_device_get(teedev)) + return ERR_PTR(-EINVAL); + + if (!teedev->pool) { + /* teedev has been detached from driver */ + ret = ERR_PTR(-EINVAL); + goto err_dev_put; + } + + shm = kzalloc(sizeof(*shm), GFP_KERNEL); + if (!shm) { + ret = ERR_PTR(-ENOMEM); + goto err_dev_put; + } + + shm->flags = flags; + shm->teedev = teedev; + shm->ctx = ctx; + if (flags & TEE_SHM_DMA_BUF) + poolm = &teedev->pool->dma_buf_mgr; + else + poolm = &teedev->pool->private_mgr; + + rc = poolm->ops->alloc(poolm, shm, size); + if (rc) { + ret = ERR_PTR(rc); + goto err_kfree; + } + + mutex_lock(&teedev->mutex); + shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL); + mutex_unlock(&teedev->mutex); + if (shm->id < 0) { + ret = ERR_PTR(shm->id); + goto err_pool_free; + } + + if (flags & TEE_SHM_DMA_BUF) { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = shm->size; + exp_info.flags = O_RDWR; + exp_info.priv = shm; + + shm->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(shm->dmabuf)) { + ret = ERR_CAST(shm->dmabuf); + goto err_rem; + } + } + mutex_lock(&teedev->mutex); + list_add_tail(&shm->link, &ctx->list_shm); + mutex_unlock(&teedev->mutex); + + return shm; +err_rem: + mutex_lock(&teedev->mutex); + idr_remove(&teedev->idr, shm->id); + mutex_unlock(&teedev->mutex); +err_pool_free: + poolm->ops->free(poolm, shm); +err_kfree: + kfree(shm); +err_dev_put: + tee_device_put(teedev); + return ret; +} +EXPORT_SYMBOL_GPL(tee_shm_alloc); + +/** + * tee_shm_get_fd() - Increase reference count and return file descriptor + * @shm: Shared memory handle + * @returns user space file descriptor to shared memory + */ +int tee_shm_get_fd(struct tee_shm *shm) +{ + u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF; + int fd; + + if ((shm->flags & req_flags) != req_flags) + return -EINVAL; + + fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); + if (fd >= 0) + get_dma_buf(shm->dmabuf); + return fd; +} + +/** + * tee_shm_free() - Free shared memory + * @shm: Handle to shared memory to free + */ +void tee_shm_free(struct tee_shm *shm) +{ + /* + * dma_buf_put() decreases the dmabuf reference counter and will + * call tee_shm_release() when the last reference is gone. + * + * In the case of driver private memory we call tee_shm_release + * directly instead as it doesn't have a reference counter. 
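In-kernel users combine the helpers above in a straightforward allocate/translate/free pattern, much like the OP-TEE driver does for its message arguments. A minimal sketch (the 4 KiB size is arbitrary):

static int example_use_shm(struct tee_context *ctx)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	void *va;
	int rc;

	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);	  /* kernel mapping of the buffer */
	rc = tee_shm_get_pa(shm, 0, &pa); /* address handed to secure world */
	if (IS_ERR(va) || rc) {
		tee_shm_free(shm);
		return rc ? rc : PTR_ERR(va);
	}

	memset(va, 0, 4096);
	/* ... pass pa to the TEE, e.g. in an OPTEE_SMC_CALL_WITH_ARG ... */

	tee_shm_free(shm);
	return 0;
}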
+ */ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); + else + tee_shm_release(shm); +} +EXPORT_SYMBOL_GPL(tee_shm_free); + +/** + * tee_shm_va2pa() - Get physical address of a virtual address + * @shm: Shared memory handle + * @va: Virtual address to tranlsate + * @pa: Returned physical address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) +{ + /* Check that we're in the range of the shm */ + if ((char *)va < (char *)shm->kaddr) + return -EINVAL; + if ((char *)va >= ((char *)shm->kaddr + shm->size)) + return -EINVAL; + + return tee_shm_get_pa( + shm, (unsigned long)va - (unsigned long)shm->kaddr, pa); +} +EXPORT_SYMBOL_GPL(tee_shm_va2pa); + +/** + * tee_shm_pa2va() - Get virtual address of a physical address + * @shm: Shared memory handle + * @pa: Physical address to tranlsate + * @va: Returned virtual address + * @returns 0 on success and < 0 on failure + */ +int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) +{ + /* Check that we're in the range of the shm */ + if (pa < shm->paddr) + return -EINVAL; + if (pa >= (shm->paddr + shm->size)) + return -EINVAL; + + if (va) { + void *v = tee_shm_get_va(shm, pa - shm->paddr); + + if (IS_ERR(v)) + return PTR_ERR(v); + *va = v; + } + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_pa2va); + +/** + * tee_shm_get_va() - Get virtual address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @returns virtual address of the shared memory + offs if offs is within + * the bounds of this shared memory, else an ERR_PTR + */ +void *tee_shm_get_va(struct tee_shm *shm, size_t offs) +{ + if (offs >= shm->size) + return ERR_PTR(-EINVAL); + return (char *)shm->kaddr + offs; +} +EXPORT_SYMBOL_GPL(tee_shm_get_va); + +/** + * tee_shm_get_pa() - Get physical address of a shared memory plus an offset + * @shm: Shared memory handle + * @offs: Offset from start of this shared memory + * @pa: Physical address to return + * @returns 0 if offs is within the bounds of this shared memory, else an + * error code. 
+ */ +int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa) +{ + if (offs >= shm->size) + return -EINVAL; + if (pa) + *pa = shm->paddr + offs; + return 0; +} +EXPORT_SYMBOL_GPL(tee_shm_get_pa); + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id) +{ + struct tee_device *teedev; + struct tee_shm *shm; + + if (!ctx) + return ERR_PTR(-EINVAL); + + teedev = ctx->teedev; + mutex_lock(&teedev->mutex); + shm = idr_find(&teedev->idr, id); + if (!shm || shm->ctx != ctx) + shm = ERR_PTR(-EINVAL); + else if (shm->flags & TEE_SHM_DMA_BUF) + get_dma_buf(shm->dmabuf); + mutex_unlock(&teedev->mutex); + return shm; +} +EXPORT_SYMBOL_GPL(tee_shm_get_from_id); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} +EXPORT_SYMBOL_GPL(tee_shm_get_id); + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm) +{ + if (shm->flags & TEE_SHM_DMA_BUF) + dma_buf_put(shm->dmabuf); +} +EXPORT_SYMBOL_GPL(tee_shm_put); diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c new file mode 100644 index 000000000000..fb4f8522a526 --- /dev/null +++ b/drivers/tee/tee_shm_pool.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/genalloc.h> +#include <linux/slab.h> +#include <linux/tee_drv.h> +#include "tee_private.h" + +static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm, size_t size) +{ + unsigned long va; + struct gen_pool *genpool = poolm->private_data; + size_t s = roundup(size, 1 << genpool->min_alloc_order); + + va = gen_pool_alloc(genpool, s); + if (!va) + return -ENOMEM; + + memset((void *)va, 0, s); + shm->kaddr = (void *)va; + shm->paddr = gen_pool_virt_to_phys(genpool, va); + shm->size = s; + return 0; +} + +static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm, + struct tee_shm *shm) +{ + gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr, + shm->size); + shm->kaddr = NULL; +} + +static const struct tee_shm_pool_mgr_ops pool_ops_generic = { + .alloc = pool_op_gen_alloc, + .free = pool_op_gen_free, +}; + +static void pool_res_mem_destroy(struct tee_shm_pool *pool) +{ + gen_pool_destroy(pool->private_mgr.private_data); + gen_pool_destroy(pool->dma_buf_mgr.private_data); +} + +static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr, + struct tee_shm_pool_mem_info *info, + int min_alloc_order) +{ + size_t page_mask = PAGE_SIZE - 1; + struct gen_pool *genpool = NULL; + int rc; + + /* + * Start and end must be page aligned + */ + if ((info->vaddr & page_mask) || (info->paddr & page_mask) || + (info->size & page_mask)) + return -EINVAL; + + genpool = gen_pool_create(min_alloc_order, -1); + if (!genpool) + return -ENOMEM; + + gen_pool_set_algo(genpool, gen_pool_best_fit, NULL); + rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size, + -1); + if (rc) { + gen_pool_destroy(genpool); + return rc; + } + + mgr->private_data = genpool; + mgr->ops = &pool_ops_generic; + return 0; +} + +/** + * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved + * memory range + * @priv_info: Information for driver private shared memory pool + * @dmabuf_info: Information for dma-buf shared memory pool + * + * Start and end of pools will must be page aligned. + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool * +tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, + struct tee_shm_pool_mem_info *dmabuf_info) +{ + struct tee_shm_pool *pool = NULL; + int ret; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) { + ret = -ENOMEM; + goto err; + } + + /* + * Create the pool for driver private shared memory + */ + ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info, + 3 /* 8 byte aligned */); + if (ret) + goto err; + + /* + * Create the pool for dma_buf shared memory + */ + ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info, + PAGE_SHIFT); + if (ret) + goto err; + + pool->destroy = pool_res_mem_destroy; + return pool; +err: + if (ret == -ENOMEM) + pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__); + if (pool && pool->private_mgr.private_data) + gen_pool_destroy(pool->private_mgr.private_data); + kfree(pool); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * There must be no remaining shared memory allocated from this pool when + * this function is called. 
+ */ +void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->destroy(pool); + kfree(pool); +} +EXPORT_SYMBOL_GPL(tee_shm_pool_free);
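Finally, a sketch of how a driver might turn a reserved, physically contiguous region into the two pools that tee_shm_pool_alloc_res_mem() expects. How the region is obtained is driver specific; memremap() of a range reported by the secure world is assumed here, and begin/size are assumed page aligned with size a multiple of two pages so both halves stay aligned:

static struct tee_shm_pool *example_make_pool(phys_addr_t begin, size_t size)
{
	void *va = memremap(begin, size, MEMREMAP_WB);
	struct tee_shm_pool_mem_info priv_info, dmabuf_info;
	struct tee_shm_pool *pool;

	if (!va)
		return ERR_PTR(-EINVAL);

	/* First half for driver-private allocations ... */
	priv_info.vaddr = (unsigned long)va;
	priv_info.paddr = begin;
	priv_info.size = size / 2;

	/* ... second half for dma-buf backed allocations to user space */
	dmabuf_info.vaddr = (unsigned long)va + size / 2;
	dmabuf_info.paddr = begin + size / 2;
	dmabuf_info.size = size / 2;

	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
	if (IS_ERR(pool))
		memunmap(va);
	return pool;
}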