author    John Stultz <john.stultz@linaro.org>  2012-04-19 11:26:53 -0700
committer John Stultz <john.stultz@linaro.org>  2012-04-19 11:26:53 -0700
commit    f3abc4e3e5a0b51c5f8772876e2d1df500dc8b01 (patch)
tree      f83168a7339d2ef223af5a2ab256fbd4c8d98c7e
parent    78ae076bed244402bb98a757dc6ff6152d15646d (diff)
parent    a0ec4361e4539e30cf1c5de7ddfd2dadcd8e1595 (diff)
Merge branch 'upstream/android-3.4' into linaro-android-3.4  (tracking-linaro-android-3.4-3.4-rc3-2012.04-0)
-rw-r--r--  Documentation/cpu-freq/governors.txt         16
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c       106
-rw-r--r--  include/trace/events/cpufreq_interactive.h   87
-rw-r--r--  net/netfilter/xt_qtaguid.c                   52
4 files changed, 226 insertions, 35 deletions
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index d6ef94a95cc8..8a83c0a5c287 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -223,11 +223,21 @@ frequency before ramping down. This is to ensure that the governor has
seen enough historic cpu load data to determine the appropriate
workload. Default is 80000 uS.
-go_maxspeed_load: The CPU load at which to ramp to max speed. Default
-is 85.
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load. If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher. Default is maximum speed.
+
+go_hispeed_load: The CPU load at which to ramp to the intermediate "hi
+speed". Default is 85%.
+
+above_hispeed_delay: Once speed is set to hispeed_freq, wait for this
+long before bumping speed higher in response to continued high load.
+Default is 20000 uS.
timer_rate: Sample rate for reevaluating cpu load when the system is
-not idle. Default is 30000 uS.
+not idle. Default is 20000 uS.
+
3. The Governor Interface in the CPUfreq Core
=============================================
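
The tunables documented above are the governor's global sysfs attributes (added or retuned by the driver changes below). As a rough illustration only -- the /sys/devices/system/cpu/cpufreq/interactive/ path is an assumption based on the usual cpufreq layout, not something this patch defines -- a small userspace helper could set the documented values like this:

/*
 * Illustrative sketch: write values into the interactive governor's
 * global sysfs tunables. The sysfs path is assumed, not defined here.
 */
#include <stdio.h>

static int write_tunable(const char *name, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpufreq/interactive/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(value, f);
	return fclose(f);
}

int main(void)
{
	write_tunable("go_hispeed_load", "85");
	write_tunable("above_hispeed_delay", "20000");	/* uS */
	write_tunable("timer_rate", "20000");		/* uS */
	return 0;
}
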
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 8a5cd15512c7..fc3628dc0d56 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -29,6 +29,9 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
#include <asm/cputime.h>
static atomic_t active_count = ATOMIC_INIT(0);
@@ -40,8 +43,8 @@ struct cpufreq_interactive_cpuinfo {
u64 idle_exit_time;
u64 timer_run_time;
int idling;
- u64 freq_change_time;
- u64 freq_change_time_in_idle;
+ u64 target_set_time;
+ u64 target_set_time_in_idle;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
unsigned int target_freq;
@@ -64,21 +67,28 @@ static struct mutex set_speed_lock;
static u64 hispeed_freq;
/* Go to hi speed when CPU load at or above this value. */
-#define DEFAULT_GO_HISPEED_LOAD 95
+#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
-#define DEFAULT_MIN_SAMPLE_TIME 20 * USEC_PER_MSEC
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;
/*
* The sample rate of the timer used to increase frequency
*/
-#define DEFAULT_TIMER_RATE 20 * USEC_PER_MSEC
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;
+/*
+ * Wait this long before raising speed above hispeed, by default a single
+ * timer interval.
+ */
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned long above_hispeed_delay_val;
+
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -144,8 +154,9 @@ static void cpufreq_interactive_timer(unsigned long data)
else
cpu_load = 100 * (delta_time - delta_idle) / delta_time;
- delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
- delta_time = (unsigned int)(pcpu->timer_run_time - pcpu->freq_change_time);
+ delta_idle = (unsigned int)(now_idle - pcpu->target_set_time_in_idle);
+ delta_time = (unsigned int)(pcpu->timer_run_time -
+ pcpu->target_set_time);
if ((delta_time == 0) || (delta_idle > delta_time))
load_since_change = 0;
@@ -162,12 +173,26 @@ static void cpufreq_interactive_timer(unsigned long data)
cpu_load = load_since_change;
if (cpu_load >= go_hispeed_load) {
- if (pcpu->policy->cur == pcpu->policy->min)
+ if (pcpu->policy->cur == pcpu->policy->min) {
new_freq = hispeed_freq;
- else
+ } else {
new_freq = pcpu->policy->max * cpu_load / 100;
+
+ if (new_freq < hispeed_freq)
+ new_freq = hispeed_freq;
+
+ if (pcpu->target_freq == hispeed_freq &&
+ new_freq > hispeed_freq &&
+ pcpu->timer_run_time - pcpu->target_set_time
+ < above_hispeed_delay_val) {
+ trace_cpufreq_interactive_notyet(data, cpu_load,
+ pcpu->target_freq,
+ new_freq);
+ goto rearm;
+ }
+ }
} else {
- new_freq = pcpu->policy->cur * cpu_load / 100;
+ new_freq = pcpu->policy->max * cpu_load / 100;
}
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
@@ -180,19 +205,31 @@ static void cpufreq_interactive_timer(unsigned long data)
new_freq = pcpu->freq_table[index].frequency;
- if (pcpu->target_freq == new_freq)
- goto rearm_if_notmax;
-
/*
* Do not scale down unless we have been at this frequency for the
* minimum sample time.
*/
if (new_freq < pcpu->target_freq) {
- if (pcpu->timer_run_time - pcpu->freq_change_time
- < min_sample_time)
+ if (pcpu->timer_run_time - pcpu->target_set_time
+ < min_sample_time) {
+ trace_cpufreq_interactive_notyet(data, cpu_load,
+ pcpu->target_freq, new_freq);
goto rearm;
+ }
}
+ pcpu->target_set_time_in_idle = now_idle;
+ pcpu->target_set_time = pcpu->timer_run_time;
+
+ if (pcpu->target_freq == new_freq) {
+ trace_cpufreq_interactive_already(data, cpu_load,
+ pcpu->target_freq, new_freq);
+ goto rearm_if_notmax;
+ }
+
+ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+ new_freq);
+
if (new_freq < pcpu->target_freq) {
pcpu->target_freq = new_freq;
spin_lock_irqsave(&down_cpumask_lock, flags);
@@ -376,10 +413,8 @@ static int cpufreq_interactive_up_task(void *data)
max_freq,
CPUFREQ_RELATION_H);
mutex_unlock(&set_speed_lock);
-
- pcpu->freq_change_time_in_idle =
- get_cpu_idle_time_us(cpu,
- &pcpu->freq_change_time);
+ trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+ pcpu->policy->cur);
}
}
@@ -423,9 +458,8 @@ static void cpufreq_interactive_freq_down(struct work_struct *work)
CPUFREQ_RELATION_H);
mutex_unlock(&set_speed_lock);
- pcpu->freq_change_time_in_idle =
- get_cpu_idle_time_us(cpu,
- &pcpu->freq_change_time);
+ trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
+ pcpu->policy->cur);
}
}
@@ -497,6 +531,28 @@ static ssize_t store_min_sample_time(struct kobject *kobj,
static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
show_min_sample_time, store_min_sample_time);
+static ssize_t show_above_hispeed_delay(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", above_hispeed_delay_val);
+}
+
+static ssize_t store_above_hispeed_delay(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ above_hispeed_delay_val = val;
+ return count;
+}
+
+define_one_global_rw(above_hispeed_delay);
+
static ssize_t show_timer_rate(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -522,6 +578,7 @@ static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
static struct attribute *interactive_attributes[] = {
&hispeed_freq_attr.attr,
&go_hispeed_load_attr.attr,
+ &above_hispeed_delay.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
NULL,
@@ -553,9 +610,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->policy = policy;
pcpu->target_freq = policy->cur;
pcpu->freq_table = freq_table;
- pcpu->freq_change_time_in_idle =
+ pcpu->target_set_time_in_idle =
get_cpu_idle_time_us(j,
- &pcpu->freq_change_time);
+ &pcpu->target_set_time);
pcpu->governor_enabled = 1;
smp_wmb();
}
@@ -642,6 +699,7 @@ static int __init cpufreq_interactive_init(void)
go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+ above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
timer_rate = DEFAULT_TIMER_RATE;
/* Initalize per-cpu timers */
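
Condensed into a standalone form -- a sketch only, with the per-cpu state, locking, timers and tracepoints stripped and the example frequencies invented -- the revised speed choice in cpufreq_interactive_timer() now parks the target at hispeed_freq and only lets it climb higher once above_hispeed_delay has elapsed:

/*
 * Sketch of the revised speed choice; names mirror the driver but this
 * is not kernel code.
 */
#include <stdio.h>

static unsigned long hispeed_freq = 1200000;		/* kHz, example */
static unsigned long go_hispeed_load = 85;		/* percent */
static unsigned long above_hispeed_delay_val = 20000;	/* uS */

static unsigned long pick_freq(unsigned long cpu_load,
			       unsigned long cur_freq,
			       unsigned long min_freq,
			       unsigned long max_freq,
			       unsigned long target_freq,
			       unsigned long usec_since_target_set)
{
	unsigned long new_freq;

	if (cpu_load >= go_hispeed_load) {
		if (cur_freq == min_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = max_freq * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			/* The "notyet" case: hold at hispeed for now. */
			if (target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    usec_since_target_set < above_hispeed_delay_val)
				return target_freq;
		}
	} else {
		new_freq = max_freq * cpu_load / 100;
	}

	return new_freq;
}

int main(void)
{
	/* 95% load, at hispeed for only 10ms: held at hispeed. */
	printf("%lu kHz\n", pick_freq(95, 1200000, 300000, 1800000,
				      1200000, 10000));
	/* Same load after the 20ms delay: allowed up to 95%% of max. */
	printf("%lu kHz\n", pick_freq(95, 1200000, 300000, 1800000,
				      1200000, 30000));
	return 0;
}
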
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 000000000000..3a90d3d609ba
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,87 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq),
+
+ TP_STRUCT__entry(
+ __field( u32, cpu_id )
+ __field(unsigned long, targfreq )
+ __field(unsigned long, actualfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = (u32) cpu_id;
+ __entry->targfreq = targfreq;
+ __entry->actualfreq = actualfreq;
+ ),
+
+ TP_printk("cpu=%u targ=%lu actual=%lu",
+ __entry->cpu_id, __entry->targfreq,
+ __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_up,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_down,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu_id )
+ __field(unsigned long, load )
+ __field(unsigned long, curfreq )
+ __field(unsigned long, targfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->load = load;
+ __entry->curfreq = curfreq;
+ __entry->targfreq = targfreq;
+ ),
+
+ TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curfreq,
+ __entry->targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
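
With CREATE_TRACE_POINTS set in the driver above, each DEFINE_EVENT() in this header appears as an ftrace event under events/cpufreq_interactive/ in tracefs. A minimal consumer might look like the sketch below; the /sys/kernel/debug/tracing mount point is an assumption about the running system, not something this patch controls:

/* Minimal tracefs reader; the mount point is assumed. */
#include <stdio.h>

#define TRACING "/sys/kernel/debug/tracing"

int main(void)
{
	char line[512];
	FILE *f;

	/* Enable every cpufreq_interactive_* event at once. */
	f = fopen(TRACING "/events/cpufreq_interactive/enable", "w");
	if (f) {
		fputs("1", f);
		fclose(f);
	}

	/* Stream lines such as "cpu=0 targ=1200000 actual=918000". */
	f = fopen(TRACING "/trace_pipe", "r");
	if (!f) {
		perror(TRACING "/trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
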
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 08086d680c2c..062f582ea75e 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -26,6 +26,10 @@
#include <net/tcp.h>
#include <net/udp.h>
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
#include <linux/netfilter/xt_socket.h>
#include "xt_qtaguid_internal.h"
#include "xt_qtaguid_print.h"
@@ -1265,7 +1269,7 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
struct data_counters *uid_tag_counters;
struct sock_tag *sock_tag_entry;
struct iface_stat *iface_entry;
- struct tag_stat *new_tag_stat;
+ struct tag_stat *new_tag_stat = NULL;
MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
"uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
ifname, uid, sk, direction, proto, bytes);
@@ -1330,8 +1334,19 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
}
if (acct_tag) {
+ /* Create the child {acct_tag, uid_tag} and hook up parent. */
new_tag_stat = create_if_tag_stat(iface_entry, tag);
new_tag_stat->parent_counters = uid_tag_counters;
+ } else {
+ /*
+ * For new_tag_stat to be still NULL here would require:
+ * {0, uid_tag} exists
+ * and {acct_tag, uid_tag} doesn't exist
+ * AND acct_tag == 0.
+ * Impossible. This reassures us that new_tag_stat
+ * below will always be assigned.
+ */
+ BUG_ON(!new_tag_stat);
}
tag_stat_update(new_tag_stat, direction, proto, bytes);
spin_unlock_bh(&iface_entry->tag_stat_list_lock);
@@ -1535,6 +1550,27 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
return sk;
}
+static int ipx_proto(const struct sk_buff *skb,
+ struct xt_action_param *par)
+{
+ int thoff, tproto;
+
+ switch (par->family) {
+ case NFPROTO_IPV6:
+ tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+ if (tproto < 0)
+ MT_DEBUG("%s(): transport header not found in ipv6"
+ " skb=%p\n", __func__, skb);
+ break;
+ case NFPROTO_IPV4:
+ tproto = ip_hdr(skb)->protocol;
+ break;
+ default:
+ tproto = IPPROTO_RAW;
+ }
+ return tproto;
+}
+
static void account_for_uid(const struct sk_buff *skb,
const struct sock *alternate_sk, uid_t uid,
struct xt_action_param *par)
@@ -1561,15 +1597,15 @@ static void account_for_uid(const struct sk_buff *skb,
} else if (unlikely(!el_dev->name)) {
pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
} else {
- MT_DEBUG("qtaguid[%d]: dev name=%s type=%d\n",
- par->hooknum,
- el_dev->name,
- el_dev->type);
+ int proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto);
if_tag_stat_update(el_dev->name, uid,
skb->sk ? skb->sk : alternate_sk,
par->in ? IFS_RX : IFS_TX,
- ip_hdr(skb)->protocol, skb->len);
+ proto, skb->len);
}
}
@@ -1614,8 +1650,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
} else {
atomic64_inc(&qtu_events.match_found_sk);
}
- MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d proto=%d\n",
- par->hooknum, sk, got_sock, ip_hdr(skb)->protocol);
+ MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+ par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
if (sk != NULL) {
MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
par->hooknum, sk, sk->sk_socket,