path: root/kernel
author     David S. Miller <davem@davemloft.net>    2018-05-11 20:53:22 -0400
committer  David S. Miller <davem@davemloft.net>    2018-05-11 20:53:22 -0400
commit     b2d6cee117f708d493c020f9f355297321507be7 (patch)
tree       2c6975b47034de78fc899b4191260bb2704efc0f /kernel
parent     b753a9faaf9aef1338c28ebd9ace6d749428788b (diff)
parent     4bc871984f7cb5b2dec3ae64b570cb02f9ce2227 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The bpf syscall and selftests conflicts were trivial overlapping changes.

The r8169 change involved moving the added mdelay from 'net' into a different function.

A TLS close bug fix overlapped with the splitting of the TLS state into separate TX and RX parts. I just expanded the tests in the bug fix from "ctx->conf == X" into "ctx->tx_conf == X && ctx->rx_conf == X".

Signed-off-by: David S. Miller <davem@davemloft.net>
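For readers unfamiliar with the TLS state split, here is a minimal sketch of the shape of the conflict resolution described above. This is plain illustrative C, not the actual net/tls code; the struct and field names are stand-ins that only mirror the description in the merge message.

/* Illustrative only: a single ctx->conf was split into tx_conf and rx_conf. */
struct example_tls_ctx {
	int tx_conf;	/* assumed field name, mirroring the text above */
	int rx_conf;	/* assumed field name, mirroring the text above */
};

/* Before the split the bug fix tested:   ctx->conf == X                      */
/* After the merge the same test becomes: tx_conf == X && rx_conf == X        */
static int example_conf_is(const struct example_tls_ctx *ctx, int X)
{
	return ctx->tx_conf == X && ctx->rx_conf == X;
}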
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/syscall.c                19
-rw-r--r--  kernel/compat.c                      1
-rw-r--r--  kernel/sched/cpufreq_schedutil.c    16
-rw-r--r--  kernel/time/clocksource.c           63
-rw-r--r--  kernel/trace/ftrace.c                4
-rw-r--r--  kernel/trace/trace_events_filter.c   3
-rw-r--r--  kernel/trace/trace_stack.c           2
7 files changed, 67 insertions, 41 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 9b87198deea2..c286e75ec087 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -28,6 +28,7 @@
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/btf.h>
+#include <linux/nospec.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -104,12 +105,14 @@ const struct bpf_map_ops bpf_map_offload_ops = {
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
const struct bpf_map_ops *ops;
+ u32 type = attr->map_type;
struct bpf_map *map;
int err;
- if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+ if (type >= ARRAY_SIZE(bpf_map_types))
return ERR_PTR(-EINVAL);
- ops = bpf_map_types[attr->map_type];
+ type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
+ ops = bpf_map_types[type];
if (!ops)
return ERR_PTR(-EINVAL);
@@ -124,7 +127,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
if (IS_ERR(map))
return map;
map->ops = ops;
- map->map_type = attr->map_type;
+ map->map_type = type;
return map;
}
@@ -897,11 +900,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = {
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
- if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
+ const struct bpf_prog_ops *ops;
+
+ if (type >= ARRAY_SIZE(bpf_prog_types))
+ return -EINVAL;
+ type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
+ ops = bpf_prog_types[type];
+ if (!ops)
return -EINVAL;
if (!bpf_prog_is_dev_bound(prog->aux))
- prog->aux->ops = bpf_prog_types[type];
+ prog->aux->ops = ops;
else
prog->aux->ops = &bpf_offload_prog_ops;
prog->type = type;
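Both syscall.c hunks above apply the same Spectre-v1 hardening pattern: bounds-check a user-controlled index, then clamp it with array_index_nospec() so a mispredicted branch cannot speculatively read past the table. A minimal sketch of that pattern, assuming it sits next to the bpf_map_types table in kernel/bpf/syscall.c; the helper name is invented for illustration.

#include <linux/nospec.h>

static const struct bpf_map_ops *example_map_ops_lookup(u32 type)
{
	if (type >= ARRAY_SIZE(bpf_map_types))	/* architectural bounds check */
		return NULL;
	/* clamp so the index stays in range even under speculation */
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	return bpf_map_types[type];
}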
diff --git a/kernel/compat.c b/kernel/compat.c
index 6d21894806b4..92d8c98c0f57 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
{
struct compat_timex tx32;
+ memset(txc, 0, sizeof(struct timex));
if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
return -EFAULT;
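The one-line compat.c change above is an infoleak fix: zeroing the native struct timex up front guarantees that anything the compat conversion does not write (padding, or fields without a compat counterpart) holds zero rather than stale kernel stack data that could later be copied back to userspace. A small standalone sketch of the pattern, with invented struct names, not the kernel code:

#include <linux/string.h>	/* memset() */

struct compat_example { int a; int b; };
struct native_example { long a; long b; int only_in_native; };

static void example_get_compat(struct native_example *out,
			       const struct compat_example *in)
{
	memset(out, 0, sizeof(*out));	/* nothing is left uninitialized */
	out->a = in->a;
	out->b = in->b;
	/* out->only_in_native (and any padding) is now zero instead of
	 * stale stack contents if *out is later copied to userspace. */
}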
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d2c6083304b4..e13df951aca7 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -305,7 +305,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq) {
+ if (busy && next_f < sg_policy->next_freq &&
+ sg_policy->next_freq != UINT_MAX) {
next_f = sg_policy->next_freq;
/* Reset cached freq as next_freq has changed */
@@ -396,19 +397,6 @@ static void sugov_irq_work(struct irq_work *irq_work)
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
- /*
- * For RT tasks, the schedutil governor shoots the frequency to maximum.
- * Special care must be taken to ensure that this kthread doesn't result
- * in the same behavior.
- *
- * This is (mostly) guaranteed by the work_in_progress flag. The flag is
- * updated only at the end of the sugov_work() function and before that
- * the schedutil governor rejects all other frequency scaling requests.
- *
- * There is a very rare case though, where the RT thread yields right
- * after the work_in_progress flag is cleared. The effects of that are
- * neglected for now.
- */
kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
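In the schedutil hunk above, the extra condition matters because next_freq uses UINT_MAX as a "not yet set / reset" sentinel rather than a real frequency, so the cached value must not be treated as a frequency floor in that state. A minimal standalone sketch of the guarded decision, with names mirroring the hunk:

#include <limits.h>	/* UINT_MAX, for this standalone sketch */
#include <stdbool.h>

static unsigned int example_pick_freq(unsigned int next_f,
				      unsigned int cached_next_freq,
				      bool busy)
{
	/* Keep the previously requested (higher) frequency for a busy CPU,
	 * but only if the cached value is a real frequency and not the
	 * UINT_MAX sentinel. */
	if (busy && next_f < cached_next_freq && cached_next_freq != UINT_MAX)
		next_f = cached_next_freq;
	return next_f;
}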
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e974cface0b..84f37420fcf5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+ spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;
+ /*
+ * If the clocksource is registered clocksource_watchdog_kthread() will
+ * re-rate and re-select.
+ */
+ if (list_empty(&cs->list)) {
+ cs->rating = 0;
+ return;
+ }
+
if (cs->mark_unstable)
cs->mark_unstable(cs);
+ /* kick clocksource_watchdog_kthread() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
* clocksource_mark_unstable - mark clocksource unstable via watchdog
* @cs: clocksource to be marked unstable
*
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
spin_lock_irqsave(&watchdog_lock, flags);
if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
- if (list_empty(&cs->wd_list))
+ if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
list_add(&cs->wd_list, &watchdog_list);
__clocksource_unstable(cs);
}
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
- unsigned long flags;
+ INIT_LIST_HEAD(&cs->wd_list);
- spin_lock_irqsave(&watchdog_lock, flags);
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a clocksource to be watched. */
list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
- spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
- unsigned long flags;
-
- spin_lock_irqsave(&watchdog_lock, flags);
if (cs != watchdog) {
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
clocksource_stop_watchdog();
}
}
- spin_unlock_irqrestore(&watchdog_lock, flags);
}
static int __clocksource_watchdog_kthread(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
- LIST_HEAD(unstable);
int select = 0;
spin_lock_irqsave(&watchdog_lock, flags);
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
list_del_init(&cs->wd_list);
- list_add(&cs->wd_list, &unstable);
+ __clocksource_change_rating(cs, 0);
select = 1;
}
if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
clocksource_stop_watchdog();
spin_unlock_irqrestore(&watchdog_lock, flags);
- /* Needs to be done outside of watchdog lock */
- list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
- list_del_init(&cs->wd_list);
- __clocksource_change_rating(cs, 0);
- }
return select;
}
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
/**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
*/
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
+ unsigned long flags;
/* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq);
/* Add clocksource to the clocksource list */
mutex_lock(&clocksource_mutex);
+
+ clocksource_watchdog_lock(&flags);
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
+ clocksource_watchdog_unlock(&flags);
+
clocksource_select();
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
+ unsigned long flags;
+
mutex_lock(&clocksource_mutex);
+ clocksource_watchdog_lock(&flags);
__clocksource_change_rating(cs, rating);
+ clocksource_watchdog_unlock(&flags);
+
clocksource_select();
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
*/
static int clocksource_unbind(struct clocksource *cs)
{
+ unsigned long flags;
+
if (clocksource_is_watchdog(cs)) {
/* Select and try to install a replacement watchdog. */
clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
if (curr_clocksource == cs)
return -EBUSY;
}
+
+ clocksource_watchdog_lock(&flags);
clocksource_dequeue_watchdog(cs);
list_del_init(&cs->list);
+ clocksource_watchdog_unlock(&flags);
+
return 0;
}
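The clocksource.c hunks above all serve one change: watchdog_lock is no longer taken inside the enqueue/dequeue/re-rate helpers but by their callers, through the new clocksource_watchdog_lock()/unlock() wrappers (which become empty stubs without CONFIG_CLOCKSOURCE_WATCHDOG). A condensed sketch of the resulting caller-side pattern, with the lock ordering implied by the hunks; the wrapper function name is invented for illustration.

static void example_register_path(struct clocksource *cs)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);		/* outer lock, may sleep */

	clocksource_watchdog_lock(&flags);	/* inner irq-safe spinlock */
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);	/* list work only, no locking */
	clocksource_watchdog_unlock(&flags);

	clocksource_select();			/* selection outside the spinlock */
	clocksource_select_watchdog(false);
	mutex_unlock(&clocksource_mutex);
}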
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 16bbf062018f..8d83bcf9ef69 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
ftrace_create_filter_files(&global_ops, d_tracer);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- trace_create_file("set_graph_function", 0444, d_tracer,
+ trace_create_file("set_graph_function", 0644, d_tracer,
NULL,
&ftrace_graph_fops);
- trace_create_file("set_graph_notrace", 0444, d_tracer,
+ trace_create_file("set_graph_notrace", 0644, d_tracer,
NULL,
&ftrace_graph_notrace_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 1f951b3df60c..7d306b74230f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -762,6 +762,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
static int regex_match_front(char *str, struct regex *r, int len)
{
+ if (len < r->len)
+ return 0;
+
if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
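The two added lines above guard the prefix match: a string shorter than the pattern cannot match, and returning early also keeps strncmp() from comparing bytes beyond the candidate's length. A standalone sketch of the same check in plain C, with an invented function name:

#include <string.h>

static int example_match_front(const char *str, int str_len,
			       const char *pattern, int pat_len)
{
	if (str_len < pat_len)
		return 0;	/* too short to start with the pattern */
	return strncmp(str, pattern, pat_len) == 0;
}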
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3c7bfc4bf5e9..4237eba4ef20 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -472,7 +472,7 @@ static __init int stack_trace_init(void)
NULL, &stack_trace_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
- trace_create_file("stack_trace_filter", 0444, d_tracer,
+ trace_create_file("stack_trace_filter", 0644, d_tracer,
&trace_ops, &stack_trace_filter_fops);
#endif