author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-06 11:46:47 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-30 16:03:26 -0700
commit     a8bb74acd8efe2eb934d524ae20859980975b602 (patch)
tree       190aee61241decfa3bb25788f6c95cd6f142b3a2
parent     4c7e9c1434c6fc960774a5475f2fbccbf557fdeb (diff)
rcu: Consolidate RCU-sched update-side function definitions
This commit saves a few lines by consolidating the RCU-sched function
definitions at the end of include/linux/rcupdate.h. This consolidation
also makes it easier to remove them all when the time comes.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
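Illustration (not part of the patch): a minimal caller-side sketch of why existing users keep compiling after the consolidation. The struct foo, foo_free_cb(), foo_retire(), and foo_wait_for_readers() names are hypothetical; only the RCU API names come from the patch. After this commit the old RCU-sched entry points are static inline wrappers in rcupdate.h that forward to the consolidated RCU flavor.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {				/* hypothetical example structure */
	int data;
	struct rcu_head rh;
};

static void foo_free_cb(struct rcu_head *rh)
{
	kfree(container_of(rh, struct foo, rh));
}

static void foo_retire(struct foo *p)
{
	/* call_rcu_sched() is now an inline wrapper that forwards to call_rcu(). */
	call_rcu_sched(&p->rh, foo_free_cb);
}

static void foo_wait_for_readers(void)
{
	/* Likewise, synchronize_sched() now simply calls synchronize_rcu(). */
	synchronize_sched();
}

The same forwarding covers synchronize_sched_expedited(), rcu_barrier_sched(), get_state_synchronize_sched(), and cond_synchronize_sched(), as shown in the rcupdate.h hunk below.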
-rw-r--r--  include/linux/rcupdate.h | 38
-rw-r--r--  include/linux/rcutiny.h  | 32
-rw-r--r--  include/linux/rcutree.h  |  9
-rw-r--r--  kernel/rcu/tree.c        | 58
4 files changed, 32 insertions, 105 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e530f5739033..12103e1bbe67 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,12 +48,6 @@
 #define ulong2long(a)		(*(long *)(&(a)))
 
 /* Exported common interfaces */
-
-#ifndef CONFIG_TINY_RCU
-void synchronize_sched(void);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-#endif
-
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
 void synchronize_rcu(void);
@@ -170,7 +164,7 @@ void exit_tasks_rcu_finish(void);
 #define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -892,4 +886,34 @@ static inline void rcu_barrier_bh(void)
 	rcu_barrier();
 }
 
+static inline void synchronize_sched(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+	rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index df82bada9b19..7fa4fb9e899e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -36,11 +36,6 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
-static inline void synchronize_sched(void)
-{
-	synchronize_rcu();
-}
-
 static inline unsigned long get_state_synchronize_rcu(void)
 {
 	return 0;
@@ -51,36 +46,11 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
-static inline unsigned long get_state_synchronize_sched(void)
-{
-	return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
-	might_sleep();
-}
-
 extern void rcu_barrier(void);
 
-static inline void rcu_barrier_sched(void)
-{
-	rcu_barrier();	/* Only one CPU, so only one list of callbacks! */
-}
-
 static inline void synchronize_rcu_expedited(void)
 {
-	synchronize_sched();
-}
-
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_sched();
-}
-
-static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-	call_rcu(head, func);
+	synchronize_rcu();
 }
 
 static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 94820156aa62..d09a9abe9440 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -46,21 +46,12 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 void synchronize_rcu_expedited(void);
-
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_rcu_expedited();
-}
-
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8d5dadaf3c53..1a2551a4d583 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2950,19 +2950,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * This is transitional.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-	call_rcu(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
@@ -2977,17 +2964,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
 /**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * This is transitional.
- */
-void synchronize_sched(void)
-{
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
@@ -3028,29 +3004,6 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
-/**
- * get_state_synchronize_sched - Snapshot current RCU-sched state
- *
- * This is transitional, and only used by rcutorture.
- */
-unsigned long get_state_synchronize_sched(void)
-{
-	return get_state_synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
-
-/**
- * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
- * @oldstate: return value from earlier call to get_state_synchronize_sched()
- *
- * This is transitional and only used by rcutorture.
- */
-void cond_synchronize_sched(unsigned long oldstate)
-{
-	cond_synchronize_rcu(oldstate);
-}
-EXPORT_SYMBOL_GPL(cond_synchronize_sched);
-
 /*
  * Check to see if there is any immediate RCU-related work to be done by
  * the current CPU, for the specified type of RCU, returning 1 if so and
@@ -3266,17 +3219,6 @@ void rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- *
- * This is transitional.
- */
-void rcu_barrier_sched(void)
-{
-	rcu_barrier();
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
 /*
  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
  * first CPU in a given leaf rcu_node structure coming online. The caller