slab, rcu: move TINY_RCU variant of kvfree_rcu() to SLAB
Following the move of the TREE_RCU implementation, also move the TINY_RCU one for consistency and subsequent refactoring. For simplicity, remove the separate inline __kvfree_call_rcu(), as TINY_RCU is not meant for high-performance hardware anyway. Declare kvfree_call_rcu() in rcupdate.h to avoid header dependency issues. Also move the kvfree_rcu_barrier() declaration to slab.h.

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 2014c95afe
commit b14ff274e8
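For context, a minimal usage sketch of the API this commit moves around; it is not part of the patch, and struct foo, example_free() and their fields are made-up names. The two-argument kvfree_rcu() form requires an rcu_head embedded in the object and may be used from atomic context; the one-argument kvfree_rcu_mightsleep() form needs no rcu_head but may sleep.

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object with an embedded rcu_head for the two-argument form. */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void example_free(struct foo *p, void *vbuf)
{
        /* Two-argument form: usable in atomic context, frees p after a grace period. */
        kvfree_rcu(p, rcu);

        /* One-argument form: may sleep; waits for a grace period, then kvfree()s vbuf. */
        kvfree_rcu_mightsleep(vbuf);
}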
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1082,6 +1082,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
 #define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
 
+/*
+ * In mm/slab_common.c, no suitable header to include here.
+ */
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+
 #define kvfree_rcu_arg_2(ptr, rhf) \
 do { \
         typeof (ptr) ___p = (ptr); \
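The hunk above is truncated inside kvfree_rcu_arg_2(). As a rough sketch (an assumption about the rest of the macro body, which is not shown in this hunk), kvfree_rcu(p, rcu) essentially reduces to the hypothetical helper below:

#include <linux/rcupdate.h>

/* Made-up structure, for illustration only. */
struct foo {
        int data;
        struct rcu_head rcu;
};

/* Hypothetical expansion of kvfree_rcu(p, rcu) via kvfree_rcu_arg_2(). */
static void kvfree_rcu_expansion_sketch(struct foo *p)
{
        typeof(p) ___p = p;

        if (___p)
                kvfree_call_rcu(&___p->rcu, (void *)___p);
}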
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -90,41 +90,6 @@ static inline void synchronize_rcu_expedited(void)
         synchronize_rcu();
 }
 
-/*
- * Add one more declaration of kvfree() here. It is
- * not so straight forward to just include <linux/mm.h>
- * where it is defined due to getting many compile
- * errors caused by that include.
- */
-extern void kvfree(const void *addr);
-
-static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
-        if (head) {
-                call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
-                return;
-        }
-
-        // kvfree_rcu(one_arg) call.
-        might_sleep();
-        synchronize_rcu();
-        kvfree(ptr);
-}
-
-static inline void kvfree_rcu_barrier(void)
-{
-        rcu_barrier();
-}
-
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-#else
-static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
-        __kvfree_call_rcu(head, ptr);
-}
-#endif
-
 void rcu_qs(void);
 
 static inline void rcu_softirq_qs(void)
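One detail worth spelling out about the removed __kvfree_call_rcu(): the value passed to call_rcu() is not a real callback but the byte offset of the rcu_head within the object, cast to rcu_callback_t; the RCU core recognizes such small values and kvfree()s the enclosing object instead of calling a function. Below is a small encode-side sketch with a made-up struct blob; it mirrors the internal trick and is not how ordinary callers should use call_rcu().

#include <linux/rcupdate.h>

/* Hypothetical object; its rcu_head sits at offsetof(struct blob, rcu). */
struct blob {
        char payload[32];
        struct rcu_head rcu;
};

static void encode_sketch(struct blob *b)
{
        /*
         * (void *)&b->rcu - (void *)b == offsetof(struct blob, rcu),
         * i.e. the "callback" smuggles the rcu_head offset, exactly as
         * the head != NULL branch above does with (void *)head - ptr.
         */
        call_rcu(&b->rcu, (rcu_callback_t)((void *)&b->rcu - (void *)b));
}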
@@ -164,7 +129,6 @@ static inline void rcu_end_inkernel_boot(void) { }
 static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
 static inline bool rcu_is_watching(void) { return true; }
 static inline void rcu_momentary_eqs(void) { }
-static inline void kfree_rcu_scheduler_running(void) { }
 
 /* Avoid RCU read-side critical sections leaking across. */
 static inline void rcu_all_qs(void) { barrier(); }
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,12 +34,9 @@ static inline void rcu_virt_note_context_switch(void)
 }
 
 void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-void kvfree_rcu_barrier(void);
 
 void rcu_barrier(void);
 void rcu_momentary_eqs(void);
-void kfree_rcu_scheduler_running(void);
 
 struct rcu_gp_oldstate {
         unsigned long rgos_norm;
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,6 +16,7 @@
 #include <linux/gfp.h>
 #include <linux/overflow.h>
 #include <linux/types.h>
+#include <linux/rcupdate.h>
 #include <linux/workqueue.h>
 #include <linux/percpu-refcount.h>
 #include <linux/cleanup.h>
@@ -1082,6 +1083,19 @@ extern void kvfree_sensitive(const void *addr, size_t len);
 
 unsigned int kmem_cache_size(struct kmem_cache *s);
 
+#ifdef CONFIG_TINY_RCU
+static inline void kvfree_rcu_barrier(void)
+{
+        rcu_barrier();
+}
+
+static inline void kfree_rcu_scheduler_running(void) { }
+#else
+void kvfree_rcu_barrier(void);
+
+void kfree_rcu_scheduler_running(void);
+#endif
+
 /**
  * kmalloc_size_roundup - Report allocation bucket size for the given size
  *
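For callers, kvfree_rcu_barrier() exists so that code which queued objects with kvfree_rcu() can wait for all of them to actually be freed, typically before a module is unloaded; under TINY_RCU the inline above makes that an ordinary rcu_barrier(). A brief, hypothetical example (example_exit() is a made-up module exit handler, not from this patch):

#include <linux/module.h>
#include <linux/slab.h>

static void __exit example_exit(void)
{
        /*
         * Make sure every object this module handed to kvfree_rcu()
         * has been freed before the module's code and data go away.
         */
        kvfree_rcu_barrier();
}
module_exit(example_exit);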
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -246,17 +246,6 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
-        if (head)
-                kasan_record_aux_stack(ptr);
-
-        __kvfree_call_rcu(head, ptr);
-}
-EXPORT_SYMBOL_GPL(kvfree_call_rcu);
-#endif
-
 void __init rcu_init(void)
 {
         open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1284,6 +1284,25 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
 
+#ifdef CONFIG_TINY_RCU
+
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+{
+        if (head) {
+                kasan_record_aux_stack(ptr);
+                call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
+                return;
+        }
+
+        // kvfree_rcu(one_arg) call.
+        might_sleep();
+        synchronize_rcu();
+        kvfree(ptr);
+}
+EXPORT_SYMBOL_GPL(kvfree_call_rcu);
+
+#endif
+
 /*
  * This rcu parameter is runtime-read-only. It reflects
  * a minimum allowed number of objects which can be cached
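For completeness, the decode side: when the grace period ends, the RCU callback-invocation path checks whether the stored "callback" is really a small offset and, if so, kvfree()s the enclosing object. The sketch below is a simplified assumption condensed from the RCU reclaim logic (it presumes the __is_kvfree_rcu_offset() helper from rcupdate.h), not code from this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical, condensed reclaim step for one queued rcu_head. */
static void reclaim_sketch(struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        if (__is_kvfree_rcu_offset(offset))
                kvfree((void *)head - offset);  /* back to the object's start */
        else
                head->func(head);               /* an ordinary RCU callback */
}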