md: switch md-cluster to use md_submodule_head

To make the code cleaner, and to prepare for adding a kconfig option for bitmap.
Also remove the now-unused global variables pers_lock, md_cluster_ops and
md_cluster_mod, along with the exported helpers register_md_cluster_operations()
and unregister_md_cluster_operations().

Link: https://lore.kernel.org/linux-raid/20250215092225.2427977-8-yukuai1@huaweicloud.com
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Su Yue <glass.su@suse.com>
commit 87a86277c9 (parent c594de0455)
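For readers unfamiliar with the md_submodule_head mechanism, the registration pattern this patch switches to looks roughly like the sketch below. It is only an illustration pieced together from the call sites visible in this diff (register_md_submodule(&cluster_ops.head), xa_load(&md_submodule, ID_CLUSTER), try_module_get(...->head.owner)); the exact field types of struct md_submodule_head and the real body of register_md_submodule() in md.c may differ.

#include <linux/module.h>
#include <linux/xarray.h>

/*
 * Illustrative sketch only -- not the verbatim md.c/md.h code.
 * Every md submodule (personality, cluster support, ...) embeds a
 * struct md_submodule_head and registers it in one shared xarray,
 * which replaces the dedicated pers_lock/md_cluster_ops/md_cluster_mod
 * globals removed by this patch.
 */
struct md_submodule_head {
	int		type;	/* kind of submodule, e.g. MD_CLUSTER (field type assumed) */
	int		id;	/* xarray index, e.g. ID_CLUSTER (field type assumed) */
	const char	*name;	/* e.g. "cluster" */
	struct module	*owner;	/* usually THIS_MODULE, pinned via try_module_get() */
};

static DEFINE_XARRAY(md_submodule);

/* Registration stores the head in the shared xarray, keyed by its id. */
int register_md_submodule(struct md_submodule_head *msh)
{
	return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
}

void unregister_md_submodule(struct md_submodule_head *msh)
{
	xa_erase(&md_submodule, msh->id);
}

Lookup on the md core side then becomes a plain xa_load() plus try_module_get() on head.owner, as the updated get_cluster_ops() later in this diff shows.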
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1612,7 +1612,14 @@ out:
 	return err;
 }
 
-static const struct md_cluster_operations cluster_ops = {
+static struct md_cluster_operations cluster_ops = {
+	.head = {
+		.type	= MD_CLUSTER,
+		.id	= ID_CLUSTER,
+		.name	= "cluster",
+		.owner	= THIS_MODULE,
+	},
+
 	.join   = join,
 	.leave  = leave,
 	.slot_number = slot_number,
@@ -1642,13 +1649,12 @@ static int __init cluster_init(void)
 {
 	pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
 	pr_info("Registering Cluster MD functions\n");
-	register_md_cluster_operations(&cluster_ops, THIS_MODULE);
-	return 0;
+	return register_md_submodule(&cluster_ops.head);
 }
 
 static void cluster_exit(void)
 {
-	unregister_md_cluster_operations();
+	unregister_md_submodule(&cluster_ops.head);
 }
 
 module_init(cluster_init);
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -37,9 +37,6 @@ struct md_cluster_operations {
 	void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
 };
 
-extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
-					   struct module *module);
-extern int unregister_md_cluster_operations(void);
 extern int md_setup_cluster(struct mddev *mddev, int nodes);
 extern void md_cluster_stop(struct mddev *mddev);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -81,13 +81,8 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
 
 static DEFINE_XARRAY(md_submodule);
 
-static DEFINE_SPINLOCK(pers_lock);
-
 static const struct kobj_type md_ktype;
 
-static const struct md_cluster_operations *md_cluster_ops;
-static struct module *md_cluster_mod;
-
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 static struct workqueue_struct *md_wq;
 
@@ -7452,11 +7447,12 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 
 static int get_cluster_ops(struct mddev *mddev)
 {
-	spin_lock(&pers_lock);
-	mddev->cluster_ops = md_cluster_ops;
-	if (mddev->cluster_ops && !try_module_get(md_cluster_mod))
+	xa_lock(&md_submodule);
+	mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
+	if (mddev->cluster_ops &&
+	    !try_module_get(mddev->cluster_ops->head.owner))
 		mddev->cluster_ops = NULL;
-	spin_unlock(&pers_lock);
+	xa_unlock(&md_submodule);
 
 	return mddev->cluster_ops == NULL ? -ENOENT : 0;
 }
@@ -7467,7 +7463,7 @@ static void put_cluster_ops(struct mddev *mddev)
 		return;
 
 	mddev->cluster_ops->leave(mddev);
-	module_put(md_cluster_mod);
+	module_put(mddev->cluster_ops->head.owner);
 	mddev->cluster_ops = NULL;
 }
 
@@ -8559,31 +8555,6 @@ void unregister_md_submodule(struct md_submodule_head *msh)
 }
 EXPORT_SYMBOL_GPL(unregister_md_submodule);
 
-int register_md_cluster_operations(const struct md_cluster_operations *ops,
-				   struct module *module)
-{
-	int ret = 0;
-	spin_lock(&pers_lock);
-	if (md_cluster_ops != NULL)
-		ret = -EALREADY;
-	else {
-		md_cluster_ops = ops;
-		md_cluster_mod = module;
-	}
-	spin_unlock(&pers_lock);
-	return ret;
-}
-EXPORT_SYMBOL(register_md_cluster_operations);
-
-int unregister_md_cluster_operations(void)
-{
-	spin_lock(&pers_lock);
-	md_cluster_ops = NULL;
-	spin_unlock(&pers_lock);
-	return 0;
-}
-EXPORT_SYMBOL(unregister_md_cluster_operations);
-
 int md_setup_cluster(struct mddev *mddev, int nodes)
 {
 	int ret = get_cluster_ops(mddev);
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -603,7 +603,7 @@ struct mddev {
 	mempool_t *serial_info_pool;
 	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
 	struct md_cluster_info *cluster_info;
-	const struct md_cluster_operations *cluster_ops;
+	struct md_cluster_operations *cluster_ops;
 	unsigned int			good_device_nr;	/* good device num within cluster raid */
 	unsigned int			noio_flag; /* for memalloc scope API */
 