ocfs2: miscellaneous spelling fixes

Correct spelling here and there as suggested by codespell.
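
For reference, a codespell run of the following form reproduces the
report (the exact flags used are an assumption, not recorded in this
commit; codespell also accepts -w to write the fixes in place):

    codespell fs/ocfs2/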

Link: https://lkml.kernel.org/r/20241115151013.1404929-1-dmantipov@yandex.ru
Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Mark Fasheh <mark@fasheh.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Changwei Ge <gechangwei@live.cn>
Cc: Jun Piao <piaojun@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 22 files changed, 52 insertions(+), 52 deletions(-)

@@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
 					struct ocfs2_path *path,
 					struct ocfs2_extent_rec *insert_rec);
 /*
- * Reset the actual path elements so that we can re-use the structure
+ * Reset the actual path elements so that we can reuse the structure
  * to build another path. Generally, this involves freeing the buffer
  * heads.
  */
@@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle,
 	/*
 	 * If there is a gap before the root end and the real end
-	 * of the righmost leaf block, we need to remove the gap
+	 * of the rightmost leaf block, we need to remove the gap
 	 * between new_cpos and root_end first so that the tree
 	 * is consistent after we add a new branch(it will start
 	 * from new_cpos).
@@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle,
 	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
 	 * linked with the rest of the tree.
-	 * conversly, new_eb_bhs[0] is the new bottommost leaf.
+	 * conversely, new_eb_bhs[0] is the new bottommost leaf.
 	 *
 	 * when we leave the loop, new_last_eb_blk will point to the
 	 * newest leaf, and next_blkno will point to the topmost extent
@@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
 		 * update split_index here.
 		 *
 		 * When the split_index is zero, we need to merge it to the
-		 * prevoius extent block. It is more efficient and easier
+		 * previous extent block. It is more efficient and easier
 		 * if we do merge_right first and merge_left later.
 		 */
 		ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
@@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
 }
 /*
- * This should only be called against the righmost leaf extent list.
+ * This should only be called against the rightmost leaf extent list.
  *
  * ocfs2_figure_appending_type() will figure out whether we'll have to
  * insert at the tail of the rightmost leaf.

@@ -305,7 +305,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
 	}
 	/*
-	 * i_size might have just been updated as we grabed the meta lock. We
+	 * i_size might have just been updated as we grabbed the meta lock. We
 	 * might now be discovering a truncate that hit on another node.
 	 * block_read_full_folio->get_block freaks out if it is asked to read
 	 * beyond the end of a file, so we check here. Callers

@@ -1020,7 +1020,7 @@ fire_callbacks:
 	if (list_empty(&slot->ds_live_item))
 		goto out;
-	/* live nodes only go dead after enough consequtive missed
+	/* live nodes only go dead after enough consecutive missed
 	 * samples.. reset the missed counter whenever we see
 	 * activity */
 	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {

@@ -29,7 +29,7 @@
  * just calling printk() so that this can eventually make its way through
  * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
  * The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriage printk() when the caller passes in a constant
+ * only emit the appropriate printk() when the caller passes in a constant
  * mask, as is almost always the case.
  *
  * All this bitmask nonsense is managed from the files under

@@ -23,7 +23,7 @@
  * race between when we see a node start heartbeating and when we connect
  * to it.
  *
- * So nodes that are in this transtion put a hold on the quorum decision
+ * So nodes that are in this transition put a hold on the quorum decision
  * with a counter. As they fall out of this transition they drop the count
  * and if they're the last, they fire off the decision.
  */
@@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
 }
 /* as a node comes up we delay the quorum decision until we know the fate of
- * the connection. the hold will be droped in conn_up or hb_down. it might be
+ * the connection. the hold will be dropped in conn_up or hb_down. it might be
  * perpetuated by con_err until hb_down. if we already have a conn, we might
  * be dropping a hold that conn_up got. */
 void o2quo_hb_up(u8 node)
@@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node)
 }
 /* This is analogous to hb_up. as a node's connection comes up we delay the
- * quorum decision until we see it heartbeating. the hold will be droped in
+ * quorum decision until we see it heartbeating. the hold will be dropped in
  * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
  * it's already heartbeating we might be dropping a hold that conn_up got.
  * */

@@ -5,13 +5,13 @@
  *
  * ----
  *
- * Callers for this were originally written against a very simple synchronus
+ * Callers for this were originally written against a very simple synchronous
  * API. This implementation reflects those simple callers. Some day I'm sure
  * we'll need to move to a more robust posting/callback mechanism.
  *
  * Transmit calls pass in kernel virtual addresses and block copying this into
  * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
- * for a failed socket to timeout. TX callers can also pass in a poniter to an
+ * for a failed socket to timeout. TX callers can also pass in a pointer to an
  * 'int' which gets filled with an errno off the wire in response to the
  * message they send.
  *
@@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock;
  * o2net_wq. teardown detaches the callbacks before destroying the workqueue.
  * quorum work is queued as sock containers are shutdown.. stop_listening
  * tears down all the node's sock containers, preventing future shutdowns
- * and queued quroum work, before canceling delayed quorum work and
+ * and queued quorum work, before canceling delayed quorum work and
  * destroying the work queue.
  */
 static struct workqueue_struct *o2net_wq;
@@ -1419,7 +1419,7 @@ out:
 	return ret;
 }
-/* this work func is triggerd by data ready. it reads until it can read no
+/* this work func is triggered by data ready. it reads until it can read no
  * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
 static void o2net_rx_until_empty(struct work_struct *work)

@@ -118,7 +118,7 @@ struct dlm_lockstatus {
 #define LKM_VALBLK 0x00000100 /* lock value block request */
 #define LKM_NOQUEUE 0x00000200 /* non blocking request */
 #define LKM_CONVERT 0x00000400 /* conversion request */
-#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */
+#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
 #define LKM_UNLOCK 0x00001000 /* deallocate this lock */
 #define LKM_CANCEL 0x00002000 /* cancel conversion request */
 #define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */

@@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
  * 1) all recovery threads cluster wide will work on recovering
  *    ONE node at a time
  * 2) negotiate who will take over all the locks for the dead node.
- *    thats right... ALL the locks.
+ *    that's right... ALL the locks.
  * 3) once a new master is chosen, everyone scans all locks
  *    and moves aside those mastered by the dead guy
  * 4) each of these locks should be locked until recovery is done
@@ -1469,7 +1469,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
 	 * The first one is handled at the end of this function. The
 	 * other two are handled in the worker thread after locks have
 	 * been attached. Yes, we don't wait for purge time to match
-	 * kref_init. The lockres will still have atleast one ref
+	 * kref_init. The lockres will still have at least one ref
 	 * added because it is in the hash __dlm_insert_lockres() */
 	extra_refs++;
@@ -1735,7 +1735,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
 			spin_unlock(&res->spinlock);
 		}
 	} else {
-		/* put.. incase we are not the master */
+		/* put.. in case we are not the master */
 		spin_unlock(&res->spinlock);
 		dlm_lockres_put(res);
 	}

@@ -794,7 +794,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
 /*
  * Keep a list of processes who have interest in a lockres.
- * Note: this is now only uesed for check recursive cluster locking.
+ * Note: this is now only used for check recursive cluster locking.
  */
 static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
 				    struct ocfs2_lock_holder *oh)
@@ -2532,7 +2532,7 @@ bail:
  * locks while holding a page lock and the downconvert thread which
  * blocks dlm lock acquiry while acquiring page locks.
  *
- * ** These _with_page variantes are only intended to be called from aop
+ * ** These _with_page variants are only intended to be called from aop
  * methods that hold page locks and return a very specific *positive* error
  * code that aop methods pass up to the VFS -- test for errors with != 0. **
  *
@@ -2630,7 +2630,7 @@ void ocfs2_inode_unlock(struct inode *inode,
 }
 /*
- * This _tracker variantes are introduced to deal with the recursive cluster
+ * This _tracker variants are introduced to deal with the recursive cluster
  * locking issue. The idea is to keep track of a lock holder on the stack of
  * the current process. If there's a lock holder on the stack, we know the
  * task context is already protected by cluster locking. Currently, they're
@@ -2735,7 +2735,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
 	struct ocfs2_lock_res *lockres;
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
-	/* had_lock means that the currect process already takes the cluster
+	/* had_lock means that the current process already takes the cluster
 	 * lock previously.
 	 * If had_lock is 1, we have nothing to do here.
 	 * If had_lock is 0, we will release the lock.
@@ -3802,9 +3802,9 @@ recheck:
 	 * set when the ast is received for an upconvert just before the
 	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
 	 * on the heels of the ast, we want to delay the downconvert just
-	 * enough to allow the up requestor to do its task. Because this
+	 * enough to allow the up requester to do its task. Because this
 	 * lock is in the blocked queue, the lock will be downconverted
-	 * as soon as the requestor is done with the lock.
+	 * as soon as the requester is done with the lock.
 	 */
 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
 		goto leave_requeue;

@@ -1122,7 +1122,7 @@ static void ocfs2_clear_inode(struct inode *inode)
 	dquot_drop(inode);
-	/* To preven remote deletes we hold open lock before, now it
+	/* To prevent remote deletes we hold open lock before, now it
 	 * is time to unlock PR and EX open locks. */
 	ocfs2_open_unlock(inode);
@@ -1437,7 +1437,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
 	 * Call ocfs2_validate_meta_ecc() first since it has ecc repair
 	 * function, but we should not return error immediately when ecc
 	 * validation fails, because the reason is quite likely the invalid
-	 * inode number inputed.
+	 * inode number inputted.
 	 */
 	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
 	if (rc) {

@@ -796,7 +796,7 @@ bail:
 /*
  * OCFS2_IOC_INFO handles an array of requests passed from userspace.
  *
- * ocfs2_info_handle() recevies a large info aggregation, grab and
+ * ocfs2_info_handle() receives a large info aggregation, grab and
  * validate the request count from header, then break it into small
  * pieces, later specific handlers can handle them one by one.
  *

@@ -1956,7 +1956,7 @@ bail:
 /*
  * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
- * randomness to the timeout to minimize multple nodes firing the timer at the
+ * randomness to the timeout to minimize multiple nodes firing the timer at the
  * same time.
  */
 static inline unsigned long ocfs2_orphan_scan_timeout(void)

@@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
 	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 	/*
-	 * moving goal is not allowd to start with a group desc blok(#0 blk)
+	 * moving goal is not allowed to start with a group desc blok(#0 blk)
 	 * let's compromise to the latter cluster.
 	 */
 	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
@@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 	/*
 	 * probe the victim cluster group to find a proper
-	 * region to fit wanted movement, it even will perfrom
+	 * region to fit wanted movement, it even will perform
 	 * a best-effort attempt by compromising to a threshold
 	 * around the goal.
 	 */
@@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
 	}
 	/*
-	 * rememer ip_xattr_sem also needs to be held if necessary
+	 * remember ip_xattr_sem also needs to be held if necessary
 	 */
 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
@@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 	context->range = &range;
 	/*
-	 * ok, the default theshold for the defragmentation
+	 * ok, the default threshold for the defragmentation
 	 * is 1M, since our maximum clustersize was 1M also.
 	 * any thought?
 	 */

@@ -132,7 +132,7 @@
  * well as the name of the cluster being joined.
  * mount.ocfs2 must pass in a matching stack name.
  *
- * If not set, the classic stack will be used. This is compatbile with
+ * If not set, the classic stack will be used. This is compatible with
  * all older versions.
  */
 #define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080
@@ -143,7 +143,7 @@
 /* Support for extended attributes */
 #define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200
-/* Support for indexed directores */
+/* Support for indexed directories */
 #define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400
 /* Metadata checksum and error correction */
@@ -156,7 +156,7 @@
 #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000
 /*
- * Incompat bit to indicate useable clusterinfo with stackflags for all
+ * Incompat bit to indicate usable clusterinfo with stackflags for all
  * cluster stacks (userspace adnd o2cb). If this bit is set,
  * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set.
  */
@@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block {
 		struct ocfs2_xattr_header xb_header; /* xattr header if this
 							block contains xattr */
 		struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this
-							block cotains xattr
+							block contains xattr
 							tree. */
 	} xb_attrs;
 };

@@ -215,7 +215,7 @@ struct ocfs2_move_extents {
 						   movement less likely
 						   to fail, may make fs
 						   even more fragmented */
-#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation
+#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation
 						   completely gets done.
 						 */

@@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = {
 	[OCFS2_LOCK_TYPE_DATA] = "Data",
 	[OCFS2_LOCK_TYPE_SUPER] = "Super",
 	[OCFS2_LOCK_TYPE_RENAME] = "Rename",
-	/* Need to differntiate from [R]ename.. serializing writes is the
+	/* Need to differentiate from [R]ename.. serializing writes is the
 	 * important job it does, anyway. */
 	[OCFS2_LOCK_TYPE_RW] = "Write/Read",
 	[OCFS2_LOCK_TYPE_DENTRY] = "Dentry",

@@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
 	 *
 	 * If we will insert a new one, this is easy and only happens
 	 * during adding refcounted flag to the extent, so we don't
-	 * have a chance of spliting. We just need one record.
+	 * have a chance of splitting. We just need one record.
 	 *
 	 * If the refcount rec already exists, that would be a little
 	 * complicated. we may have to:
@@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
 /*
  * Calculate out the start and number of virtual clusters we need to CoW.
  *
- * cpos is vitual start cluster position we want to do CoW in a
+ * cpos is virtual start cluster position we want to do CoW in a
  * file and write_len is the cluster length.
  * max_cpos is the place where we want to stop CoW intentionally.
  *
- * Normal we will start CoW from the beginning of extent record cotaining cpos.
+ * Normal we will start CoW from the beginning of extent record containing cpos.
  * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
  * get good I/O from the resulting extent tree.
  */

@@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation {
 #define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */
 #define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be
-					 * destroyed immedately after use */
+					 * destroyed immediately after use */
 #define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed
 					 * directory btree */
@@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
 /**
  * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used.
  * @resmap: reservations bitmap
- * @resv: optional reservation to recalulate based on new bitmap
+ * @resv: optional reservation to recalculate based on new bitmap
  * @cstart: start of allocation in clusters
  * @clen: end of allocation in clusters.
  *

@@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
 }
 /*
- * o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB
+ * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
  * contents, it will zero out the LVB. Thus the caller can always trust
  * the contents.
  */

@@ -210,7 +210,7 @@ struct ocfs2_stack_operations {
 			 struct file_lock *fl);
 	/*
-	 * This is an optoinal debugging hook. If provided, the
+	 * This is an optional debugging hook. If provided, the
 	 * stack can dump debugging information about this lock.
 	 */
 	void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);

@@ -1803,7 +1803,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 	osb = OCFS2_SB(sb);
 	BUG_ON(!osb);
-	/* Remove file check sysfs related directores/files,
+	/* Remove file check sysfs related directories/files,
 	 * and wait for the pending file check operations */
 	ocfs2_filecheck_remove_sysfs(osb);

@@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir,
 	 * 256(name) + 80(value) + 16(entry) = 352 bytes,
 	 * The max space of acl xattr taken inline is
 	 * 80(value) + 16(entry) * 2(if directory) = 192 bytes,
-	 * when blocksize = 512, may reserve one more cluser for
+	 * when blocksize = 512, may reserve one more cluster for
 	 * xattr bucket, otherwise reserve one metadata block
 	 * for them is ok.
 	 * If this is a new directory with inline data,
@@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b)
 /*
  * defrag a xattr bucket if we find that the bucket has some
- * holes beteen name/value pairs.
+ * holes between name/value pairs.
  * We will move all the name/value pairs to the end of the bucket
  * so that we can spare some space for insertion.
  */
@@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode,
 * 2. If cluster_size == bucket_size:
 *    a) If the previous extent rec has more than one cluster and the insert
 *       place isn't in the last cluster, copy the entire last cluster to the
- *       new one. This time, we don't need to upate the first_bh and header_bh
+ *       new one. This time, we don't need to update the first_bh and header_bh
 *       since they will not be moved into the new cluster.
 *    b) Otherwise, move the bottom half of the xattrs in the last cluster into
 *       the new one. And we set the extend flag to zero if the insert place is
@@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink {
 /*
  * Given a xattr header and xe offset,
  * return the proper xv and the corresponding bh.
- * xattr in inode, block and xattr tree have different implementaions.
+ * xattr in inode, block and xattr tree have different implementations.
  */
 typedef int (get_xattr_value_root)(struct super_block *sb,
 				   struct buffer_head *bh,
@@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb,
 }
 /*
- * Lock the meta_ac and caculate how much credits we need for reflink xattrs.
+ * Lock the meta_ac and calculate how much credits we need for reflink xattrs.
  * It is only used for inline xattr and xattr block.
  */
 static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,