mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:24:45 +01:00
New code for 6.10:
* Introduce Parent Pointer extended attribute for inodes.
* Online Repair
- Implement atomic file content exchanges i.e. exchange ranges of bytes
between two files atomically.
- Create temporary files to repair file-based metadata. This uses atomic
file content exchange facility to swap file fork mappings between the
temporary file and the metadata inode.
- Allow callers of directory/xattr code to set an explicit owner number to
be written into the header fields of any new blocks that are created.
This is required to avoid walking every block of the new structure and
modify their ownership during online repair.
- Repair
- Extended attributes
- Inode unlinked state
- Directories
- Symbolic links
- AGI's unlinked inode list.
- Parent pointers.
- Move Orphan files to lost and found directory.
- Fixes for Inode repair functionality.
- Introduce a new sub-AG FITRIM implementation to reduce the duration for
which the AGF lock is held.
- Updates for the design documentation.
- Use Parent Pointers to assist in checking directories, parent pointers,
extended attributes, and link counts.
* Bring back delalloc support for realtime devices which have an extent size
that is equal to filesystem's block size.
* Improve performance of log incompat feature handling.
* Fixes
- Prevent userspace from reading invalid file data due to incorrect
update of file size when performing a non-atomic clone operation.
- Minor fixes to online repair.
- Fix confusing return values from xfs_bmapi_write().
- Fix an out of bounds access due to incorrect h_size during log recovery.
- Defer upgrading the extent counters in xfs_reflink_end_cow_extent() until
we know we are going to modify the extent mapping.
- Remove racy access to if_bytes check in xfs_reflink_end_cow_extent().
- Fix sparse warnings.
* Cleanups
- Hold inode locks on all files involved in a rename until the completion
of the operation. This is in preparation for the parent pointers patchset
where parent pointers are applied in a separate chained update from the
actual directory update.
- Compile out v4 support when disabled.
- Cleanup xfs_extent_busy_clear().
- Remove unused flags and fields from struct xfs_da_args.
- Remove definitions of unused functions.
- Improve extended attribute validation.
- Add higher level directory operations helpers to remove duplication of
code.
- Cleanup quota (un)reservation interfaces.
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQQjMC4mbgVeU7MxEIYH7y4RirJu9AUCZjZC0wAKCRAH7y4RirJu
9HsCAPoCQvmPefDv56aMb5JEQNpv9dPz2Djj14hqLytQs5P/twD+LF5NhJgQNDUo
Lwnb0tmkAhmG9Y4CCiN1FwSj1rq59gE=
=2hXB
-----END PGP SIGNATURE-----
Merge tag 'xfs-6.10-merge-6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs updates from Chandan Babu:
"Online repair feature continues to be expanded. Also, we now support
delayed allocation for realtime devices which have an extent size that
is equal to filesystem's block size.
New code:
- Introduce Parent Pointer extended attribute for inodes
- Bring back delalloc support for realtime devices which have an
extent size that is equal to filesystem's block size
- Improve performance of log incompat feature handling
Online Repair:
- Implement atomic file content exchanges i.e. exchange ranges of
bytes between two files atomically
- Create temporary files to repair file-based metadata. This uses
atomic file content exchange facility to swap file fork mappings
between the temporary file and the metadata inode
- Allow callers of directory/xattr code to set an explicit owner
number to be written into the header fields of any new blocks that
are created. This is required to avoid walking every block of the
new structure and modify their ownership during online repair
- Repair more data structures:
- Extended attributes
- Inode unlinked state
- Directories
- Symbolic links
- AGI's unlinked inode list
- Parent pointers
- Move Orphan files to lost and found directory
- Fixes for Inode repair functionality
- Introduce a new sub-AG FITRIM implementation to reduce the duration
for which the AGF lock is held
- Updates for the design documentation
- Use Parent Pointers to assist in checking directories, parent
pointers, extended attributes, and link counts
Fixes:
- Prevent userspace from reading invalid file data due to incorrect
update of file size when performing a non-atomic clone operation
- Minor fixes to online repair
- Fix confusing return values from xfs_bmapi_write()
- Fix an out of bounds access due to incorrect h_size during log
recovery
- Defer upgrading the extent counters in xfs_reflink_end_cow_extent()
until we know we are going to modify the extent mapping
- Remove racy access to if_bytes check in
xfs_reflink_end_cow_extent()
- Fix sparse warnings
Cleanups:
- Hold inode locks on all files involved in a rename until the
completion of the operation. This is in preparation for the parent
pointers patchset where parent pointers are applied in a separate
chained update from the actual directory update
- Compile out v4 support when disabled
- Cleanup xfs_extent_busy_clear()
- Remove unused flags and fields from struct xfs_da_args
- Remove definitions of unused functions
- Improve extended attribute validation
- Add higher level directory operations helpers to remove duplication
of code
- Cleanup quota (un)reservation interfaces"
* tag 'xfs-6.10-merge-6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (221 commits)
xfs: simplify iext overflow checking and upgrade
xfs: remove a racy if_bytes check in xfs_reflink_end_cow_extent
xfs: upgrade the extent counters in xfs_reflink_end_cow_extent later
xfs: xfs_quota_unreserve_blkres can't fail
xfs: consolidate the xfs_quota_reserve_blkres definitions
xfs: clean up buffer allocation in xlog_do_recovery_pass
xfs: fix log recovery buffer allocation for the legacy h_size fixup
xfs: widen flags argument to the xfs_iflags_* helpers
xfs: minor cleanups of xfs_attr3_rmt_blocks
xfs: create a helper to compute the blockcount of a max sized remote value
xfs: turn XFS_ATTR3_RMT_BUF_SPACE into a function
xfs: use unsigned ints for non-negative quantities in xfs_attr_remote.c
xfs: do not allocate the entire delalloc extent in xfs_bmapi_write
xfs: fix xfs_bmap_add_extent_delay_real for partial conversions
xfs: remove the xfs_iext_peek_prev_extent call in xfs_bmapi_allocate
xfs: pass the actual offset and len to allocate to xfs_bmapi_allocate
xfs: don't open code XFS_FILBLKS_MIN in xfs_bmapi_write
xfs: lift a xfs_valid_startblock into xfs_bmapi_allocate
xfs: remove the unusued tmp_logflags variable in xfs_bmapi_allocate
xfs: fix error returns from xfs_bmapi_write
...
This commit is contained in:
commit
119d1b8a5d
186 changed files with 25098 additions and 3038 deletions
|
|
@ -2167,7 +2167,7 @@ The ``xfblob_free`` function frees a specific blob, and the ``xfblob_truncate``
|
|||
function frees them all because compaction is not needed.
|
||||
|
||||
The details of repairing directories and extended attributes will be discussed
|
||||
in a subsequent section about atomic extent swapping.
|
||||
in a subsequent section about atomic file content exchanges.
|
||||
However, it should be noted that these repair functions only use blob storage
|
||||
to cache a small number of entries before adding them to a temporary ondisk
|
||||
file, which is why compaction is not required.
|
||||
|
|
@ -2802,7 +2802,8 @@ follows this format:
|
|||
|
||||
Repairs for file-based metadata such as extended attributes, directories,
|
||||
symbolic links, quota files and realtime bitmaps are performed by building a
|
||||
new structure attached to a temporary file and swapping the forks.
|
||||
new structure attached to a temporary file and exchanging all mappings in the
|
||||
file forks.
|
||||
Afterward, the mappings in the old file fork are the candidate blocks for
|
||||
disposal.
|
||||
|
||||
|
|
@ -3851,8 +3852,8 @@ Because file forks can consume as much space as the entire filesystem, repairs
|
|||
cannot be staged in memory, even when a paging scheme is available.
|
||||
Therefore, online repair of file-based metadata creates a temporary file in
|
||||
the XFS filesystem, writes a new structure at the correct offsets into the
|
||||
temporary file, and atomically swaps the fork mappings (and hence the fork
|
||||
contents) to commit the repair.
|
||||
temporary file, and atomically exchanges all file fork mappings (and hence the
|
||||
fork contents) to commit the repair.
|
||||
Once the repair is complete, the old fork can be reaped as necessary; if the
|
||||
system goes down during the reap, the iunlink code will delete the blocks
|
||||
during log recovery.
|
||||
|
|
@ -3862,10 +3863,11 @@ consistent to use a temporary file safely!
|
|||
This dependency is the reason why online repair can only use pageable kernel
|
||||
memory to stage ondisk space usage information.
|
||||
|
||||
Swapping metadata extents with a temporary file requires the owner field of the
|
||||
block headers to match the file being repaired and not the temporary file. The
|
||||
directory, extended attribute, and symbolic link functions were all modified to
|
||||
allow callers to specify owner numbers explicitly.
|
||||
Exchanging metadata file mappings with a temporary file requires the owner
|
||||
field of the block headers to match the file being repaired and not the
|
||||
temporary file.
|
||||
The directory, extended attribute, and symbolic link functions were all
|
||||
modified to allow callers to specify owner numbers explicitly.
|
||||
|
||||
There is a downside to the reaping process -- if the system crashes during the
|
||||
reap phase and the fork extents are crosslinked, the iunlink processing will
|
||||
|
|
@ -3974,8 +3976,8 @@ The proposed patches are in the
|
|||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=repair-tempfiles>`_
|
||||
series.
|
||||
|
||||
Atomic Extent Swapping
|
||||
----------------------
|
||||
Logged File Content Exchanges
|
||||
-----------------------------
|
||||
|
||||
Once repair builds a temporary file with a new data structure written into
|
||||
it, it must commit the new changes into the existing file.
|
||||
|
|
@ -4010,17 +4012,21 @@ e. Old blocks in the file may be cross-linked with another structure and must
|
|||
These problems are overcome by creating a new deferred operation and a new type
|
||||
of log intent item to track the progress of an operation to exchange two file
|
||||
ranges.
|
||||
The new deferred operation type chains together the same transactions used by
|
||||
the reverse-mapping extent swap code.
|
||||
The new exchange operation type chains together the same transactions used by
|
||||
the reverse-mapping extent swap code, but records intermediate progress in the
|
||||
log so that operations can be restarted after a crash.
|
||||
This new functionality is called the file contents exchange (xfs_exchrange)
|
||||
code.
|
||||
The underlying implementation exchanges file fork mappings (xfs_exchmaps).
|
||||
The new log item records the progress of the exchange to ensure that once an
|
||||
exchange begins, it will always run to completion, even if there are
|
||||
interruptions.
|
||||
The new ``XFS_SB_FEAT_INCOMPAT_LOG_ATOMIC_SWAP`` log-incompatible feature flag
|
||||
The new ``XFS_SB_FEAT_INCOMPAT_EXCHRANGE`` incompatible feature flag
|
||||
in the superblock protects these new log item records from being replayed on
|
||||
old kernels.
|
||||
|
||||
The proposed patchset is the
|
||||
`atomic extent swap
|
||||
`file contents exchange
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=atomic-file-updates>`_
|
||||
series.
|
||||
|
||||
|
|
@ -4047,9 +4053,6 @@ series.
|
|||
| one ``struct rw_semaphore`` for each feature. |
|
||||
| The log cleaning code tries to take this rwsem in exclusive mode to |
|
||||
| clear the bit; if the lock attempt fails, the feature bit remains set. |
|
||||
| Filesystem code signals its intention to use a log incompat feature in a |
|
||||
| transaction by calling ``xlog_use_incompat_feat``, which takes the rwsem |
|
||||
| in shared mode. |
|
||||
| The code supporting a log incompat feature should create wrapper |
|
||||
| functions to obtain the log feature and call |
|
||||
| ``xfs_add_incompat_log_feature`` to set the feature bits in the primary |
|
||||
|
|
@ -4064,72 +4067,73 @@ series.
|
|||
| The feature bit will not be cleared from the superblock until the log |
|
||||
| becomes clean. |
|
||||
| |
|
||||
| Log-assisted extended attribute updates and atomic extent swaps both use |
|
||||
| log incompat features and provide convenience wrappers around the |
|
||||
| Log-assisted extended attribute updates and file content exchanges both  |
|
||||
| use log incompat features and provide convenience wrappers around the |
|
||||
| functionality. |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
||||
Mechanics of an Atomic Extent Swap
|
||||
``````````````````````````````````
|
||||
Mechanics of a Logged File Content Exchange
|
||||
```````````````````````````````````````````
|
||||
|
||||
Swapping entire file forks is a complex task.
|
||||
Exchanging contents between file forks is a complex task.
|
||||
The goal is to exchange all file fork mappings between two file fork offset
|
||||
ranges.
|
||||
There are likely to be many extent mappings in each fork, and the edges of
|
||||
the mappings aren't necessarily aligned.
|
||||
Furthermore, there may be other updates that need to happen after the swap,
|
||||
Furthermore, there may be other updates that need to happen after the exchange,
|
||||
such as exchanging file sizes, inode flags, or conversion of fork data to local
|
||||
format.
|
||||
This is roughly the format of the new deferred extent swap work item:
|
||||
This is roughly the format of the new deferred exchange-mapping work item:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
struct xfs_swapext_intent {
|
||||
struct xfs_exchmaps_intent {
|
||||
/* Inodes participating in the operation. */
|
||||
struct xfs_inode *sxi_ip1;
|
||||
struct xfs_inode *sxi_ip2;
|
||||
struct xfs_inode *xmi_ip1;
|
||||
struct xfs_inode *xmi_ip2;
|
||||
|
||||
/* File offset range information. */
|
||||
xfs_fileoff_t sxi_startoff1;
|
||||
xfs_fileoff_t sxi_startoff2;
|
||||
xfs_filblks_t sxi_blockcount;
|
||||
xfs_fileoff_t xmi_startoff1;
|
||||
xfs_fileoff_t xmi_startoff2;
|
||||
xfs_filblks_t xmi_blockcount;
|
||||
|
||||
/* Set these file sizes after the operation, unless negative. */
|
||||
xfs_fsize_t sxi_isize1;
|
||||
xfs_fsize_t sxi_isize2;
|
||||
xfs_fsize_t xmi_isize1;
|
||||
xfs_fsize_t xmi_isize2;
|
||||
|
||||
/* XFS_SWAP_EXT_* log operation flags */
|
||||
uint64_t sxi_flags;
|
||||
/* XFS_EXCHMAPS_* log operation flags */
|
||||
uint64_t xmi_flags;
|
||||
};
|
||||
|
||||
The new log intent item contains enough information to track two logical fork
|
||||
offset ranges: ``(inode1, startoff1, blockcount)`` and ``(inode2, startoff2,
|
||||
blockcount)``.
|
||||
Each step of a swap operation exchanges the largest file range mapping possible
|
||||
from one file to the other.
|
||||
After each step in the swap operation, the two startoff fields are incremented
|
||||
and the blockcount field is decremented to reflect the progress made.
|
||||
The flags field captures behavioral parameters such as swapping the attr fork
|
||||
instead of the data fork and other work to be done after the extent swap.
|
||||
The two isize fields are used to swap the file size at the end of the operation
|
||||
if the file data fork is the target of the swap operation.
|
||||
Each step of an exchange operation exchanges the largest file range mapping
|
||||
possible from one file to the other.
|
||||
After each step in the exchange operation, the two startoff fields are
|
||||
incremented and the blockcount field is decremented to reflect the progress
|
||||
made.
|
||||
The flags field captures behavioral parameters such as exchanging attr fork
|
||||
mappings instead of the data fork and other work to be done after the exchange.
|
||||
The two isize fields are used to exchange the file sizes at the end of the
|
||||
operation if the file data fork is the target of the operation.
|
||||
|
||||
When the extent swap is initiated, the sequence of operations is as follows:
|
||||
When the exchange is initiated, the sequence of operations is as follows:
|
||||
|
||||
1. Create a deferred work item for the extent swap.
|
||||
At the start, it should contain the entirety of the file ranges to be
|
||||
swapped.
|
||||
1. Create a deferred work item for the file mapping exchange.
|
||||
At the start, it should contain the entirety of the file block ranges to be
|
||||
exchanged.
|
||||
|
||||
2. Call ``xfs_defer_finish`` to process the exchange.
|
||||
This is encapsulated in ``xrep_tempswap_contents`` for scrub operations.
|
||||
This is encapsulated in ``xrep_tempexch_contents`` for scrub operations.
|
||||
This will log an extent swap intent item to the transaction for the deferred
|
||||
extent swap work item.
|
||||
mapping exchange work item.
|
||||
|
||||
3. Until ``sxi_blockcount`` of the deferred extent swap work item is zero,
|
||||
3. Until ``xmi_blockcount`` of the deferred mapping exchange work item is zero,
|
||||
|
||||
a. Read the block maps of both file ranges starting at ``sxi_startoff1`` and
|
||||
``sxi_startoff2``, respectively, and compute the longest extent that can
|
||||
be swapped in a single step.
|
||||
a. Read the block maps of both file ranges starting at ``xmi_startoff1`` and
|
||||
``xmi_startoff2``, respectively, and compute the longest extent that can
|
||||
be exchanged in a single step.
|
||||
This is the minimum of the two ``br_blockcount`` s in the mappings.
|
||||
Keep advancing through the file forks until at least one of the mappings
|
||||
contains written blocks.
|
||||
|
|
@ -4151,20 +4155,20 @@ When the extent swap is initiated, the sequence of operations is as follows:
|
|||
|
||||
g. Extend the ondisk size of either file if necessary.
|
||||
|
||||
h. Log an extent swap done log item for the extent swap intent log item
|
||||
that was read at the start of step 3.
|
||||
h. Log a mapping exchange done log item for the mapping exchange intent log
|
||||
item that was read at the start of step 3.
|
||||
|
||||
i. Compute the amount of file range that has just been covered.
|
||||
This quantity is ``(map1.br_startoff + map1.br_blockcount -
|
||||
sxi_startoff1)``, because step 3a could have skipped holes.
|
||||
xmi_startoff1)``, because step 3a could have skipped holes.
|
||||
|
||||
j. Increase the starting offsets of ``sxi_startoff1`` and ``sxi_startoff2``
|
||||
j. Increase the starting offsets of ``xmi_startoff1`` and ``xmi_startoff2``
|
||||
by the number of blocks computed in the previous step, and decrease
|
||||
``sxi_blockcount`` by the same quantity.
|
||||
``xmi_blockcount`` by the same quantity.
|
||||
This advances the cursor.
|
||||
|
||||
k. Log a new extent swap intent log item reflecting the advanced state of
|
||||
the work item.
|
||||
k. Log a new mapping exchange intent log item reflecting the advanced state
|
||||
of the work item.
|
||||
|
||||
l. Return the proper error code (EAGAIN) to the deferred operation manager
|
||||
to inform it that there is more work to be done.
|
||||
|
|
@ -4175,22 +4179,23 @@ When the extent swap is initiated, the sequence of operations is as follows:
|
|||
This will be discussed in more detail in subsequent sections.
|
||||
|
||||
If the filesystem goes down in the middle of an operation, log recovery will
|
||||
find the most recent unfinished extent swap log intent item and restart from
|
||||
there.
|
||||
This is how extent swapping guarantees that an outside observer will either see
|
||||
the old broken structure or the new one, and never a mismash of both.
|
||||
find the most recent unfinished mapping exchange log intent item and restart
|
||||
from there.
|
||||
This is how atomic file mapping exchanges guarantee that an outside observer
|
||||
will either see the old broken structure or the new one, and never a mishmash of
|
||||
both.
|
||||
|
||||
Preparation for Extent Swapping
|
||||
```````````````````````````````
|
||||
Preparation for File Content Exchanges
|
||||
``````````````````````````````````````
|
||||
|
||||
There are a few things that need to be taken care of before initiating an
|
||||
atomic extent swap operation.
|
||||
atomic file mapping exchange operation.
|
||||
First, regular files require the page cache to be flushed to disk before the
|
||||
operation begins, and directio writes to be quiesced.
|
||||
Like any filesystem operation, extent swapping must determine the maximum
|
||||
amount of disk space and quota that can be consumed on behalf of both files in
|
||||
the operation, and reserve that quantity of resources to avoid an unrecoverable
|
||||
out of space failure once it starts dirtying metadata.
|
||||
Like any filesystem operation, file mapping exchanges must determine the
|
||||
maximum amount of disk space and quota that can be consumed on behalf of both
|
||||
files in the operation, and reserve that quantity of resources to avoid an
|
||||
unrecoverable out of space failure once it starts dirtying metadata.
|
||||
The preparation step scans the ranges of both files to estimate:
|
||||
|
||||
- Data device blocks needed to handle the repeated updates to the fork
|
||||
|
|
@ -4204,56 +4209,59 @@ The preparation step scans the ranges of both files to estimate:
|
|||
to different extents on the realtime volume, which could happen if the
|
||||
operation fails to run to completion.
|
||||
|
||||
The need for precise estimation increases the run time of the swap operation,
|
||||
but it is very important to maintain correct accounting.
|
||||
The filesystem must not run completely out of free space, nor can the extent
|
||||
swap ever add more extent mappings to a fork than it can support.
|
||||
The need for precise estimation increases the run time of the exchange
|
||||
operation, but it is very important to maintain correct accounting.
|
||||
The filesystem must not run completely out of free space, nor can the mapping
|
||||
exchange ever add more extent mappings to a fork than it can support.
|
||||
Regular users are required to abide the quota limits, though metadata repairs
|
||||
may exceed quota to resolve inconsistent metadata elsewhere.
|
||||
|
||||
Special Features for Swapping Metadata File Extents
|
||||
```````````````````````````````````````````````````
|
||||
Special Features for Exchanging Metadata File Contents
|
||||
``````````````````````````````````````````````````````
|
||||
|
||||
Extended attributes, symbolic links, and directories can set the fork format to
|
||||
"local" and treat the fork as a literal area for data storage.
|
||||
Metadata repairs must take extra steps to support these cases:
|
||||
|
||||
- If both forks are in local format and the fork areas are large enough, the
|
||||
swap is performed by copying the incore fork contents, logging both forks,
|
||||
and committing.
|
||||
The atomic extent swap mechanism is not necessary, since this can be done
|
||||
with a single transaction.
|
||||
exchange is performed by copying the incore fork contents, logging both
|
||||
forks, and committing.
|
||||
The atomic file mapping exchange mechanism is not necessary, since this can
|
||||
be done with a single transaction.
|
||||
|
||||
- If both forks map blocks, then the regular atomic extent swap is used.
|
||||
- If both forks map blocks, then the regular atomic file mapping exchange is
|
||||
used.
|
||||
|
||||
- Otherwise, only one fork is in local format.
|
||||
The contents of the local format fork are converted to a block to perform the
|
||||
swap.
|
||||
exchange.
|
||||
The conversion to block format must be done in the same transaction that
|
||||
logs the initial extent swap intent log item.
|
||||
The regular atomic extent swap is used to exchange the mappings.
|
||||
Special flags are set on the swap operation so that the transaction can be
|
||||
rolled one more time to convert the second file's fork back to local format
|
||||
so that the second file will be ready to go as soon as the ILOCK is dropped.
|
||||
logs the initial mapping exchange intent log item.
|
||||
The regular atomic mapping exchange is used to exchange the metadata file
|
||||
mappings.
|
||||
Special flags are set on the exchange operation so that the transaction can
|
||||
be rolled one more time to convert the second file's fork back to local
|
||||
format so that the second file will be ready to go as soon as the ILOCK is
|
||||
dropped.
|
||||
|
||||
Extended attributes and directories stamp the owning inode into every block,
|
||||
but the buffer verifiers do not actually check the inode number!
|
||||
Although there is no verification, it is still important to maintain
|
||||
referential integrity, so prior to performing the extent swap, online repair
|
||||
builds every block in the new data structure with the owner field of the file
|
||||
being repaired.
|
||||
referential integrity, so prior to performing the mapping exchange, online
|
||||
repair builds every block in the new data structure with the owner field of the
|
||||
file being repaired.
|
||||
|
||||
After a successful swap operation, the repair operation must reap the old fork
|
||||
blocks by processing each fork mapping through the standard :ref:`file extent
|
||||
reaping <reaping>` mechanism that is done post-repair.
|
||||
After a successful exchange operation, the repair operation must reap the old
|
||||
fork blocks by processing each fork mapping through the standard :ref:`file
|
||||
extent reaping <reaping>` mechanism that is done post-repair.
|
||||
If the filesystem should go down during the reap part of the repair, the
|
||||
iunlink processing at the end of recovery will free both the temporary file and
|
||||
whatever blocks were not reaped.
|
||||
However, this iunlink processing omits the cross-link detection of online
|
||||
repair, and is not completely foolproof.
|
||||
|
||||
Swapping Temporary File Extents
|
||||
```````````````````````````````
|
||||
Exchanging Temporary File Contents
|
||||
``````````````````````````````````
|
||||
|
||||
To repair a metadata file, online repair proceeds as follows:
|
||||
|
||||
|
|
@ -4263,14 +4271,14 @@ To repair a metadata file, online repair proceeds as follows:
|
|||
file.
|
||||
The same fork must be written to as is being repaired.
|
||||
|
||||
3. Commit the scrub transaction, since the swap estimation step must be
|
||||
completed before transaction reservations are made.
|
||||
3. Commit the scrub transaction, since the exchange resource estimation step
|
||||
must be completed before transaction reservations are made.
|
||||
|
||||
4. Call ``xrep_tempswap_trans_alloc`` to allocate a new scrub transaction with
|
||||
4. Call ``xrep_tempexch_trans_alloc`` to allocate a new scrub transaction with
|
||||
the appropriate resource reservations, locks, and fill out a ``struct
|
||||
xfs_swapext_req`` with the details of the swap operation.
|
||||
xfs_exchmaps_req`` with the details of the exchange operation.
|
||||
|
||||
5. Call ``xrep_tempswap_contents`` to swap the contents.
|
||||
5. Call ``xrep_tempexch_contents`` to exchange the contents.
|
||||
|
||||
6. Commit the transaction to complete the repair.
|
||||
|
||||
|
|
@ -4312,7 +4320,7 @@ To check the summary file against the bitmap:
|
|||
3. Compare the contents of the xfile against the ondisk file.
|
||||
|
||||
To repair the summary file, write the xfile contents into the temporary file
|
||||
and use atomic extent swap to commit the new contents.
|
||||
and use atomic mapping exchange to commit the new contents.
|
||||
The temporary file is then reaped.
|
||||
|
||||
The proposed patchset is the
|
||||
|
|
@ -4355,8 +4363,8 @@ Salvaging extended attributes is done as follows:
|
|||
memory or there are no more attr fork blocks to examine, unlock the file and
|
||||
add the staged extended attributes to the temporary file.
|
||||
|
||||
3. Use atomic extent swapping to exchange the new and old extended attribute
|
||||
structures.
|
||||
3. Use atomic file mapping exchange to exchange the new and old extended
|
||||
attribute structures.
|
||||
The old attribute blocks are now attached to the temporary file.
|
||||
|
||||
4. Reap the temporary file.
|
||||
|
|
@ -4413,7 +4421,8 @@ salvaging directories is straightforward:
|
|||
directory and add the staged dirents into the temporary directory.
|
||||
Truncate the staging files.
|
||||
|
||||
4. Use atomic extent swapping to exchange the new and old directory structures.
|
||||
4. Use atomic file mapping exchange to exchange the new and old directory
|
||||
structures.
|
||||
The old directory blocks are now attached to the temporary file.
|
||||
|
||||
5. Reap the temporary file.
|
||||
|
|
@ -4456,10 +4465,10 @@ reconstruction of filesystem space metadata.
|
|||
The parent pointer feature, however, makes total directory reconstruction
|
||||
possible.
|
||||
|
||||
XFS parent pointers include the dirent name and location of the entry within
|
||||
the parent directory.
|
||||
XFS parent pointers contain the information needed to identify the
|
||||
corresponding directory entry in the parent directory.
|
||||
In other words, child files use extended attributes to store pointers to
|
||||
parents in the form ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``.
|
||||
parents in the form ``(dirent_name) → (parent_inum, parent_gen)``.
|
||||
The directory checking process can be strengthened to ensure that the target of
|
||||
each dirent also contains a parent pointer pointing back to the dirent.
|
||||
Likewise, each parent pointer can be checked by ensuring that the target of
|
||||
|
|
@ -4467,8 +4476,6 @@ each parent pointer is a directory and that it contains a dirent matching
|
|||
the parent pointer.
|
||||
Both online and offline repair can use this strategy.
|
||||
|
||||
**Note**: The ondisk format of parent pointers is not yet finalized.
|
||||
|
||||
+--------------------------------------------------------------------------+
|
||||
| **Historical Sidebar**: |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
|
@ -4510,8 +4517,58 @@ Both online and offline repair can use this strategy.
|
|||
| Chandan increased the maximum extent counts of both data and attribute |
|
||||
| forks, thereby ensuring that the extended attribute structure can grow |
|
||||
| to handle the maximum hardlink count of any file. |
|
||||
| |
|
||||
| For this second effort, the ondisk parent pointer format as originally |
|
||||
| proposed was ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``. |
|
||||
| The format was changed during development to eliminate the requirement |
|
||||
| of repair tools needing to ensure that the ``dirent_pos`` field    |
|
||||
| always matched when reconstructing a directory. |
|
||||
| |
|
||||
| There were a few other ways to have solved that problem: |
|
||||
| |
|
||||
| 1. The field could be designated advisory, since the other three values |
|
||||
| are sufficient to find the entry in the parent. |
|
||||
| However, this makes indexed key lookup impossible while repairs are |
|
||||
| ongoing. |
|
||||
| |
|
||||
| 2. We could allow creating directory entries at specified offsets, which |
|
||||
| solves the referential integrity problem but runs the risk that |
|
||||
| dirent creation will fail due to conflicts with the free space in the |
|
||||
| directory. |
|
||||
| |
|
||||
| These conflicts could be resolved by appending the directory entry |
|
||||
| and amending the xattr code to support updating an xattr key and |
|
||||
| reindexing the dabtree, though this would have to be performed with |
|
||||
| the parent directory still locked. |
|
||||
| |
|
||||
| 3. Same as above, but remove the old parent pointer entry and add a new |
|
||||
| one atomically. |
|
||||
| |
|
||||
| 4. Change the ondisk xattr format to |
|
||||
| ``(parent_inum, name) → (parent_gen)``, which would provide the attr |
|
||||
| name uniqueness that we require, without forcing repair code to |
|
||||
| update the dirent position. |
|
||||
| Unfortunately, this requires changes to the xattr code to support |
|
||||
| attr names as long as 263 bytes. |
|
||||
| |
|
||||
| 5. Change the ondisk xattr format to ``(parent_inum, hash(name)) → |
|
||||
| (name, parent_gen)``. |
|
||||
| If the hash is sufficiently resistant to collisions (e.g. sha256) |
|
||||
| then this should provide the attr name uniqueness that we require. |
|
||||
| Names shorter than 247 bytes could be stored directly. |
|
||||
| |
|
||||
| 6. Change the ondisk xattr format to ``(dirent_name) → (parent_ino, |
|
||||
| parent_gen)``. This format doesn't require any of the complicated |
|
||||
| nested name hashing of the previous suggestions. However, it was |
|
||||
| discovered that multiple hardlinks to the same inode with the same |
|
||||
| filename caused performance problems with hashed xattr lookups, so |
|
||||
| the parent inumber is now xor'd into the hash index. |
|
||||
| |
|
||||
| In the end, it was decided that solution #6 was the most compact and the |
|
||||
| most performant. A new hash function was designed for parent pointers. |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
||||
|
||||
Case Study: Repairing Directories with Parent Pointers
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
|
@ -4519,8 +4576,9 @@ Directory rebuilding uses a :ref:`coordinated inode scan <iscan>` and
|
|||
a :ref:`directory entry live update hook <liveupdate>` as follows:
|
||||
|
||||
1. Set up a temporary directory for generating the new directory structure,
|
||||
an xfblob for storing entry names, and an xfarray for stashing directory
|
||||
updates.
|
||||
an xfblob for storing entry names, and an xfarray for stashing the fixed
|
||||
size fields involved in a directory update: ``(child inumber, add vs.
|
||||
remove, name cookie, ftype)``.
|
||||
|
||||
2. Set up an inode scanner and hook into the directory entry code to receive
|
||||
updates on directory operations.
|
||||
|
|
@ -4529,73 +4587,36 @@ a :ref:`directory entry live update hook <liveupdate>` as follows:
|
|||
pointer references the directory of interest.
|
||||
If so:
|
||||
|
||||
a. Stash an addname entry for this dirent in the xfarray for later.
|
||||
a. Stash the parent pointer name and an addname entry for this dirent in the
|
||||
xfblob and xfarray, respectively.
|
||||
|
||||
b. When finished scanning that file, flush the stashed updates to the
|
||||
temporary directory.
|
||||
b. When finished scanning that file or the kernel memory consumption exceeds
|
||||
a threshold, flush the stashed updates to the temporary directory.
|
||||
|
||||
4. For each live directory update received via the hook, decide if the child
|
||||
has already been scanned.
|
||||
If so:
|
||||
|
||||
a. Stash an addname or removename entry for this dirent update in the
|
||||
xfarray for later.
|
||||
a. Stash the parent pointer name and an addname or removename entry for this
|
||||
dirent update in the xfblob and xfarray for later.
|
||||
We cannot write directly to the temporary directory because hook
|
||||
functions are not allowed to modify filesystem metadata.
|
||||
Instead, we stash updates in the xfarray and rely on the scanner thread
|
||||
to apply the stashed updates to the temporary directory.
|
||||
|
||||
5. When the scan is complete, atomically swap the contents of the temporary
|
||||
5. When the scan is complete, replay any stashed entries in the xfarray.
|
||||
|
||||
6. When the scan is complete, atomically exchange the contents of the temporary
|
||||
directory and the directory being repaired.
|
||||
The temporary directory now contains the damaged directory structure.
|
||||
|
||||
6. Reap the temporary directory.
|
||||
|
||||
7. Update the dirent position field of parent pointers as necessary.
|
||||
This may require the queuing of a substantial number of xattr log intent
|
||||
items.
|
||||
7. Reap the temporary directory.
|
||||
|
||||
The proposed patchset is the
|
||||
`parent pointers directory repair
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-online-dir-repair>`_
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-fsck>`_
|
||||
series.
|
||||
|
||||
**Unresolved Question**: How will repair ensure that the ``dirent_pos`` fields
|
||||
match in the reconstructed directory?
|
||||
|
||||
*Answer*: There are a few ways to solve this problem:
|
||||
|
||||
1. The field could be designated advisory, since the other three values are
|
||||
sufficient to find the entry in the parent.
|
||||
However, this makes indexed key lookup impossible while repairs are ongoing.
|
||||
|
||||
2. We could allow creating directory entries at specified offsets, which solves
|
||||
the referential integrity problem but runs the risk that dirent creation
|
||||
will fail due to conflicts with the free space in the directory.
|
||||
|
||||
These conflicts could be resolved by appending the directory entry and
|
||||
amending the xattr code to support updating an xattr key and reindexing the
|
||||
dabtree, though this would have to be performed with the parent directory
|
||||
still locked.
|
||||
|
||||
3. Same as above, but remove the old parent pointer entry and add a new one
|
||||
atomically.
|
||||
|
||||
4. Change the ondisk xattr format to ``(parent_inum, name) → (parent_gen)``,
|
||||
which would provide the attr name uniqueness that we require, without
|
||||
forcing repair code to update the dirent position.
|
||||
Unfortunately, this requires changes to the xattr code to support attr
|
||||
names as long as 263 bytes.
|
||||
|
||||
5. Change the ondisk xattr format to ``(parent_inum, hash(name)) →
|
||||
(name, parent_gen)``.
|
||||
If the hash is sufficiently resistant to collisions (e.g. sha256) then
|
||||
this should provide the attr name uniqueness that we require.
|
||||
Names shorter than 247 bytes could be stored directly.
|
||||
|
||||
Discussion is ongoing under the `parent pointers patch deluge
|
||||
<https://www.spinics.net/lists/linux-xfs/msg69397.html>`_.
|
||||
|
||||
Case Study: Repairing Parent Pointers
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
|
@ -4603,8 +4624,9 @@ Online reconstruction of a file's parent pointer information works similarly to
|
|||
directory reconstruction:
|
||||
|
||||
1. Set up a temporary file for generating a new extended attribute structure,
|
||||
an `xfblob<xfblob>` for storing parent pointer names, and an xfarray for
|
||||
stashing parent pointer updates.
|
||||
an xfblob for storing parent pointer names, and an xfarray for stashing the
|
||||
fixed size fields involved in a parent pointer update: ``(parent inumber,
|
||||
parent generation, add vs. remove, name cookie)``.
|
||||
|
||||
2. Set up an inode scanner and hook into the directory entry code to receive
|
||||
updates on directory operations.
|
||||
|
|
@ -4613,34 +4635,36 @@ directory reconstruction:
|
|||
dirent references the file of interest.
|
||||
If so:
|
||||
|
||||
a. Stash an addpptr entry for this parent pointer in the xfblob and xfarray
|
||||
for later.
|
||||
a. Stash the dirent name and an addpptr entry for this parent pointer in the
|
||||
xfblob and xfarray, respectively.
|
||||
|
||||
b. When finished scanning the directory, flush the stashed updates to the
|
||||
temporary directory.
|
||||
b. When finished scanning the directory or the kernel memory consumption
|
||||
exceeds a threshold, flush the stashed updates to the temporary file.
|
||||
|
||||
4. For each live directory update received via the hook, decide if the parent
|
||||
has already been scanned.
|
||||
If so:
|
||||
|
||||
a. Stash an addpptr or removepptr entry for this dirent update in the
|
||||
xfarray for later.
|
||||
a. Stash the dirent name and an addpptr or removepptr entry for this dirent
|
||||
update in the xfblob and xfarray for later.
|
||||
We cannot write parent pointers directly to the temporary file because
|
||||
hook functions are not allowed to modify filesystem metadata.
|
||||
Instead, we stash updates in the xfarray and rely on the scanner thread
|
||||
to apply the stashed parent pointer updates to the temporary file.
|
||||
|
||||
5. Copy all non-parent pointer extended attributes to the temporary file.
|
||||
5. When the scan is complete, replay any stashed entries in the xfarray.
|
||||
|
||||
6. When the scan is complete, atomically swap the attribute fork of the
|
||||
temporary file and the file being repaired.
|
||||
6. Copy all non-parent pointer extended attributes to the temporary file.
|
||||
|
||||
7. When the scan is complete, atomically exchange the mappings of the attribute
|
||||
forks of the temporary file and the file being repaired.
|
||||
The temporary file now contains the damaged extended attribute structure.
|
||||
|
||||
7. Reap the temporary file.
|
||||
8. Reap the temporary file.
|
||||
|
||||
The proposed patchset is the
|
||||
`parent pointers repair
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-online-parent-repair>`_
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=pptrs-fsck>`_
|
||||
series.
|
||||
|
||||
Digression: Offline Checking of Parent Pointers
|
||||
|
|
@ -4651,26 +4675,56 @@ files are erased long before directory tree connectivity checks are performed.
|
|||
Parent pointer checks are therefore a second pass to be added to the existing
|
||||
connectivity checks:
|
||||
|
||||
1. After the set of surviving files has been established (i.e. phase 6),
|
||||
1. After the set of surviving files has been established (phase 6),
|
||||
walk the surviving directories of each AG in the filesystem.
|
||||
This is already performed as part of the connectivity checks.
|
||||
|
||||
2. For each directory entry found, record the name in an xfblob, and store
|
||||
``(child_ag_inum, parent_inum, parent_gen, dirent_pos)`` tuples in a
|
||||
per-AG in-memory slab.
|
||||
2. For each directory entry found,
|
||||
|
||||
a. If the name has already been stored in the xfblob, then use that cookie
|
||||
and skip the next step.
|
||||
|
||||
b. Otherwise, record the name in an xfblob, and remember the xfblob cookie.
|
||||
Unique mappings are critical for
|
||||
|
||||
1. Deduplicating names to reduce memory usage, and
|
||||
|
||||
2. Creating a stable sort key for the parent pointer indexes so that the
|
||||
parent pointer validation described below will work.
|
||||
|
||||
c. Store ``(child_ag_inum, parent_inum, parent_gen, name_hash, name_len,
|
||||
name_cookie)`` tuples in a per-AG in-memory slab. The ``name_hash``
|
||||
referenced in this section is the regular directory entry name hash, not
|
||||
the specialized one used for parent pointer xattrs.
|
||||
|
||||
3. For each AG in the filesystem,
|
||||
|
||||
a. Sort the per-AG tuples in order of child_ag_inum, parent_inum, and
|
||||
dirent_pos.
|
||||
a. Sort the per-AG tuple set in order of ``child_ag_inum``, ``parent_inum``,
|
||||
``name_hash``, and ``name_cookie``.
|
||||
Having a single ``name_cookie`` for each ``name`` is critical for
|
||||
handling the uncommon case of a directory containing multiple hardlinks
|
||||
to the same file where all the names hash to the same value.
|
||||
|
||||
b. For each inode in the AG,
|
||||
|
||||
1. Scan the inode for parent pointers.
|
||||
Record the names in a per-file xfblob, and store ``(parent_inum,
|
||||
parent_gen, dirent_pos)`` tuples in a per-file slab.
|
||||
For each parent pointer found,
|
||||
|
||||
2. Sort the per-file tuples in order of parent_inum, and dirent_pos.
|
||||
a. Validate the ondisk parent pointer.
|
||||
If validation fails, move on to the next parent pointer in the
|
||||
file.
|
||||
|
||||
b. If the name has already been stored in the xfblob, then use that
|
||||
cookie and skip the next step.
|
||||
|
||||
c. Record the name in a per-file xfblob, and remember the xfblob
|
||||
cookie.
|
||||
|
||||
d. Store ``(parent_inum, parent_gen, name_hash, name_len,
|
||||
name_cookie)`` tuples in a per-file slab.
|
||||
|
||||
2. Sort the per-file tuples in order of ``parent_inum``, ``name_hash``,
|
||||
and ``name_cookie``.
|
||||
|
||||
3. Position one slab cursor at the start of the inode's records in the
|
||||
per-AG tuple slab.
|
||||
|
|
@ -4679,28 +4733,37 @@ connectivity checks:
|
|||
|
||||
4. Position a second slab cursor at the start of the per-file tuple slab.
|
||||
|
||||
5. Iterate the two cursors in lockstep, comparing the parent_ino and
|
||||
dirent_pos fields of the records under each cursor.
|
||||
5. Iterate the two cursors in lockstep, comparing the ``parent_ino``,
|
||||
``name_hash``, and ``name_cookie`` fields of the records under each
|
||||
cursor:
|
||||
|
||||
a. Tuples in the per-AG list but not the per-file list are missing and
|
||||
need to be written to the inode.
|
||||
a. If the per-AG cursor is at a lower point in the keyspace than the
|
||||
per-file cursor, then the per-AG cursor points to a missing parent
|
||||
pointer.
|
||||
Add the parent pointer to the inode and advance the per-AG
|
||||
cursor.
|
||||
|
||||
b. Tuples in the per-file list but not the per-AG list are dangling
|
||||
and need to be removed from the inode.
|
||||
b. If the per-file cursor is at a lower point in the keyspace than
|
||||
the per-AG cursor, then the per-file cursor points to a dangling
|
||||
parent pointer.
|
||||
Remove the parent pointer from the inode and advance the per-file
|
||||
cursor.
|
||||
|
||||
c. For tuples in both lists, update the parent_gen and name components
|
||||
of the parent pointer if necessary.
|
||||
c. Otherwise, both cursors point at the same parent pointer.
|
||||
Update the parent_gen component if necessary.
|
||||
Advance both cursors.
|
||||
|
||||
4. Move on to examining link counts, as we do today.
|
||||
|
||||
The proposed patchset is the
|
||||
`offline parent pointers repair
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfsprogs-dev.git/log/?h=pptrs-repair>`_
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfsprogs-dev.git/log/?h=pptrs-fsck>`_
|
||||
series.
|
||||
|
||||
Rebuilding directories from parent pointers in offline repair is very
|
||||
challenging because it currently uses a single-pass scan of the filesystem
|
||||
during phase 3 to decide which files are corrupt enough to be zapped.
|
||||
Rebuilding directories from parent pointers in offline repair would be very
|
||||
challenging because xfs_repair currently uses two single-pass scans of the
|
||||
filesystem during phases 3 and 4 to decide which files are corrupt enough to be
|
||||
zapped.
|
||||
This scan would have to be converted into a multi-pass scan:
|
||||
|
||||
1. The first pass of the scan zaps corrupt inodes, forks, and attributes
|
||||
|
|
@ -4722,6 +4785,130 @@ This scan would have to be converted into a multi-pass scan:
|
|||
|
||||
This code has not yet been constructed.
|
||||
|
||||
.. _dirtree:
|
||||
|
||||
Case Study: Directory Tree Structure
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
As mentioned earlier, the filesystem directory tree is supposed to be a
|
||||
directed acyclic graph structure.
|
||||
However, each node in this graph is a separate ``xfs_inode`` object with its
|
||||
own locks, which makes validating the tree qualities difficult.
|
||||
Fortunately, non-directories are allowed to have multiple parents and cannot
|
||||
have children, so only directories need to be scanned.
|
||||
Directories typically constitute 5-10% of the files in a filesystem, which
|
||||
reduces the amount of work dramatically.
|
||||
|
||||
If the directory tree could be frozen, it would be easy to discover cycles and
|
||||
disconnected regions by running a depth (or breadth) first search downwards
|
||||
from the root directory and marking a bitmap for each directory found.
|
||||
At any point in the walk, trying to set an already set bit means there is a
|
||||
cycle.
|
||||
After the scan completes, XORing the marked inode bitmap with the inode
|
||||
allocation bitmap reveals disconnected inodes.
|
||||
However, one of online repair's design goals is to avoid locking the entire
|
||||
filesystem unless it's absolutely necessary.
|
||||
Directory tree updates can move subtrees across the scanner wavefront on a live
|
||||
filesystem, so the bitmap algorithm cannot be applied.
|
||||
|
||||
Directory parent pointers enable an incremental approach to validation of the
|
||||
tree structure.
|
||||
Instead of using one thread to scan the entire filesystem, multiple threads can
|
||||
walk from individual subdirectories upwards towards the root.
|
||||
For this to work, all directory entries and parent pointers must be internally
|
||||
consistent, each directory entry must have a parent pointer, and the link
|
||||
counts of all directories must be correct.
|
||||
Each scanner thread must be able to take the IOLOCK of an alleged parent
|
||||
directory while holding the IOLOCK of the child directory to prevent either
|
||||
directory from being moved within the tree.
|
||||
This is not possible since the VFS does not take the IOLOCK of a child
|
||||
subdirectory when moving that subdirectory, so instead the scanner stabilizes
|
||||
the parent -> child relationship by taking the ILOCKs and installing a dirent
|
||||
update hook to detect changes.
|
||||
|
||||
The scanning process uses a dirent hook to detect changes to the directories
|
||||
mentioned in the scan data.
|
||||
The scan works as follows:
|
||||
|
||||
1. For each subdirectory in the filesystem,
|
||||
|
||||
a. For each parent pointer of that subdirectory,
|
||||
|
||||
1. Create a path object for that parent pointer, and mark the
|
||||
subdirectory inode number in the path object's bitmap.
|
||||
|
||||
2. Record the parent pointer name and inode number in a path structure.
|
||||
|
||||
3. If the alleged parent is the subdirectory being scrubbed, the path is
|
||||
a cycle.
|
||||
Mark the path for deletion and repeat step 1a with the next
|
||||
subdirectory parent pointer.
|
||||
|
||||
4. Try to mark the alleged parent inode number in a bitmap in the path
|
||||
object.
|
||||
If the bit is already set, then there is a cycle in the directory
|
||||
tree.
|
||||
Mark the path as a cycle and repeat step 1a with the next subdirectory
|
||||
parent pointer.
|
||||
|
||||
5. Load the alleged parent.
|
||||
If the alleged parent is not a linked directory, abort the scan
|
||||
because the parent pointer information is inconsistent.
|
||||
|
||||
6. For each parent pointer of this alleged ancestor directory,
|
||||
|
||||
a. Record the parent pointer name and inode number in the path object
|
||||
if no parent has been set for that level.
|
||||
|
||||
b. If an ancestor has more than one parent, mark the path as corrupt.
|
||||
Repeat step 1a with the next subdirectory parent pointer.
|
||||
|
||||
c. Repeat steps 1a3-1a6 for the ancestor identified in step 1a6a.
|
||||
This repeats until the directory tree root is reached or no parents
|
||||
are found.
|
||||
|
||||
7. If the walk terminates at the root directory, mark the path as ok.
|
||||
|
||||
8. If the walk terminates without reaching the root, mark the path as
|
||||
disconnected.
|
||||
|
||||
2. If the directory entry update hook triggers, check all paths already found
|
||||
by the scan.
|
||||
If the entry matches part of a path, mark that path and the scan stale.
|
||||
When the scanner thread sees that the scan has been marked stale, it deletes
|
||||
all scan data and starts over.
|
||||
|
||||
Repairing the directory tree works as follows:
|
||||
|
||||
1. Walk each path of the target subdirectory.
|
||||
|
||||
a. Corrupt paths and cycle paths are counted as suspect.
|
||||
|
||||
b. Paths already marked for deletion are counted as bad.
|
||||
|
||||
c. Paths that reached the root are counted as good.
|
||||
|
||||
2. If the subdirectory is either the root directory or has zero link count,
|
||||
delete all incoming directory entries in the immediate parents.
|
||||
Repairs are complete.
|
||||
|
||||
3. If the subdirectory has exactly one path, set the dotdot entry to the
|
||||
parent and exit.
|
||||
|
||||
4. If the subdirectory has at least one good path, delete all the other
|
||||
incoming directory entries in the immediate parents.
|
||||
|
||||
5. If the subdirectory has no good paths and more than one suspect path, delete
|
||||
all the other incoming directory entries in the immediate parents.
|
||||
|
||||
6. If the subdirectory has zero paths, attach it to the lost and found.
|
||||
|
||||
The proposed patches are in the
|
||||
`directory tree repair
|
||||
<https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux.git/log/?h=scrub-directory-tree>`_
|
||||
series.
|
||||
|
||||
|
||||
.. _orphanage:
|
||||
|
||||
The Orphanage
|
||||
|
|
@ -4769,14 +4956,22 @@ Orphaned files are adopted by the orphanage as follows:
|
|||
The ``xrep_orphanage_iolock_two`` function follows the inode locking
|
||||
strategy discussed earlier.
|
||||
|
||||
3. Call ``xrep_orphanage_compute_blkres`` and ``xrep_orphanage_compute_name``
|
||||
to compute the new name in the orphanage and the block reservation required.
|
||||
|
||||
4. Use ``xrep_orphanage_adoption_prep`` to reserve resources to the repair
|
||||
3. Use ``xrep_adoption_trans_alloc`` to reserve resources to the repair
|
||||
transaction.
|
||||
|
||||
5. Call ``xrep_orphanage_adopt`` to reparent the orphaned file into the lost
|
||||
and found, and update the kernel dentry cache.
|
||||
4. Call ``xrep_orphanage_compute_name`` to compute the new name in the
|
||||
orphanage.
|
||||
|
||||
5. If the adoption is going to happen, call ``xrep_adoption_reparent`` to
|
||||
reparent the orphaned file into the lost and found and invalidate the dentry
|
||||
cache.
|
||||
|
||||
6. Call ``xrep_adoption_finish`` to commit any filesystem updates, release the
|
||||
orphanage ILOCK, and clean the scrub transaction. Call
|
||||
``xrep_adoption_commit`` to commit the updates and the scrub transaction.
|
||||
|
||||
7. If a runtime error happens, call ``xrep_adoption_cancel`` to release all
|
||||
resources.
|
||||
|
||||
The proposed patches are in the
|
||||
`orphanage adoption
|
||||
|
|
@ -5108,18 +5303,18 @@ make it easier for code readers to understand what has been built, for whom it
|
|||
has been built, and why.
|
||||
Please feel free to contact the XFS mailing list with questions.
|
||||
|
||||
FIEXCHANGE_RANGE
|
||||
----------------
|
||||
XFS_IOC_EXCHANGE_RANGE
|
||||
----------------------
|
||||
|
||||
As discussed earlier, a second frontend to the atomic extent swap mechanism is
|
||||
a new ioctl call that userspace programs can use to commit updates to files
|
||||
atomically.
|
||||
As discussed earlier, a second frontend to the atomic file mapping exchange
|
||||
mechanism is a new ioctl call that userspace programs can use to commit updates
|
||||
to files atomically.
|
||||
This frontend has been out for review for several years now, though the
|
||||
necessary refinements to online repair and lack of customer demand mean that
|
||||
the proposal has not been pushed very hard.
|
||||
|
||||
Extent Swapping with Regular User Files
|
||||
```````````````````````````````````````
|
||||
File Content Exchanges with Regular User Files
|
||||
``````````````````````````````````````````````
|
||||
|
||||
As mentioned earlier, XFS has long had the ability to swap extents between
|
||||
files, which is used almost exclusively by ``xfs_fsr`` to defragment files.
|
||||
|
|
@ -5134,12 +5329,12 @@ the consistency of the fork mappings with the reverse mapping index was to
|
|||
develop an iterative mechanism that used deferred bmap and rmap operations to
|
||||
swap mappings one at a time.
|
||||
This mechanism is identical to steps 2-3 from the procedure above except for
|
||||
the new tracking items, because the atomic extent swap mechanism is an
|
||||
iteration of an existing mechanism and not something totally novel.
|
||||
the new tracking items, because the atomic file mapping exchange mechanism is
|
||||
an iteration of an existing mechanism and not something totally novel.
|
||||
For the narrow case of file defragmentation, the file contents must be
|
||||
identical, so the recovery guarantees are not much of a gain.
|
||||
|
||||
Atomic extent swapping is much more flexible than the existing swapext
|
||||
Atomic file content exchanges are much more flexible than the existing swapext
|
||||
implementations because it can guarantee that the caller never sees a mix of
|
||||
old and new contents even after a crash, and it can operate on two arbitrary
|
||||
file fork ranges.
|
||||
|
|
@ -5150,11 +5345,11 @@ The extra flexibility enables several new use cases:
|
|||
Next, it opens a temporary file and calls the file clone operation to reflink
|
||||
the first file's contents into the temporary file.
|
||||
Writes to the original file should instead be written to the temporary file.
|
||||
Finally, the process calls the atomic extent swap system call
|
||||
(``FIEXCHANGE_RANGE``) to exchange the file contents, thereby committing all
|
||||
of the updates to the original file, or none of them.
|
||||
Finally, the process calls the atomic file mapping exchange system call
|
||||
(``XFS_IOC_EXCHANGE_RANGE``) to exchange the file contents, thereby
|
||||
committing all of the updates to the original file, or none of them.
|
||||
|
||||
.. _swapext_if_unchanged:
|
||||
.. _exchrange_if_unchanged:
|
||||
|
||||
- **Transactional file updates**: The same mechanism as above, but the caller
|
||||
only wants the commit to occur if the original file's contents have not
|
||||
|
|
@ -5163,16 +5358,17 @@ The extra flexibility enables several new use cases:
|
|||
change timestamps of the original file before reflinking its data to the
|
||||
temporary file.
|
||||
When the program is ready to commit the changes, it passes the timestamps
|
||||
into the kernel as arguments to the atomic extent swap system call.
|
||||
into the kernel as arguments to the atomic file mapping exchange system call.
|
||||
The kernel only commits the changes if the provided timestamps match the
|
||||
original file.
|
||||
A new ioctl (``XFS_IOC_COMMIT_RANGE``) is provided to perform this.
|
||||
|
||||
- **Emulation of atomic block device writes**: Export a block device with a
|
||||
logical sector size matching the filesystem block size to force all writes
|
||||
to be aligned to the filesystem block size.
|
||||
Stage all writes to a temporary file, and when that is complete, call the
|
||||
atomic extent swap system call with a flag to indicate that holes in the
|
||||
temporary file should be ignored.
|
||||
atomic file mapping exchange system call with a flag to indicate that holes
|
||||
in the temporary file should be ignored.
|
||||
This emulates an atomic device write in software, and can support arbitrary
|
||||
scattered writes.
|
||||
|
||||
|
|
@ -5254,8 +5450,8 @@ of the file to try to share the physical space with a dummy file.
|
|||
Cloning the extent means that the original owners cannot overwrite the
|
||||
contents; any changes will be written somewhere else via copy-on-write.
|
||||
Clearspace makes its own copy of the frozen extent in an area that is not being
|
||||
cleared, and uses ``FIEDEUPRANGE`` (or the :ref:`atomic extent swap
|
||||
<swapext_if_unchanged>` feature) to change the target file's data extent
|
||||
cleared, and uses ``FIDEDUPERANGE`` (or the :ref:`atomic file content exchanges
|
||||
<exchrange_if_unchanged>` feature) to change the target file's data extent
|
||||
mapping away from the area being cleared.
|
||||
When all other mappings have been moved, clearspace reflinks the space into the
|
||||
space collector file so that it becomes unavailable.
|
||||
|
|
|
|||
|
|
@ -1667,6 +1667,7 @@ int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count)
|
|||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(generic_write_check_limits);
|
||||
|
||||
/* Like generic_write_checks(), but takes size of write instead of iter. */
|
||||
int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
|
||||
|
|
|
|||
|
|
@ -99,8 +99,7 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
|
||||
bool write)
|
||||
int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write)
|
||||
{
|
||||
int mask = write ? MAY_WRITE : MAY_READ;
|
||||
loff_t tmp;
|
||||
|
|
@ -118,6 +117,7 @@ static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
|
|||
|
||||
return fsnotify_file_area_perm(file, mask, &pos, len);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(remap_verify_area);
|
||||
|
||||
/*
|
||||
* Ensure that we don't remap a partial EOF block in the middle of something
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ xfs-y += $(addprefix libxfs/, \
|
|||
xfs_dir2_node.o \
|
||||
xfs_dir2_sf.o \
|
||||
xfs_dquot_buf.o \
|
||||
xfs_exchmaps.o \
|
||||
xfs_ialloc.o \
|
||||
xfs_ialloc_btree.o \
|
||||
xfs_iext_tree.o \
|
||||
|
|
@ -41,6 +42,7 @@ xfs-y += $(addprefix libxfs/, \
|
|||
xfs_inode_buf.o \
|
||||
xfs_log_rlimit.o \
|
||||
xfs_ag_resv.o \
|
||||
xfs_parent.o \
|
||||
xfs_rmap.o \
|
||||
xfs_rmap_btree.o \
|
||||
xfs_refcount.o \
|
||||
|
|
@ -49,6 +51,7 @@ xfs-y += $(addprefix libxfs/, \
|
|||
xfs_symlink_remote.o \
|
||||
xfs_trans_inode.o \
|
||||
xfs_trans_resv.o \
|
||||
xfs_trans_space.o \
|
||||
xfs_types.o \
|
||||
)
|
||||
# xfs_rtbitmap is shared with libxfs
|
||||
|
|
@ -67,6 +70,7 @@ xfs-y += xfs_aops.o \
|
|||
xfs_dir2_readdir.o \
|
||||
xfs_discard.o \
|
||||
xfs_error.o \
|
||||
xfs_exchrange.o \
|
||||
xfs_export.o \
|
||||
xfs_extent_busy.o \
|
||||
xfs_file.o \
|
||||
|
|
@ -74,6 +78,7 @@ xfs-y += xfs_aops.o \
|
|||
xfs_fsmap.o \
|
||||
xfs_fsops.o \
|
||||
xfs_globals.o \
|
||||
xfs_handle.o \
|
||||
xfs_health.o \
|
||||
xfs_icache.o \
|
||||
xfs_ioctl.o \
|
||||
|
|
@ -101,6 +106,7 @@ xfs-y += xfs_log.o \
|
|||
xfs_buf_item.o \
|
||||
xfs_buf_item_recover.o \
|
||||
xfs_dquot_item_recover.o \
|
||||
xfs_exchmaps_item.o \
|
||||
xfs_extfree_item.o \
|
||||
xfs_attr_item.o \
|
||||
xfs_icreate_item.o \
|
||||
|
|
@ -157,11 +163,13 @@ xfs-y += $(addprefix scrub/, \
|
|||
common.o \
|
||||
dabtree.o \
|
||||
dir.o \
|
||||
dirtree.o \
|
||||
fscounters.o \
|
||||
health.o \
|
||||
ialloc.o \
|
||||
inode.o \
|
||||
iscan.o \
|
||||
listxattr.o \
|
||||
nlinks.o \
|
||||
parent.o \
|
||||
readdir.o \
|
||||
|
|
@ -170,6 +178,7 @@ xfs-y += $(addprefix scrub/, \
|
|||
scrub.o \
|
||||
symlink.o \
|
||||
xfarray.o \
|
||||
xfblob.o \
|
||||
xfile.o \
|
||||
)
|
||||
|
||||
|
|
@ -191,23 +200,32 @@ ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y)
|
|||
xfs-y += $(addprefix scrub/, \
|
||||
agheader_repair.o \
|
||||
alloc_repair.o \
|
||||
attr_repair.o \
|
||||
bmap_repair.o \
|
||||
cow_repair.o \
|
||||
dir_repair.o \
|
||||
dirtree_repair.o \
|
||||
findparent.o \
|
||||
fscounters_repair.o \
|
||||
ialloc_repair.o \
|
||||
inode_repair.o \
|
||||
newbt.o \
|
||||
nlinks_repair.o \
|
||||
orphanage.o \
|
||||
parent_repair.o \
|
||||
rcbag_btree.o \
|
||||
rcbag.o \
|
||||
reap.o \
|
||||
refcount_repair.o \
|
||||
repair.o \
|
||||
rmap_repair.o \
|
||||
symlink_repair.o \
|
||||
tempfile.o \
|
||||
)
|
||||
|
||||
xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \
|
||||
rtbitmap_repair.o \
|
||||
rtsummary_repair.o \
|
||||
)
|
||||
|
||||
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix scrub/, \
|
||||
|
|
|
|||
|
|
@ -194,7 +194,7 @@ xfs_initialize_perag_data(
|
|||
pag = xfs_perag_get(mp, index);
|
||||
error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
|
||||
if (!error)
|
||||
error = xfs_ialloc_read_agi(pag, NULL, NULL);
|
||||
error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
|
||||
if (error) {
|
||||
xfs_perag_put(pag);
|
||||
return error;
|
||||
|
|
@ -931,7 +931,7 @@ xfs_ag_shrink_space(
|
|||
int error, err2;
|
||||
|
||||
ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
|
||||
error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
|
||||
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -963,9 +963,7 @@ xfs_ag_shrink_space(
|
|||
* Disable perag reservations so it doesn't cause the allocation request
|
||||
* to fail. We'll reestablish reservation before we return.
|
||||
*/
|
||||
error = xfs_ag_resv_free(pag);
|
||||
if (error)
|
||||
return error;
|
||||
xfs_ag_resv_free(pag);
|
||||
|
||||
/* internal log shouldn't also show up in the free space btrees */
|
||||
error = xfs_alloc_vextent_exact_bno(&args,
|
||||
|
|
@ -1062,7 +1060,7 @@ xfs_ag_extend_space(
|
|||
|
||||
ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);
|
||||
|
||||
error = xfs_ialloc_read_agi(pag, tp, &bp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1119,7 +1117,7 @@ xfs_ag_get_geometry(
|
|||
int error;
|
||||
|
||||
/* Lock the AG headers. */
|
||||
error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
|
||||
error = xfs_ialloc_read_agi(pag, NULL, 0, &agi_bp);
|
||||
if (error)
|
||||
return error;
|
||||
error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
|
||||
|
|
|
|||
|
|
@ -126,14 +126,13 @@ xfs_ag_resv_needed(
|
|||
}
|
||||
|
||||
/* Clean out a reservation */
|
||||
static int
|
||||
static void
|
||||
__xfs_ag_resv_free(
|
||||
struct xfs_perag *pag,
|
||||
enum xfs_ag_resv_type type)
|
||||
{
|
||||
struct xfs_ag_resv *resv;
|
||||
xfs_extlen_t oldresv;
|
||||
int error;
|
||||
|
||||
trace_xfs_ag_resv_free(pag, type, 0);
|
||||
|
||||
|
|
@ -149,30 +148,19 @@ __xfs_ag_resv_free(
|
|||
oldresv = resv->ar_orig_reserved;
|
||||
else
|
||||
oldresv = resv->ar_reserved;
|
||||
error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
|
||||
xfs_add_fdblocks(pag->pag_mount, oldresv);
|
||||
resv->ar_reserved = 0;
|
||||
resv->ar_asked = 0;
|
||||
resv->ar_orig_reserved = 0;
|
||||
|
||||
if (error)
|
||||
trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
|
||||
error, _RET_IP_);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Free a per-AG reservation. */
|
||||
int
|
||||
void
|
||||
xfs_ag_resv_free(
|
||||
struct xfs_perag *pag)
|
||||
{
|
||||
int error;
|
||||
int err2;
|
||||
|
||||
error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
|
||||
err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
|
||||
if (err2 && !error)
|
||||
error = err2;
|
||||
return error;
|
||||
__xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
|
||||
__xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
@ -216,7 +204,7 @@ __xfs_ag_resv_init(
|
|||
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
|
||||
error = -ENOSPC;
|
||||
else
|
||||
error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
|
||||
error = xfs_dec_fdblocks(mp, hidden_space, true);
|
||||
if (error) {
|
||||
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
|
||||
error, _RET_IP_);
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
#ifndef __XFS_AG_RESV_H__
|
||||
#define __XFS_AG_RESV_H__
|
||||
|
||||
int xfs_ag_resv_free(struct xfs_perag *pag);
|
||||
void xfs_ag_resv_free(struct xfs_perag *pag);
|
||||
int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
|
||||
|
||||
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ xfs_prealloc_blocks(
|
|||
}
|
||||
|
||||
/*
|
||||
* The number of blocks per AG that we withhold from xfs_mod_fdblocks to
|
||||
* The number of blocks per AG that we withhold from xfs_dec_fdblocks to
|
||||
* guarantee that we can refill the AGFL prior to allocating space in a nearly
|
||||
* full AG. Although the space described by the free space btrees, the
|
||||
* blocks used by the freesp btrees themselves, and the blocks owned by the
|
||||
|
|
@ -89,7 +89,7 @@ xfs_prealloc_blocks(
|
|||
* until the fs goes down, we subtract this many AG blocks from the incore
|
||||
* fdblocks to ensure user allocation does not overcommit the space the
|
||||
* filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to
|
||||
* withhold space from xfs_mod_fdblocks, so we do not account for that here.
|
||||
* withhold space from xfs_dec_fdblocks, so we do not account for that here.
|
||||
*/
|
||||
#define XFS_ALLOCBT_AGFL_RESERVE 4
|
||||
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@
|
|||
#include "xfs_trace.h"
|
||||
#include "xfs_attr_item.h"
|
||||
#include "xfs_xattr.h"
|
||||
#include "xfs_parent.h"
|
||||
|
||||
struct kmem_cache *xfs_attr_intent_cache;
|
||||
|
||||
|
|
@ -87,6 +88,8 @@ xfs_attr_is_leaf(
|
|||
struct xfs_iext_cursor icur;
|
||||
struct xfs_bmbt_irec imap;
|
||||
|
||||
ASSERT(!xfs_need_iread_extents(ifp));
|
||||
|
||||
if (ifp->if_nextents != 1 || ifp->if_format != XFS_DINODE_FMT_EXTENTS)
|
||||
return false;
|
||||
|
||||
|
|
@ -224,11 +227,21 @@ int
|
|||
xfs_attr_get_ilocked(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
int error;
|
||||
|
||||
xfs_assert_ilocked(args->dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
|
||||
|
||||
if (!xfs_inode_hasattr(args->dp))
|
||||
return -ENOATTR;
|
||||
|
||||
/*
|
||||
* The incore attr fork iext tree must be loaded for xfs_attr_is_leaf
|
||||
* to work correctly.
|
||||
*/
|
||||
error = xfs_iread_extents(args->trans, args->dp, XFS_ATTR_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (args->dp->i_af.if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_shortform_getvalue(args);
|
||||
if (xfs_attr_is_leaf(args->dp))
|
||||
|
|
@ -264,9 +277,11 @@ xfs_attr_get(
|
|||
if (xfs_is_shutdown(args->dp->i_mount))
|
||||
return -EIO;
|
||||
|
||||
if (!args->owner)
|
||||
args->owner = args->dp->i_ino;
|
||||
args->geo = args->dp->i_mount->m_attr_geo;
|
||||
args->whichfork = XFS_ATTR_FORK;
|
||||
args->hashval = xfs_da_hashname(args->name, args->namelen);
|
||||
xfs_attr_sethash(args);
|
||||
|
||||
/* Entirely possible to look up a name which doesn't exist */
|
||||
args->op_flags = XFS_DA_OP_OKNOENT;
|
||||
|
|
@ -363,7 +378,7 @@ xfs_attr_try_sf_addname(
|
|||
* Commit the shortform mods, and we're done.
|
||||
* NOTE: this is also the error path (EEXIST, etc).
|
||||
*/
|
||||
if (!error && !(args->op_flags & XFS_DA_OP_NOTIME))
|
||||
if (!error)
|
||||
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
|
||||
|
||||
if (xfs_has_wsync(dp->i_mount))
|
||||
|
|
@ -401,6 +416,50 @@ out:
|
|||
return error;
|
||||
}
|
||||
|
||||
/* Compute the hash value for a user/root/secure extended attribute */
|
||||
xfs_dahash_t
|
||||
xfs_attr_hashname(
|
||||
const uint8_t *name,
|
||||
int namelen)
|
||||
{
|
||||
return xfs_da_hashname(name, namelen);
|
||||
}
|
||||
|
||||
/* Compute the hash value for any extended attribute from any namespace. */
|
||||
xfs_dahash_t
|
||||
xfs_attr_hashval(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int attr_flags,
|
||||
const uint8_t *name,
|
||||
int namelen,
|
||||
const void *value,
|
||||
int valuelen)
|
||||
{
|
||||
ASSERT(xfs_attr_check_namespace(attr_flags));
|
||||
|
||||
if (attr_flags & XFS_ATTR_PARENT)
|
||||
return xfs_parent_hashattr(mp, name, namelen, value, valuelen);
|
||||
|
||||
return xfs_attr_hashname(name, namelen);
|
||||
}
|
||||
|
||||
/*
|
||||
* PPTR_REPLACE operations require the caller to set the old and new names and
|
||||
* values explicitly. Update the canonical fields to the new name and value
|
||||
* here now that the removal phase has finished.
|
||||
*/
|
||||
static void
|
||||
xfs_attr_update_pptr_replace_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
ASSERT(args->new_namelen > 0);
|
||||
args->name = args->new_name;
|
||||
args->namelen = args->new_namelen;
|
||||
args->value = args->new_value;
|
||||
args->valuelen = args->new_valuelen;
|
||||
xfs_attr_sethash(args);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle the state change on completion of a multi-state attr operation.
|
||||
*
|
||||
|
|
@ -418,14 +477,15 @@ xfs_attr_complete_op(
|
|||
enum xfs_delattr_state replace_state)
|
||||
{
|
||||
struct xfs_da_args *args = attr->xattri_da_args;
|
||||
bool do_replace = args->op_flags & XFS_DA_OP_REPLACE;
|
||||
|
||||
if (!(args->op_flags & XFS_DA_OP_REPLACE))
|
||||
replace_state = XFS_DAS_DONE;
|
||||
else if (xfs_attr_intent_op(attr) == XFS_ATTRI_OP_FLAGS_PPTR_REPLACE)
|
||||
xfs_attr_update_pptr_replace_args(args);
|
||||
|
||||
args->op_flags &= ~XFS_DA_OP_REPLACE;
|
||||
args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
|
||||
if (do_replace)
|
||||
return replace_state;
|
||||
|
||||
return XFS_DAS_DONE;
|
||||
return replace_state;
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
@ -647,8 +707,8 @@ xfs_attr_leaf_remove_attr(
|
|||
int forkoff;
|
||||
int error;
|
||||
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
|
||||
&bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner,
|
||||
args->blkno, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -679,7 +739,7 @@ xfs_attr_leaf_shrink(
|
|||
if (!xfs_attr_is_leaf(dp))
|
||||
return 0;
|
||||
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner, 0, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -868,6 +928,11 @@ xfs_attr_lookup(
|
|||
return -ENOATTR;
|
||||
}
|
||||
|
||||
/* Prerequisite for xfs_attr_is_leaf */
|
||||
error = xfs_iread_extents(args->trans, args->dp, XFS_ATTR_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (xfs_attr_is_leaf(dp)) {
|
||||
error = xfs_attr_leaf_hasname(args, &bp);
|
||||
|
||||
|
|
@ -883,74 +948,72 @@ xfs_attr_lookup(
|
|||
return error;
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_attr_defer_add(
|
||||
struct xfs_da_args *args,
|
||||
unsigned int op_flags)
|
||||
int
|
||||
xfs_attr_add_fork(
|
||||
struct xfs_inode *ip, /* incore inode pointer */
|
||||
int size, /* space new attribute needs */
|
||||
int rsvd) /* xact may use reserved blks */
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_trans *tp; /* transaction pointer */
|
||||
unsigned int blks; /* space reservation */
|
||||
int error; /* error return value */
|
||||
|
||||
struct xfs_attr_intent *new;
|
||||
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
|
||||
|
||||
new = kmem_cache_zalloc(xfs_attr_intent_cache,
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
new->xattri_op_flags = op_flags;
|
||||
new->xattri_da_args = args;
|
||||
blks = XFS_ADDAFORK_SPACE_RES(mp);
|
||||
|
||||
switch (op_flags) {
|
||||
case XFS_ATTRI_OP_FLAGS_SET:
|
||||
new->xattri_dela_state = xfs_attr_init_add_state(args);
|
||||
break;
|
||||
case XFS_ATTRI_OP_FLAGS_REPLACE:
|
||||
new->xattri_dela_state = xfs_attr_init_replace_state(args);
|
||||
break;
|
||||
case XFS_ATTRI_OP_FLAGS_REMOVE:
|
||||
new->xattri_dela_state = xfs_attr_init_remove_state(args);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
}
|
||||
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
|
||||
rsvd, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_defer_add(args->trans, &new->xattri_list, &xfs_attr_defer_type);
|
||||
trace_xfs_attr_defer_add(new->xattri_dela_state, args->dp);
|
||||
if (xfs_inode_has_attr_fork(ip))
|
||||
goto trans_cancel;
|
||||
|
||||
error = xfs_bmap_add_attrfork(tp, ip, size, rsvd);
|
||||
if (error)
|
||||
goto trans_cancel;
|
||||
|
||||
error = xfs_trans_commit(tp);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
return error;
|
||||
|
||||
trans_cancel:
|
||||
xfs_trans_cancel(tp);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: If args->value is NULL the attribute will be removed, just like the
|
||||
* Linux ->setattr API.
|
||||
* Make a change to the xattr structure.
|
||||
*
|
||||
* The caller must have initialized @args, attached dquots, and must not hold
|
||||
* any ILOCKs. Reserved data blocks may be used if @rsvd is set.
|
||||
*
|
||||
* Returns -EEXIST for XFS_ATTRUPDATE_CREATE if the name already exists.
|
||||
* Returns -ENOATTR for XFS_ATTRUPDATE_REMOVE if the name does not exist.
|
||||
* Returns 0 on success, or a negative errno if something else went wrong.
|
||||
*/
|
||||
int
|
||||
xfs_attr_set(
|
||||
struct xfs_da_args *args)
|
||||
struct xfs_da_args *args,
|
||||
enum xfs_attr_update op,
|
||||
bool rsvd)
|
||||
{
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_trans_res tres;
|
||||
bool rsvd = (args->attr_filter & XFS_ATTR_ROOT);
|
||||
int error, local;
|
||||
int rmt_blks = 0;
|
||||
unsigned int total;
|
||||
|
||||
if (xfs_is_shutdown(dp->i_mount))
|
||||
return -EIO;
|
||||
ASSERT(!args->trans);
|
||||
|
||||
error = xfs_qm_dqattach(dp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
args->geo = mp->m_attr_geo;
|
||||
args->whichfork = XFS_ATTR_FORK;
|
||||
args->hashval = xfs_da_hashname(args->name, args->namelen);
|
||||
|
||||
/*
|
||||
* We have no control over the attribute names that userspace passes us
|
||||
* to remove, so we have to allow the name lookup prior to attribute
|
||||
* removal to fail as well. Preserve the logged flag, since we need
|
||||
* to pass that through to the logging code.
|
||||
*/
|
||||
args->op_flags = XFS_DA_OP_OKNOENT |
|
||||
(args->op_flags & XFS_DA_OP_LOGGED);
|
||||
|
||||
if (args->value) {
|
||||
switch (op) {
|
||||
case XFS_ATTRUPDATE_UPSERT:
|
||||
case XFS_ATTRUPDATE_CREATE:
|
||||
case XFS_ATTRUPDATE_REPLACE:
|
||||
XFS_STATS_INC(mp, xs_attr_set);
|
||||
args->total = xfs_attr_calc_size(args, &local);
|
||||
|
||||
|
|
@ -963,16 +1026,18 @@ xfs_attr_set(
|
|||
xfs_attr_sf_entsize_byname(args->namelen,
|
||||
args->valuelen);
|
||||
|
||||
error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
|
||||
error = xfs_attr_add_fork(dp, sf_size, rsvd);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
if (!local)
|
||||
rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
|
||||
} else {
|
||||
break;
|
||||
case XFS_ATTRUPDATE_REMOVE:
|
||||
XFS_STATS_INC(mp, xs_attr_remove);
|
||||
rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
|
||||
rmt_blks = xfs_attr3_max_rmt_blocks(mp);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -984,12 +1049,9 @@ xfs_attr_set(
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (args->value || xfs_inode_hasattr(dp)) {
|
||||
error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
|
||||
if (op != XFS_ATTRUPDATE_REMOVE || xfs_inode_hasattr(dp)) {
|
||||
error = xfs_iext_count_extend(args->trans, dp, XFS_ATTR_FORK,
|
||||
XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
|
||||
if (error == -EFBIG)
|
||||
error = xfs_iext_count_upgrade(args->trans, dp,
|
||||
XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
|
@ -997,26 +1059,26 @@ xfs_attr_set(
|
|||
error = xfs_attr_lookup(args);
|
||||
switch (error) {
|
||||
case -EEXIST:
|
||||
if (!args->value) {
|
||||
if (op == XFS_ATTRUPDATE_REMOVE) {
|
||||
/* if no value, we are performing a remove operation */
|
||||
xfs_attr_defer_add(args, XFS_ATTRI_OP_FLAGS_REMOVE);
|
||||
xfs_attr_defer_add(args, XFS_ATTR_DEFER_REMOVE);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Pure create fails if the attr already exists */
|
||||
if (args->attr_flags & XATTR_CREATE)
|
||||
if (op == XFS_ATTRUPDATE_CREATE)
|
||||
goto out_trans_cancel;
|
||||
xfs_attr_defer_add(args, XFS_ATTRI_OP_FLAGS_REPLACE);
|
||||
xfs_attr_defer_add(args, XFS_ATTR_DEFER_REPLACE);
|
||||
break;
|
||||
case -ENOATTR:
|
||||
/* Can't remove what isn't there. */
|
||||
if (!args->value)
|
||||
if (op == XFS_ATTRUPDATE_REMOVE)
|
||||
goto out_trans_cancel;
|
||||
|
||||
/* Pure replace fails if no existing attr to replace. */
|
||||
if (args->attr_flags & XATTR_REPLACE)
|
||||
if (op == XFS_ATTRUPDATE_REPLACE)
|
||||
goto out_trans_cancel;
|
||||
xfs_attr_defer_add(args, XFS_ATTRI_OP_FLAGS_SET);
|
||||
xfs_attr_defer_add(args, XFS_ATTR_DEFER_SET);
|
||||
break;
|
||||
default:
|
||||
goto out_trans_cancel;
|
||||
|
|
@ -1029,8 +1091,7 @@ xfs_attr_set(
|
|||
if (xfs_has_wsync(mp))
|
||||
xfs_trans_set_sync(args->trans);
|
||||
|
||||
if (!(args->op_flags & XFS_DA_OP_NOTIME))
|
||||
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
|
||||
xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
|
||||
|
||||
/*
|
||||
* Commit the last in the sequence of transactions.
|
||||
|
|
@ -1039,6 +1100,7 @@ xfs_attr_set(
|
|||
error = xfs_trans_commit(args->trans);
|
||||
out_unlock:
|
||||
xfs_iunlock(dp, XFS_ILOCK_EXCL);
|
||||
args->trans = NULL;
|
||||
return error;
|
||||
|
||||
out_trans_cancel:
|
||||
|
|
@ -1051,7 +1113,7 @@ out_trans_cancel:
|
|||
* External routines when attribute list is inside the inode
|
||||
*========================================================================*/
|
||||
|
||||
static inline int xfs_attr_sf_totsize(struct xfs_inode *dp)
|
||||
int xfs_attr_sf_totsize(struct xfs_inode *dp)
|
||||
{
|
||||
struct xfs_attr_sf_hdr *sf = dp->i_af.if_data;
|
||||
|
||||
|
|
@ -1154,7 +1216,7 @@ xfs_attr_leaf_try_add(
|
|||
struct xfs_buf *bp;
|
||||
int error;
|
||||
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner, 0, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1202,7 +1264,7 @@ xfs_attr_leaf_hasname(
|
|||
{
|
||||
int error = 0;
|
||||
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, 0, bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner, 0, bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1511,12 +1573,23 @@ out_release:
|
|||
return error;
|
||||
}
|
||||
|
||||
/* Enforce that there is at most one namespace bit per attr. */
|
||||
inline bool xfs_attr_check_namespace(unsigned int attr_flags)
|
||||
{
|
||||
return hweight32(attr_flags & XFS_ATTR_NSP_ONDISK_MASK) < 2;
|
||||
}
|
||||
|
||||
/* Returns true if the attribute entry name is valid. */
|
||||
bool
|
||||
xfs_attr_namecheck(
|
||||
unsigned int attr_flags,
|
||||
const void *name,
|
||||
size_t length)
|
||||
{
|
||||
/* Only one namespace bit allowed. */
|
||||
if (!xfs_attr_check_namespace(attr_flags))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* MAXNAMELEN includes the trailing null, but (name/length) leave it
|
||||
* out, so use >= for the length check.
|
||||
|
|
@ -1524,6 +1597,10 @@ xfs_attr_namecheck(
|
|||
if (length >= MAXNAMELEN)
|
||||
return false;
|
||||
|
||||
/* Parent pointers have their own validation. */
|
||||
if (attr_flags & XFS_ATTR_PARENT)
|
||||
return xfs_parent_namecheck(attr_flags, name, length);
|
||||
|
||||
/* There shouldn't be any nulls here */
|
||||
return !memchr(name, 0, length);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -47,8 +47,9 @@ struct xfs_attrlist_cursor_kern {
|
|||
|
||||
|
||||
/* void; state communicated via *context */
|
||||
typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
|
||||
unsigned char *, int, int);
|
||||
typedef void (*put_listent_func_t)(struct xfs_attr_list_context *context,
|
||||
int flags, unsigned char *name, int namelen, void *value,
|
||||
int valuelen);
|
||||
|
||||
struct xfs_attr_list_context {
|
||||
struct xfs_trans *tp;
|
||||
|
|
@ -510,8 +511,8 @@ struct xfs_attr_intent {
|
|||
struct xfs_da_args *xattri_da_args;
|
||||
|
||||
/*
|
||||
* Shared buffer containing the attr name and value so that the logging
|
||||
* code can share large memory buffers between log items.
|
||||
* Shared buffer containing the attr name, new name, and value so that
|
||||
* the logging code can share large memory buffers between log items.
|
||||
*/
|
||||
struct xfs_attri_log_nameval *xattri_nameval;
|
||||
|
||||
|
|
@ -529,6 +530,11 @@ struct xfs_attr_intent {
|
|||
struct xfs_bmbt_irec xattri_map;
|
||||
};
|
||||
|
||||
static inline unsigned int
|
||||
xfs_attr_intent_op(const struct xfs_attr_intent *attr)
|
||||
{
|
||||
return attr->xattri_op_flags & XFS_ATTRI_OP_FLAGS_TYPE_MASK;
|
||||
}
|
||||
|
||||
/*========================================================================
|
||||
* Function prototypes for the kernel.
|
||||
|
|
@ -544,10 +550,20 @@ int xfs_inode_hasattr(struct xfs_inode *ip);
|
|||
bool xfs_attr_is_leaf(struct xfs_inode *ip);
|
||||
int xfs_attr_get_ilocked(struct xfs_da_args *args);
|
||||
int xfs_attr_get(struct xfs_da_args *args);
|
||||
int xfs_attr_set(struct xfs_da_args *args);
|
||||
|
||||
enum xfs_attr_update {
|
||||
XFS_ATTRUPDATE_REMOVE, /* remove attr */
|
||||
XFS_ATTRUPDATE_UPSERT, /* set value, replace any existing attr */
|
||||
XFS_ATTRUPDATE_CREATE, /* set value, fail if attr already exists */
|
||||
XFS_ATTRUPDATE_REPLACE, /* set value, fail if attr does not exist */
|
||||
};
|
||||
|
||||
int xfs_attr_set(struct xfs_da_args *args, enum xfs_attr_update op, bool rsvd);
|
||||
int xfs_attr_set_iter(struct xfs_attr_intent *attr);
|
||||
int xfs_attr_remove_iter(struct xfs_attr_intent *attr);
|
||||
bool xfs_attr_namecheck(const void *name, size_t length);
|
||||
bool xfs_attr_check_namespace(unsigned int attr_flags);
|
||||
bool xfs_attr_namecheck(unsigned int attr_flags, const void *name,
|
||||
size_t length);
|
||||
int xfs_attr_calc_size(struct xfs_da_args *args, int *local);
|
||||
void xfs_init_attr_trans(struct xfs_da_args *args, struct xfs_trans_res *tres,
|
||||
unsigned int *total);
|
||||
|
|
@ -590,7 +606,6 @@ xfs_attr_init_add_state(struct xfs_da_args *args)
|
|||
static inline enum xfs_delattr_state
|
||||
xfs_attr_init_remove_state(struct xfs_da_args *args)
|
||||
{
|
||||
args->op_flags |= XFS_DA_OP_REMOVE;
|
||||
if (xfs_attr_is_shortform(args->dp))
|
||||
return XFS_DAS_SF_REMOVE;
|
||||
if (xfs_attr_is_leaf(args->dp))
|
||||
|
|
@ -614,8 +629,25 @@ xfs_attr_init_replace_state(struct xfs_da_args *args)
|
|||
return xfs_attr_init_add_state(args);
|
||||
}
|
||||
|
||||
xfs_dahash_t xfs_attr_hashname(const uint8_t *name, int namelen);
|
||||
|
||||
xfs_dahash_t xfs_attr_hashval(struct xfs_mount *mp, unsigned int attr_flags,
|
||||
const uint8_t *name, int namelen, const void *value,
|
||||
int valuelen);
|
||||
|
||||
/* Set the hash value for any extended attribute from any namespace. */
|
||||
static inline void xfs_attr_sethash(struct xfs_da_args *args)
|
||||
{
|
||||
args->hashval = xfs_attr_hashval(args->dp->i_mount, args->attr_filter,
|
||||
args->name, args->namelen,
|
||||
args->value, args->valuelen);
|
||||
}
|
||||
|
||||
extern struct kmem_cache *xfs_attr_intent_cache;
|
||||
int __init xfs_attr_intent_init_cache(void);
|
||||
void xfs_attr_intent_destroy_cache(void);
|
||||
|
||||
int xfs_attr_sf_totsize(struct xfs_inode *dp);
|
||||
int xfs_attr_add_fork(struct xfs_inode *ip, int size, int rsvd);
|
||||
|
||||
#endif /* __XFS_ATTR_H__ */
|
||||
|
|
|
|||
|
|
@ -388,6 +388,27 @@ xfs_attr3_leaf_verify(
|
|||
return NULL;
|
||||
}
|
||||
|
||||
xfs_failaddr_t
|
||||
xfs_attr3_leaf_header_check(
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
struct xfs_attr3_leafblock *hdr3 = bp->b_addr;
|
||||
|
||||
if (hdr3->hdr.info.hdr.magic !=
|
||||
cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
|
||||
return __this_address;
|
||||
|
||||
if (be64_to_cpu(hdr3->hdr.info.owner) != owner)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_attr3_leaf_write_verify(
|
||||
struct xfs_buf *bp)
|
||||
|
|
@ -448,16 +469,30 @@ int
|
|||
xfs_attr3_leaf_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t bno,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
xfs_failaddr_t fa;
|
||||
int err;
|
||||
|
||||
err = xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_ATTR_FORK,
|
||||
&xfs_attr3_leaf_buf_ops);
|
||||
if (!err && tp && *bpp)
|
||||
if (err || !(*bpp))
|
||||
return err;
|
||||
|
||||
fa = xfs_attr3_leaf_header_check(*bpp, owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
*bpp = NULL;
|
||||
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (tp)
|
||||
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*========================================================================
|
||||
|
|
@ -472,28 +507,57 @@ xfs_attr3_leaf_read(
|
|||
* INCOMPLETE flag will not be set in attr->attr_filter, but rather
|
||||
* XFS_DA_OP_RECOVERY will be set in args->op_flags.
|
||||
*/
|
||||
static inline unsigned int xfs_attr_match_mask(const struct xfs_da_args *args)
|
||||
{
|
||||
if (args->op_flags & XFS_DA_OP_RECOVERY)
|
||||
return XFS_ATTR_NSP_ONDISK_MASK;
|
||||
return XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
xfs_attr_parent_match(
|
||||
const struct xfs_da_args *args,
|
||||
const void *value,
|
||||
unsigned int valuelen)
|
||||
{
|
||||
ASSERT(args->value != NULL);
|
||||
|
||||
/* Parent pointers do not use remote values */
|
||||
if (!value)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* The only value we support is a parent rec. However, we'll accept
|
||||
* any valuelen so that offline repair can delete ATTR_PARENT values
|
||||
* that are not parent pointers.
|
||||
*/
|
||||
if (valuelen != args->valuelen)
|
||||
return false;
|
||||
|
||||
return memcmp(args->value, value, valuelen) == 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
xfs_attr_match(
|
||||
struct xfs_da_args *args,
|
||||
uint8_t namelen,
|
||||
unsigned char *name,
|
||||
int flags)
|
||||
unsigned int attr_flags,
|
||||
const unsigned char *name,
|
||||
unsigned int namelen,
|
||||
const void *value,
|
||||
unsigned int valuelen)
|
||||
{
|
||||
unsigned int mask = xfs_attr_match_mask(args);
|
||||
|
||||
if (args->namelen != namelen)
|
||||
return false;
|
||||
if ((args->attr_filter & mask) != (attr_flags & mask))
|
||||
return false;
|
||||
if (memcmp(args->name, name, namelen) != 0)
|
||||
return false;
|
||||
|
||||
/* Recovery ignores the INCOMPLETE flag. */
|
||||
if ((args->op_flags & XFS_DA_OP_RECOVERY) &&
|
||||
args->attr_filter == (flags & XFS_ATTR_NSP_ONDISK_MASK))
|
||||
return true;
|
||||
if (attr_flags & XFS_ATTR_PARENT)
|
||||
return xfs_attr_parent_match(args, value, valuelen);
|
||||
|
||||
/* All remaining matches need to be filtered by INCOMPLETE state. */
|
||||
if (args->attr_filter !=
|
||||
(flags & (XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE)))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -503,6 +567,13 @@ xfs_attr_copy_value(
|
|||
unsigned char *value,
|
||||
int valuelen)
|
||||
{
|
||||
/*
|
||||
* Parent pointer lookups require the caller to specify the name and
|
||||
* value, so don't copy anything.
|
||||
*/
|
||||
if (args->attr_filter & XFS_ATTR_PARENT)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* No copy if all we have to do is get the length
|
||||
*/
|
||||
|
|
@ -711,8 +782,9 @@ xfs_attr_sf_findname(
|
|||
for (sfe = xfs_attr_sf_firstentry(sf);
|
||||
sfe < xfs_attr_sf_endptr(sf);
|
||||
sfe = xfs_attr_sf_nextentry(sfe)) {
|
||||
if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
|
||||
sfe->flags))
|
||||
if (xfs_attr_match(args, sfe->flags, sfe->nameval,
|
||||
sfe->namelen, &sfe->nameval[sfe->namelen],
|
||||
sfe->valuelen))
|
||||
return sfe;
|
||||
}
|
||||
|
||||
|
|
@ -819,7 +891,8 @@ xfs_attr_sf_removename(
|
|||
*/
|
||||
if (totsize == sizeof(struct xfs_attr_sf_hdr) && xfs_has_attr2(mp) &&
|
||||
(dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
|
||||
!(args->op_flags & (XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE))) {
|
||||
!(args->op_flags & (XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE)) &&
|
||||
!xfs_has_parent(mp)) {
|
||||
xfs_attr_fork_remove(dp, args->trans);
|
||||
} else {
|
||||
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
|
||||
|
|
@ -828,7 +901,8 @@ xfs_attr_sf_removename(
|
|||
ASSERT(totsize > sizeof(struct xfs_attr_sf_hdr) ||
|
||||
(args->op_flags & XFS_DA_OP_ADDNAME) ||
|
||||
!xfs_has_attr2(mp) ||
|
||||
dp->i_df.if_format == XFS_DINODE_FMT_BTREE);
|
||||
dp->i_df.if_format == XFS_DINODE_FMT_BTREE ||
|
||||
xfs_has_parent(mp));
|
||||
xfs_trans_log_inode(args->trans, dp,
|
||||
XFS_ILOG_CORE | XFS_ILOG_ADATA);
|
||||
}
|
||||
|
|
@ -904,6 +978,7 @@ xfs_attr_shortform_to_leaf(
|
|||
nargs.whichfork = XFS_ATTR_FORK;
|
||||
nargs.trans = args->trans;
|
||||
nargs.op_flags = XFS_DA_OP_OKNOENT;
|
||||
nargs.owner = args->owner;
|
||||
|
||||
sfe = xfs_attr_sf_firstentry(sf);
|
||||
for (i = 0; i < sf->count; i++) {
|
||||
|
|
@ -911,9 +986,13 @@ xfs_attr_shortform_to_leaf(
|
|||
nargs.namelen = sfe->namelen;
|
||||
nargs.value = &sfe->nameval[nargs.namelen];
|
||||
nargs.valuelen = sfe->valuelen;
|
||||
nargs.hashval = xfs_da_hashname(sfe->nameval,
|
||||
sfe->namelen);
|
||||
nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
|
||||
if (!xfs_attr_check_namespace(sfe->flags)) {
|
||||
xfs_da_mark_sick(args);
|
||||
error = -EFSCORRUPTED;
|
||||
goto out;
|
||||
}
|
||||
xfs_attr_sethash(&nargs);
|
||||
error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
|
||||
ASSERT(error == -ENOATTR);
|
||||
error = xfs_attr3_leaf_add(bp, &nargs);
|
||||
|
|
@ -1027,7 +1106,7 @@ xfs_attr_shortform_verify(
|
|||
* one namespace flag per xattr, so we can just count the
|
||||
* bits (i.e. hweight) here.
|
||||
*/
|
||||
if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
|
||||
if (!xfs_attr_check_namespace(sfep->flags))
|
||||
return __this_address;
|
||||
|
||||
sfep = next_sfep;
|
||||
|
|
@ -1106,6 +1185,7 @@ xfs_attr3_leaf_to_shortform(
|
|||
nargs.whichfork = XFS_ATTR_FORK;
|
||||
nargs.trans = args->trans;
|
||||
nargs.op_flags = XFS_DA_OP_OKNOENT;
|
||||
nargs.owner = args->owner;
|
||||
|
||||
for (i = 0; i < ichdr.count; entry++, i++) {
|
||||
if (entry->flags & XFS_ATTR_INCOMPLETE)
|
||||
|
|
@ -1158,7 +1238,7 @@ xfs_attr3_leaf_to_node(
|
|||
error = xfs_da_grow_inode(args, &blkno);
|
||||
if (error)
|
||||
goto out;
|
||||
error = xfs_attr3_leaf_read(args->trans, dp, 0, &bp1);
|
||||
error = xfs_attr3_leaf_read(args->trans, dp, args->owner, 0, &bp1);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
|
|
@ -1237,7 +1317,7 @@ xfs_attr3_leaf_create(
|
|||
ichdr.magic = XFS_ATTR3_LEAF_MAGIC;
|
||||
|
||||
hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
hdr3->owner = cpu_to_be64(dp->i_ino);
|
||||
hdr3->owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
|
||||
|
||||
ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
|
||||
|
|
@ -1993,7 +2073,7 @@ xfs_attr3_leaf_toosmall(
|
|||
if (blkno == 0)
|
||||
continue;
|
||||
error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
|
||||
blkno, &bp);
|
||||
state->args->owner, blkno, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -2401,18 +2481,23 @@ xfs_attr3_leaf_lookup_int(
|
|||
*/
|
||||
if (entry->flags & XFS_ATTR_LOCAL) {
|
||||
name_loc = xfs_attr3_leaf_name_local(leaf, probe);
|
||||
if (!xfs_attr_match(args, name_loc->namelen,
|
||||
name_loc->nameval, entry->flags))
|
||||
if (!xfs_attr_match(args, entry->flags,
|
||||
name_loc->nameval, name_loc->namelen,
|
||||
&name_loc->nameval[name_loc->namelen],
|
||||
be16_to_cpu(name_loc->valuelen)))
|
||||
continue;
|
||||
args->index = probe;
|
||||
return -EEXIST;
|
||||
} else {
|
||||
unsigned int valuelen;
|
||||
|
||||
name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
|
||||
if (!xfs_attr_match(args, name_rmt->namelen,
|
||||
name_rmt->name, entry->flags))
|
||||
valuelen = be32_to_cpu(name_rmt->valuelen);
|
||||
if (!xfs_attr_match(args, entry->flags, name_rmt->name,
|
||||
name_rmt->namelen, NULL, valuelen))
|
||||
continue;
|
||||
args->index = probe;
|
||||
args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
|
||||
args->rmtvaluelen = valuelen;
|
||||
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
|
||||
args->rmtblkcnt = xfs_attr3_rmt_blocks(
|
||||
args->dp->i_mount,
|
||||
|
|
@ -2715,7 +2800,8 @@ xfs_attr3_leaf_clearflag(
|
|||
/*
|
||||
* Set up the operation.
|
||||
*/
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner,
|
||||
args->blkno, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -2779,7 +2865,8 @@ xfs_attr3_leaf_setflag(
|
|||
/*
|
||||
* Set up the operation.
|
||||
*/
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner,
|
||||
args->blkno, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -2838,7 +2925,8 @@ xfs_attr3_leaf_flipflags(
|
|||
/*
|
||||
* Read the block containing the "old" attr
|
||||
*/
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp1);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner,
|
||||
args->blkno, &bp1);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -2846,8 +2934,8 @@ xfs_attr3_leaf_flipflags(
|
|||
* Read the block containing the "new" attr, if it is different
|
||||
*/
|
||||
if (args->blkno2 != args->blkno) {
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
|
||||
&bp2);
|
||||
error = xfs_attr3_leaf_read(args->trans, args->dp, args->owner,
|
||||
args->blkno2, &bp2);
|
||||
if (error)
|
||||
return error;
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -98,12 +98,14 @@ int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
|
|||
struct xfs_buf *leaf2_bp);
|
||||
int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
|
||||
int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_dablk_t bno, struct xfs_buf **bpp);
|
||||
xfs_ino_t owner, xfs_dablk_t bno, struct xfs_buf **bpp);
|
||||
void xfs_attr3_leaf_hdr_from_disk(struct xfs_da_geometry *geo,
|
||||
struct xfs_attr3_icleaf_hdr *to,
|
||||
struct xfs_attr_leafblock *from);
|
||||
void xfs_attr3_leaf_hdr_to_disk(struct xfs_da_geometry *geo,
|
||||
struct xfs_attr_leafblock *to,
|
||||
struct xfs_attr3_icleaf_hdr *from);
|
||||
xfs_failaddr_t xfs_attr3_leaf_header_check(struct xfs_buf *bp,
|
||||
xfs_ino_t owner);
|
||||
|
||||
#endif /* __XFS_ATTR_LEAF_H__ */
|
||||
|
|
|
|||
|
|
@ -43,19 +43,32 @@
|
|||
* the logging system and therefore never have a log item.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Each contiguous block has a header, so it is not just a simple attribute
|
||||
* length to FSB conversion.
|
||||
*/
|
||||
int
|
||||
xfs_attr3_rmt_blocks(
|
||||
struct xfs_mount *mp,
|
||||
int attrlen)
|
||||
/* How many bytes can be stored in a remote value buffer? */
|
||||
inline unsigned int
|
||||
xfs_attr3_rmt_buf_space(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (xfs_has_crc(mp)) {
|
||||
int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
|
||||
return (attrlen + buflen - 1) / buflen;
|
||||
}
|
||||
unsigned int blocksize = mp->m_attr_geo->blksize;
|
||||
|
||||
if (xfs_has_crc(mp))
|
||||
return blocksize - sizeof(struct xfs_attr3_rmt_hdr);
|
||||
|
||||
return blocksize;
|
||||
}
|
||||
|
||||
/* Compute number of fsblocks needed to store a remote attr value */
|
||||
unsigned int
|
||||
xfs_attr3_rmt_blocks(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int attrlen)
|
||||
{
|
||||
/*
|
||||
* Each contiguous block has a header, so it is not just a simple
|
||||
* attribute length to FSB conversion.
|
||||
*/
|
||||
if (xfs_has_crc(mp))
|
||||
return howmany(attrlen, xfs_attr3_rmt_buf_space(mp));
|
||||
|
||||
return XFS_B_TO_FSB(mp, attrlen);
|
||||
}
|
||||
|
||||
|
|
@ -92,7 +105,6 @@ xfs_attr3_rmt_verify(
|
|||
struct xfs_mount *mp,
|
||||
struct xfs_buf *bp,
|
||||
void *ptr,
|
||||
int fsbsize,
|
||||
xfs_daddr_t bno)
|
||||
{
|
||||
struct xfs_attr3_rmt_hdr *rmt = ptr;
|
||||
|
|
@ -103,7 +115,7 @@ xfs_attr3_rmt_verify(
|
|||
return __this_address;
|
||||
if (be64_to_cpu(rmt->rm_blkno) != bno)
|
||||
return __this_address;
|
||||
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
|
||||
if (be32_to_cpu(rmt->rm_bytes) > mp->m_attr_geo->blksize - sizeof(*rmt))
|
||||
return __this_address;
|
||||
if (be32_to_cpu(rmt->rm_offset) +
|
||||
be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
|
||||
|
|
@ -122,9 +134,9 @@ __xfs_attr3_rmt_read_verify(
|
|||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
char *ptr;
|
||||
int len;
|
||||
unsigned int len;
|
||||
xfs_daddr_t bno;
|
||||
int blksize = mp->m_attr_geo->blksize;
|
||||
unsigned int blksize = mp->m_attr_geo->blksize;
|
||||
|
||||
/* no verification of non-crc buffers */
|
||||
if (!xfs_has_crc(mp))
|
||||
|
|
@ -141,7 +153,7 @@ __xfs_attr3_rmt_read_verify(
|
|||
*failaddr = __this_address;
|
||||
return -EFSBADCRC;
|
||||
}
|
||||
*failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
|
||||
*failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
|
||||
if (*failaddr)
|
||||
return -EFSCORRUPTED;
|
||||
len -= blksize;
|
||||
|
|
@ -186,7 +198,7 @@ xfs_attr3_rmt_write_verify(
|
|||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
xfs_failaddr_t fa;
|
||||
int blksize = mp->m_attr_geo->blksize;
|
||||
unsigned int blksize = mp->m_attr_geo->blksize;
|
||||
char *ptr;
|
||||
int len;
|
||||
xfs_daddr_t bno;
|
||||
|
|
@ -203,7 +215,7 @@ xfs_attr3_rmt_write_verify(
|
|||
while (len > 0) {
|
||||
struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
|
||||
|
||||
fa = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
|
||||
fa = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
|
||||
if (fa) {
|
||||
xfs_verifier_error(bp, -EFSCORRUPTED, fa);
|
||||
return;
|
||||
|
|
@ -280,30 +292,30 @@ xfs_attr_rmtval_copyout(
|
|||
struct xfs_mount *mp,
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *dp,
|
||||
int *offset,
|
||||
int *valuelen,
|
||||
xfs_ino_t owner,
|
||||
unsigned int *offset,
|
||||
unsigned int *valuelen,
|
||||
uint8_t **dst)
|
||||
{
|
||||
char *src = bp->b_addr;
|
||||
xfs_ino_t ino = dp->i_ino;
|
||||
xfs_daddr_t bno = xfs_buf_daddr(bp);
|
||||
int len = BBTOB(bp->b_length);
|
||||
int blksize = mp->m_attr_geo->blksize;
|
||||
unsigned int len = BBTOB(bp->b_length);
|
||||
unsigned int blksize = mp->m_attr_geo->blksize;
|
||||
|
||||
ASSERT(len >= blksize);
|
||||
|
||||
while (len > 0 && *valuelen > 0) {
|
||||
int hdr_size = 0;
|
||||
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
|
||||
unsigned int hdr_size = 0;
|
||||
unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);
|
||||
|
||||
byte_cnt = min(*valuelen, byte_cnt);
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
|
||||
if (xfs_attr3_rmt_hdr_ok(src, owner, *offset,
|
||||
byte_cnt, bno)) {
|
||||
xfs_alert(mp,
|
||||
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
|
||||
bno, *offset, byte_cnt, ino);
|
||||
bno, *offset, byte_cnt, owner);
|
||||
xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
|
@ -330,20 +342,20 @@ xfs_attr_rmtval_copyin(
|
|||
struct xfs_mount *mp,
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t ino,
|
||||
int *offset,
|
||||
int *valuelen,
|
||||
unsigned int *offset,
|
||||
unsigned int *valuelen,
|
||||
uint8_t **src)
|
||||
{
|
||||
char *dst = bp->b_addr;
|
||||
xfs_daddr_t bno = xfs_buf_daddr(bp);
|
||||
int len = BBTOB(bp->b_length);
|
||||
int blksize = mp->m_attr_geo->blksize;
|
||||
unsigned int len = BBTOB(bp->b_length);
|
||||
unsigned int blksize = mp->m_attr_geo->blksize;
|
||||
|
||||
ASSERT(len >= blksize);
|
||||
|
||||
while (len > 0 && *valuelen > 0) {
|
||||
int hdr_size;
|
||||
int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);
|
||||
unsigned int hdr_size;
|
||||
unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);
|
||||
|
||||
byte_cnt = min(*valuelen, byte_cnt);
|
||||
hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
|
||||
|
|
@ -389,12 +401,12 @@ xfs_attr_rmtval_get(
|
|||
struct xfs_buf *bp;
|
||||
xfs_dablk_t lblkno = args->rmtblkno;
|
||||
uint8_t *dst = args->value;
|
||||
int valuelen;
|
||||
unsigned int valuelen;
|
||||
int nmap;
|
||||
int error;
|
||||
int blkcnt = args->rmtblkcnt;
|
||||
unsigned int blkcnt = args->rmtblkcnt;
|
||||
int i;
|
||||
int offset = 0;
|
||||
unsigned int offset = 0;
|
||||
|
||||
trace_xfs_attr_rmtval_get(args);
|
||||
|
||||
|
|
@ -427,8 +439,7 @@ xfs_attr_rmtval_get(
|
|||
return error;
|
||||
|
||||
error = xfs_attr_rmtval_copyout(mp, bp, args->dp,
|
||||
&offset, &valuelen,
|
||||
&dst);
|
||||
args->owner, &offset, &valuelen, &dst);
|
||||
xfs_buf_relse(bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
|
@ -453,7 +464,7 @@ xfs_attr_rmt_find_hole(
|
|||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
int error;
|
||||
int blkcnt;
|
||||
unsigned int blkcnt;
|
||||
xfs_fileoff_t lfileoff = 0;
|
||||
|
||||
/*
|
||||
|
|
@ -482,11 +493,11 @@ xfs_attr_rmtval_set_value(
|
|||
struct xfs_bmbt_irec map;
|
||||
xfs_dablk_t lblkno;
|
||||
uint8_t *src = args->value;
|
||||
int blkcnt;
|
||||
int valuelen;
|
||||
unsigned int blkcnt;
|
||||
unsigned int valuelen;
|
||||
int nmap;
|
||||
int error;
|
||||
int offset = 0;
|
||||
unsigned int offset = 0;
|
||||
|
||||
/*
|
||||
* Roll through the "value", copying the attribute value to the
|
||||
|
|
@ -522,8 +533,8 @@ xfs_attr_rmtval_set_value(
|
|||
return error;
|
||||
bp->b_ops = &xfs_attr3_rmt_buf_ops;
|
||||
|
||||
xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
|
||||
&valuelen, &src);
|
||||
xfs_attr_rmtval_copyin(mp, bp, args->owner, &offset, &valuelen,
|
||||
&src);
|
||||
|
||||
error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
|
||||
xfs_buf_relse(bp);
|
||||
|
|
@ -626,7 +637,6 @@ xfs_attr_rmtval_set_blk(
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
ASSERT(nmap == 1);
|
||||
ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
|
||||
(map->br_startblock != HOLESTARTBLOCK));
|
||||
|
||||
|
|
@ -646,7 +656,7 @@ xfs_attr_rmtval_invalidate(
|
|||
struct xfs_da_args *args)
|
||||
{
|
||||
xfs_dablk_t lblkno;
|
||||
int blkcnt;
|
||||
unsigned int blkcnt;
|
||||
int error;
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -6,7 +6,13 @@
|
|||
#ifndef __XFS_ATTR_REMOTE_H__
|
||||
#define __XFS_ATTR_REMOTE_H__
|
||||
|
||||
int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
|
||||
unsigned int xfs_attr3_rmt_blocks(struct xfs_mount *mp, unsigned int attrlen);
|
||||
|
||||
/* Number of rmt blocks needed to store the maximally sized attr value */
|
||||
static inline unsigned int xfs_attr3_max_rmt_blocks(struct xfs_mount *mp)
|
||||
{
|
||||
return xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
|
||||
}
|
||||
|
||||
int xfs_attr_rmtval_get(struct xfs_da_args *args);
|
||||
int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ typedef struct xfs_attr_sf_sort {
|
|||
uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
|
||||
xfs_dahash_t hash; /* this entry's hash value */
|
||||
unsigned char *name; /* name value, pointer into buffer */
|
||||
void *value;
|
||||
} xfs_attr_sf_sort_t;
|
||||
|
||||
#define XFS_ATTR_SF_ENTSIZE_MAX /* max space for name&value */ \
|
||||
|
|
|
|||
|
|
@ -779,7 +779,7 @@ xfs_bmap_local_to_extents_empty(
|
|||
}
|
||||
|
||||
|
||||
STATIC int /* error */
|
||||
int /* error */
|
||||
xfs_bmap_local_to_extents(
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
|
|
@ -789,7 +789,8 @@ xfs_bmap_local_to_extents(
|
|||
void (*init_fn)(struct xfs_trans *tp,
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp))
|
||||
struct xfs_ifork *ifp, void *priv),
|
||||
void *priv)
|
||||
{
|
||||
int error = 0;
|
||||
int flags; /* logging flags returned */
|
||||
|
|
@ -850,7 +851,7 @@ xfs_bmap_local_to_extents(
|
|||
* log here. Note that init_fn must also set the buffer log item type
|
||||
* correctly.
|
||||
*/
|
||||
init_fn(tp, bp, ip, ifp);
|
||||
init_fn(tp, bp, ip, ifp, priv);
|
||||
|
||||
/* account for the change in fork size */
|
||||
xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
|
||||
|
|
@ -976,13 +977,14 @@ xfs_bmap_add_attrfork_local(
|
|||
dargs.total = dargs.geo->fsbcount;
|
||||
dargs.whichfork = XFS_DATA_FORK;
|
||||
dargs.trans = tp;
|
||||
dargs.owner = ip->i_ino;
|
||||
return xfs_dir2_sf_to_block(&dargs);
|
||||
}
|
||||
|
||||
if (S_ISLNK(VFS_I(ip)->i_mode))
|
||||
return xfs_bmap_local_to_extents(tp, ip, 1, flags,
|
||||
XFS_DATA_FORK,
|
||||
xfs_symlink_local_to_remote);
|
||||
XFS_DATA_FORK, xfs_symlink_local_to_remote,
|
||||
NULL);
|
||||
|
||||
/* should only be called for types that support local format data */
|
||||
ASSERT(0);
|
||||
|
|
@ -1023,40 +1025,29 @@ xfs_bmap_set_attrforkoff(
|
|||
}
|
||||
|
||||
/*
|
||||
* Convert inode from non-attributed to attributed.
|
||||
* Must not be in a transaction, ip must not be locked.
|
||||
* Convert inode from non-attributed to attributed. Caller must hold the
|
||||
* ILOCK_EXCL and the file cannot have an attr fork.
|
||||
*/
|
||||
int /* error code */
|
||||
xfs_bmap_add_attrfork(
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip, /* incore inode pointer */
|
||||
int size, /* space new attribute needs */
|
||||
int rsvd) /* xact may use reserved blks */
|
||||
{
|
||||
xfs_mount_t *mp; /* mount structure */
|
||||
xfs_trans_t *tp; /* transaction pointer */
|
||||
int blks; /* space reservation */
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
int version = 1; /* superblock attr version */
|
||||
int logflags; /* logging flags */
|
||||
int error; /* error return value */
|
||||
|
||||
ASSERT(xfs_inode_has_attr_fork(ip) == 0);
|
||||
|
||||
mp = ip->i_mount;
|
||||
xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
|
||||
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
|
||||
|
||||
blks = XFS_ADDAFORK_SPACE_RES(mp);
|
||||
|
||||
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
|
||||
rsvd, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
if (xfs_inode_has_attr_fork(ip))
|
||||
goto trans_cancel;
|
||||
ASSERT(!xfs_inode_has_attr_fork(ip));
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
error = xfs_bmap_set_attrforkoff(ip, size, &version);
|
||||
if (error)
|
||||
goto trans_cancel;
|
||||
return error;
|
||||
|
||||
xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
|
||||
logflags = 0;
|
||||
|
|
@ -1077,7 +1068,7 @@ xfs_bmap_add_attrfork(
|
|||
if (logflags)
|
||||
xfs_trans_log_inode(tp, ip, logflags);
|
||||
if (error)
|
||||
goto trans_cancel;
|
||||
return error;
|
||||
if (!xfs_has_attr(mp) ||
|
||||
(!xfs_has_attr2(mp) && version == 2)) {
|
||||
bool log_sb = false;
|
||||
|
|
@ -1096,14 +1087,7 @@ xfs_bmap_add_attrfork(
|
|||
xfs_log_sb(tp);
|
||||
}
|
||||
|
||||
error = xfs_trans_commit(tp);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
return error;
|
||||
|
||||
trans_cancel:
|
||||
xfs_trans_cancel(tp);
|
||||
xfs_iunlock(ip, XFS_ILOCK_EXCL);
|
||||
return error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -1586,6 +1570,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
if (error)
|
||||
goto done;
|
||||
}
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
|
||||
|
|
@ -1616,6 +1601,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
if (error)
|
||||
goto done;
|
||||
}
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
|
||||
|
|
@ -1650,6 +1636,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
if (error)
|
||||
goto done;
|
||||
}
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
|
||||
|
|
@ -1684,6 +1671,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
goto done;
|
||||
}
|
||||
}
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
|
||||
|
|
@ -1722,6 +1710,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
if (error)
|
||||
goto done;
|
||||
}
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_LEFT_FILLING:
|
||||
|
|
@ -1812,6 +1801,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
|
||||
xfs_iext_next(ifp, &bma->icur);
|
||||
xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case BMAP_RIGHT_FILLING:
|
||||
|
|
@ -1861,6 +1851,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
PREV.br_blockcount = temp;
|
||||
xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
|
||||
xfs_iext_next(ifp, &bma->icur);
|
||||
ASSERT(da_new <= da_old);
|
||||
break;
|
||||
|
||||
case 0:
|
||||
|
|
@ -1975,7 +1966,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
}
|
||||
|
||||
if (da_new != da_old)
|
||||
xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
|
||||
xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
|
||||
|
||||
if (bma->cur) {
|
||||
da_new += bma->cur->bc_bmap.allocated;
|
||||
|
|
@ -1983,11 +1974,10 @@ xfs_bmap_add_extent_delay_real(
|
|||
}
|
||||
|
||||
/* adjust for changes in reserved delayed indirect blocks */
|
||||
if (da_new != da_old) {
|
||||
ASSERT(state == 0 || da_new < da_old);
|
||||
error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
|
||||
false);
|
||||
}
|
||||
if (da_new < da_old)
|
||||
xfs_add_fdblocks(mp, da_old - da_new);
|
||||
else if (da_new > da_old)
|
||||
error = xfs_dec_fdblocks(mp, da_new - da_old, true);
|
||||
|
||||
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
|
||||
done:
|
||||
|
|
@ -2688,12 +2678,12 @@ xfs_bmap_add_extent_hole_delay(
|
|||
}
|
||||
if (oldlen != newlen) {
|
||||
ASSERT(oldlen > newlen);
|
||||
xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
|
||||
false);
|
||||
xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
|
||||
|
||||
/*
|
||||
* Nothing to do for disk quota accounting here.
|
||||
*/
|
||||
xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
|
||||
xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3370,7 +3360,7 @@ xfs_bmap_alloc_account(
|
|||
* yet.
|
||||
*/
|
||||
if (ap->wasdel) {
|
||||
xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
|
||||
xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -3394,7 +3384,7 @@ xfs_bmap_alloc_account(
|
|||
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
|
||||
if (ap->wasdel) {
|
||||
ap->ip->i_delayed_blks -= ap->length;
|
||||
xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
|
||||
xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
|
||||
fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
|
||||
} else {
|
||||
fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
|
||||
|
|
@ -4066,6 +4056,7 @@ xfs_bmapi_reserve_delalloc(
|
|||
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
|
||||
xfs_extlen_t alen;
|
||||
xfs_extlen_t indlen;
|
||||
uint64_t fdblocks;
|
||||
int error;
|
||||
xfs_fileoff_t aoff = off;
|
||||
|
||||
|
|
@ -4108,17 +4099,21 @@ xfs_bmapi_reserve_delalloc(
|
|||
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
|
||||
ASSERT(indlen > 0);
|
||||
|
||||
error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
|
||||
if (error)
|
||||
goto out_unreserve_quota;
|
||||
fdblocks = indlen;
|
||||
if (XFS_IS_REALTIME_INODE(ip)) {
|
||||
error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
|
||||
if (error)
|
||||
goto out_unreserve_quota;
|
||||
} else {
|
||||
fdblocks += alen;
|
||||
}
|
||||
|
||||
error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
|
||||
error = xfs_dec_fdblocks(mp, fdblocks, false);
|
||||
if (error)
|
||||
goto out_unreserve_blocks;
|
||||
|
||||
goto out_unreserve_frextents;
|
||||
|
||||
ip->i_delayed_blks += alen;
|
||||
xfs_mod_delalloc(ip->i_mount, alen + indlen);
|
||||
xfs_mod_delalloc(ip, alen, indlen);
|
||||
|
||||
got->br_startoff = aoff;
|
||||
got->br_startblock = nullstartblock(indlen);
|
||||
|
|
@ -4139,8 +4134,9 @@ xfs_bmapi_reserve_delalloc(
|
|||
|
||||
return 0;
|
||||
|
||||
out_unreserve_blocks:
|
||||
xfs_mod_fdblocks(mp, alen, false);
|
||||
out_unreserve_frextents:
|
||||
if (XFS_IS_REALTIME_INODE(ip))
|
||||
xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
|
||||
out_unreserve_quota:
|
||||
if (XFS_IS_QUOTA_ON(mp))
|
||||
xfs_quota_unreserve_blkres(ip, alen);
|
||||
|
|
@ -4191,26 +4187,10 @@ xfs_bmapi_allocate(
|
|||
struct xfs_mount *mp = bma->ip->i_mount;
|
||||
int whichfork = xfs_bmapi_whichfork(bma->flags);
|
||||
struct xfs_ifork *ifp = xfs_ifork_ptr(bma->ip, whichfork);
|
||||
int tmp_logflags = 0;
|
||||
int error;
|
||||
|
||||
ASSERT(bma->length > 0);
|
||||
|
||||
/*
|
||||
* For the wasdelay case, we could also just allocate the stuff asked
|
||||
* for in this bmap call but that wouldn't be as good.
|
||||
*/
|
||||
if (bma->wasdel) {
|
||||
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
|
||||
bma->offset = bma->got.br_startoff;
|
||||
if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
|
||||
bma->prev.br_startoff = NULLFILEOFF;
|
||||
} else {
|
||||
bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
|
||||
if (!bma->eof)
|
||||
bma->length = XFS_FILBLKS_MIN(bma->length,
|
||||
bma->got.br_startoff - bma->offset);
|
||||
}
|
||||
ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
|
||||
|
||||
if (bma->flags & XFS_BMAPI_CONTIG)
|
||||
bma->minlen = bma->length;
|
||||
|
|
@ -4226,8 +4206,15 @@ xfs_bmapi_allocate(
|
|||
} else {
|
||||
error = xfs_bmap_alloc_userdata(bma);
|
||||
}
|
||||
if (error || bma->blkno == NULLFSBLOCK)
|
||||
if (error)
|
||||
return error;
|
||||
if (bma->blkno == NULLFSBLOCK)
|
||||
return -ENOSPC;
|
||||
|
||||
if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
|
||||
xfs_bmap_mark_sick(bma->ip, whichfork);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (bma->flags & XFS_BMAPI_ZERO) {
|
||||
error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
|
||||
|
|
@ -4260,8 +4247,6 @@ xfs_bmapi_allocate(
|
|||
error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
|
||||
whichfork, &bma->icur, &bma->cur, &bma->got,
|
||||
&bma->logflags, bma->flags);
|
||||
|
||||
bma->logflags |= tmp_logflags;
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -4406,6 +4391,15 @@ xfs_bmapi_finish(
|
|||
* extent state if necessary. Details behaviour is controlled by the flags
|
||||
* parameter. Only allocates blocks from a single allocation group, to avoid
|
||||
* locking problems.
|
||||
*
|
||||
* Returns 0 on success and places the extent mappings in mval. nmaps is used
|
||||
* as an input/output parameter where the caller specifies the maximum number
|
||||
* of mappings that may be returned and xfs_bmapi_write passes back the number
|
||||
* of mappings (including existing mappings) it found.
|
||||
*
|
||||
* Returns a negative error code on failure, including -ENOSPC when it could not
|
||||
* allocate any blocks and -ENOSR when it did allocate blocks to convert a
|
||||
* delalloc range, but those blocks were before the passed in range.
|
||||
*/
|
||||
int
|
||||
xfs_bmapi_write(
|
||||
|
|
@ -4524,20 +4518,33 @@ xfs_bmapi_write(
|
|||
* allocation length request (which can be 64 bits in
|
||||
* length) and the bma length request, which is
|
||||
* xfs_extlen_t and therefore 32 bits. Hence we have to
|
||||
* check for 32-bit overflows and handle them here.
|
||||
* be careful and do the min() using the larger type to
|
||||
* avoid overflows.
|
||||
*/
|
||||
if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
|
||||
bma.length = XFS_MAX_BMBT_EXTLEN;
|
||||
else
|
||||
bma.length = len;
|
||||
bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
|
||||
|
||||
if (wasdelay) {
|
||||
bma.length = XFS_FILBLKS_MIN(bma.length,
|
||||
bma.got.br_blockcount -
|
||||
(bno - bma.got.br_startoff));
|
||||
} else {
|
||||
if (!eof)
|
||||
bma.length = XFS_FILBLKS_MIN(bma.length,
|
||||
bma.got.br_startoff - bno);
|
||||
}
|
||||
|
||||
ASSERT(len > 0);
|
||||
ASSERT(bma.length > 0);
|
||||
error = xfs_bmapi_allocate(&bma);
|
||||
if (error)
|
||||
if (error) {
|
||||
/*
|
||||
* If we already allocated space in a previous
|
||||
* iteration return what we go so far when
|
||||
* running out of space.
|
||||
*/
|
||||
if (error == -ENOSPC && bma.nallocs)
|
||||
break;
|
||||
goto error0;
|
||||
if (bma.blkno == NULLFSBLOCK)
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a CoW allocation, record the data in
|
||||
|
|
@ -4575,7 +4582,6 @@ xfs_bmapi_write(
|
|||
if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
|
||||
eof = true;
|
||||
}
|
||||
*nmap = n;
|
||||
|
||||
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
|
||||
whichfork);
|
||||
|
|
@ -4586,7 +4592,22 @@ xfs_bmapi_write(
|
|||
ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
|
||||
xfs_bmapi_finish(&bma, whichfork, 0);
|
||||
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
|
||||
orig_nmap, *nmap);
|
||||
orig_nmap, n);
|
||||
|
||||
/*
|
||||
* When converting delayed allocations, xfs_bmapi_allocate ignores
|
||||
* the passed in bno and always converts from the start of the found
|
||||
* delalloc extent.
|
||||
*
|
||||
* To avoid a successful return with *nmap set to 0, return the magic
|
||||
* -ENOSR error code for this particular case so that the caller can
|
||||
* handle it.
|
||||
*/
|
||||
if (!n) {
|
||||
ASSERT(bma.nallocs >= *nmap);
|
||||
return -ENOSR;
|
||||
}
|
||||
*nmap = n;
|
||||
return 0;
|
||||
error0:
|
||||
xfs_bmapi_finish(&bma, whichfork, error);
|
||||
|
|
@ -4599,8 +4620,8 @@ error0:
|
|||
* invocations to allocate the target offset if a large enough physical extent
|
||||
* is not available.
|
||||
*/
|
||||
int
|
||||
xfs_bmapi_convert_delalloc(
|
||||
static int
|
||||
xfs_bmapi_convert_one_delalloc(
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
xfs_off_t offset,
|
||||
|
|
@ -4630,11 +4651,8 @@ xfs_bmapi_convert_delalloc(
|
|||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, ip, 0);
|
||||
|
||||
error = xfs_iext_count_may_overflow(ip, whichfork,
|
||||
error = xfs_iext_count_extend(tp, ip, whichfork,
|
||||
XFS_IEXT_ADD_NOSPLIT_CNT);
|
||||
if (error == -EFBIG)
|
||||
error = xfs_iext_count_upgrade(tp, ip,
|
||||
XFS_IEXT_ADD_NOSPLIT_CNT);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
|
|
@ -4657,18 +4675,24 @@ xfs_bmapi_convert_delalloc(
|
|||
if (!isnullstartblock(bma.got.br_startblock)) {
|
||||
xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
|
||||
xfs_iomap_inode_sequence(ip, flags));
|
||||
*seq = READ_ONCE(ifp->if_seq);
|
||||
if (seq)
|
||||
*seq = READ_ONCE(ifp->if_seq);
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
||||
bma.tp = tp;
|
||||
bma.ip = ip;
|
||||
bma.wasdel = true;
|
||||
bma.offset = bma.got.br_startoff;
|
||||
bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
|
||||
XFS_MAX_BMBT_EXTLEN);
|
||||
bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
|
||||
|
||||
/*
|
||||
* Always allocate convert from the start of the delalloc extent even if
|
||||
* that is outside the passed in range to create large contiguous
|
||||
* extents on disk.
|
||||
*/
|
||||
bma.offset = bma.got.br_startoff;
|
||||
bma.length = bma.got.br_blockcount;
|
||||
|
||||
/*
|
||||
* When we're converting the delalloc reservations backing dirty pages
|
||||
* in the page cache, we must be careful about how we create the new
|
||||
|
|
@ -4693,22 +4717,14 @@ xfs_bmapi_convert_delalloc(
|
|||
if (error)
|
||||
goto out_finish;
|
||||
|
||||
error = -ENOSPC;
|
||||
if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
|
||||
goto out_finish;
|
||||
if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) {
|
||||
xfs_bmap_mark_sick(ip, whichfork);
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_finish;
|
||||
}
|
||||
|
||||
XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
|
||||
XFS_STATS_INC(mp, xs_xstrat_quick);
|
||||
|
||||
ASSERT(!isnullstartblock(bma.got.br_startblock));
|
||||
xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
|
||||
xfs_iomap_inode_sequence(ip, flags));
|
||||
*seq = READ_ONCE(ifp->if_seq);
|
||||
if (seq)
|
||||
*seq = READ_ONCE(ifp->if_seq);
|
||||
|
||||
if (whichfork == XFS_COW_FORK)
|
||||
xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
|
||||
|
|
@ -4731,6 +4747,36 @@ out_trans_cancel:
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pass in a dellalloc extent and convert it to real extents, return the real
|
||||
* extent that maps offset_fsb in iomap.
|
||||
*/
|
||||
int
|
||||
xfs_bmapi_convert_delalloc(
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
loff_t offset,
|
||||
struct iomap *iomap,
|
||||
unsigned int *seq)
|
||||
{
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Attempt to allocate whatever delalloc extent currently backs offset
|
||||
* and put the result into iomap. Allocate in a loop because it may
|
||||
* take several attempts to allocate real blocks for a contiguous
|
||||
* delalloc extent if free space is sufficiently fragmented.
|
||||
*/
|
||||
do {
|
||||
error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
|
||||
iomap, seq);
|
||||
if (error)
|
||||
return error;
|
||||
} while (iomap->offset + iomap->length <= offset);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_bmapi_remap(
|
||||
struct xfs_trans *tp,
|
||||
|
|
@ -4822,31 +4868,17 @@ error0:
|
|||
* ores == 1). The number of stolen blocks is returned. The availability and
|
||||
* subsequent accounting of stolen blocks is the responsibility of the caller.
|
||||
*/
|
||||
static xfs_filblks_t
|
||||
static void
|
||||
xfs_bmap_split_indlen(
|
||||
xfs_filblks_t ores, /* original res. */
|
||||
xfs_filblks_t *indlen1, /* ext1 worst indlen */
|
||||
xfs_filblks_t *indlen2, /* ext2 worst indlen */
|
||||
xfs_filblks_t avail) /* stealable blocks */
|
||||
xfs_filblks_t *indlen2) /* ext2 worst indlen */
|
||||
{
|
||||
xfs_filblks_t len1 = *indlen1;
|
||||
xfs_filblks_t len2 = *indlen2;
|
||||
xfs_filblks_t nres = len1 + len2; /* new total res. */
|
||||
xfs_filblks_t stolen = 0;
|
||||
xfs_filblks_t resfactor;
|
||||
|
||||
/*
|
||||
* Steal as many blocks as we can to try and satisfy the worst case
|
||||
* indlen for both new extents.
|
||||
*/
|
||||
if (ores < nres && avail)
|
||||
stolen = XFS_FILBLKS_MIN(nres - ores, avail);
|
||||
ores += stolen;
|
||||
|
||||
/* nothing else to do if we've satisfied the new reservation */
|
||||
if (ores >= nres)
|
||||
return stolen;
|
||||
|
||||
/*
|
||||
* We can't meet the total required reservation for the two extents.
|
||||
* Calculate the percent of the overall shortage between both extents
|
||||
|
|
@ -4891,11 +4923,9 @@ xfs_bmap_split_indlen(
|
|||
|
||||
*indlen1 = len1;
|
||||
*indlen2 = len2;
|
||||
|
||||
return stolen;
|
||||
}
|
||||
|
||||
int
|
||||
void
|
||||
xfs_bmap_del_extent_delay(
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
|
|
@ -4908,9 +4938,9 @@ xfs_bmap_del_extent_delay(
|
|||
struct xfs_bmbt_irec new;
|
||||
int64_t da_old, da_new, da_diff = 0;
|
||||
xfs_fileoff_t del_endoff, got_endoff;
|
||||
xfs_filblks_t got_indlen, new_indlen, stolen;
|
||||
xfs_filblks_t got_indlen, new_indlen, stolen = 0;
|
||||
uint32_t state = xfs_bmap_fork_to_state(whichfork);
|
||||
int error = 0;
|
||||
uint64_t fdblocks;
|
||||
bool isrt;
|
||||
|
||||
XFS_STATS_INC(mp, xs_del_exlist);
|
||||
|
|
@ -4925,18 +4955,12 @@ xfs_bmap_del_extent_delay(
|
|||
ASSERT(got->br_startoff <= del->br_startoff);
|
||||
ASSERT(got_endoff >= del_endoff);
|
||||
|
||||
if (isrt)
|
||||
xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
|
||||
|
||||
/*
|
||||
* Update the inode delalloc counter now and wait to update the
|
||||
* sb counters as we might have to borrow some blocks for the
|
||||
* indirect block accounting.
|
||||
*/
|
||||
ASSERT(!isrt);
|
||||
error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
|
||||
if (error)
|
||||
return error;
|
||||
xfs_quota_unreserve_blkres(ip, del->br_blockcount);
|
||||
ip->i_delayed_blks -= del->br_blockcount;
|
||||
|
||||
if (got->br_startoff == del->br_startoff)
|
||||
|
|
@ -4990,8 +5014,24 @@ xfs_bmap_del_extent_delay(
|
|||
new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
|
||||
|
||||
WARN_ON_ONCE(!got_indlen || !new_indlen);
|
||||
stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
|
||||
del->br_blockcount);
|
||||
/*
|
||||
* Steal as many blocks as we can to try and satisfy the worst
|
||||
* case indlen for both new extents.
|
||||
*
|
||||
* However, we can't just steal reservations from the data
|
||||
* blocks if this is an RT inodes as the data and metadata
|
||||
* blocks come from different pools. We'll have to live with
|
||||
* under-filled indirect reservation in this case.
|
||||
*/
|
||||
da_new = got_indlen + new_indlen;
|
||||
if (da_new > da_old && !isrt) {
|
||||
stolen = XFS_FILBLKS_MIN(da_new - da_old,
|
||||
del->br_blockcount);
|
||||
da_old += stolen;
|
||||
}
|
||||
if (da_new > da_old)
|
||||
xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
|
||||
da_new = got_indlen + new_indlen;
|
||||
|
||||
got->br_startblock = nullstartblock((int)got_indlen);
|
||||
|
||||
|
|
@ -5003,20 +5043,21 @@ xfs_bmap_del_extent_delay(
|
|||
xfs_iext_next(ifp, icur);
|
||||
xfs_iext_insert(ip, icur, &new, state);
|
||||
|
||||
da_new = got_indlen + new_indlen - stolen;
|
||||
del->br_blockcount -= stolen;
|
||||
break;
|
||||
}
|
||||
|
||||
ASSERT(da_old >= da_new);
|
||||
da_diff = da_old - da_new;
|
||||
if (!isrt)
|
||||
da_diff += del->br_blockcount;
|
||||
if (da_diff) {
|
||||
xfs_mod_fdblocks(mp, da_diff, false);
|
||||
xfs_mod_delalloc(mp, -da_diff);
|
||||
}
|
||||
return error;
|
||||
fdblocks = da_diff;
|
||||
|
||||
if (isrt)
|
||||
xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
|
||||
else
|
||||
fdblocks += del->br_blockcount;
|
||||
|
||||
xfs_add_fdblocks(mp, fdblocks);
|
||||
xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -5107,8 +5148,7 @@ xfs_bmap_del_extent_real(
|
|||
{
|
||||
xfs_fsblock_t del_endblock=0; /* first block past del */
|
||||
xfs_fileoff_t del_endoff; /* first offset past del */
|
||||
int do_fx; /* free extent at end of routine */
|
||||
int error; /* error return value */
|
||||
int error = 0; /* error return value */
|
||||
struct xfs_bmbt_irec got; /* current extent entry */
|
||||
xfs_fileoff_t got_endoff; /* first offset past got */
|
||||
int i; /* temp state */
|
||||
|
|
@ -5151,20 +5191,10 @@ xfs_bmap_del_extent_real(
|
|||
return -ENOSPC;
|
||||
|
||||
*logflagsp = XFS_ILOG_CORE;
|
||||
if (xfs_ifork_is_realtime(ip, whichfork)) {
|
||||
if (!(bflags & XFS_BMAPI_REMAP)) {
|
||||
error = xfs_rtfree_blocks(tp, del->br_startblock,
|
||||
del->br_blockcount);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
do_fx = 0;
|
||||
if (xfs_ifork_is_realtime(ip, whichfork))
|
||||
qfield = XFS_TRANS_DQ_RTBCOUNT;
|
||||
} else {
|
||||
do_fx = 1;
|
||||
else
|
||||
qfield = XFS_TRANS_DQ_BCOUNT;
|
||||
}
|
||||
nblks = del->br_blockcount;
|
||||
|
||||
del_endblock = del->br_startblock + del->br_blockcount;
|
||||
|
|
@ -5312,18 +5342,29 @@ xfs_bmap_del_extent_real(
|
|||
/*
|
||||
* If we need to, add to list of extents to delete.
|
||||
*/
|
||||
if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
|
||||
if (!(bflags & XFS_BMAPI_REMAP)) {
|
||||
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
|
||||
xfs_refcount_decrease_extent(tp, del);
|
||||
} else if (xfs_ifork_is_realtime(ip, whichfork)) {
|
||||
/*
|
||||
* Ensure the bitmap and summary inodes are locked
|
||||
* and joined to the transaction before modifying them.
|
||||
*/
|
||||
if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
|
||||
tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
|
||||
xfs_rtbitmap_lock(tp, mp);
|
||||
}
|
||||
error = xfs_rtfree_blocks(tp, del->br_startblock,
|
||||
del->br_blockcount);
|
||||
} else {
|
||||
error = xfs_free_extent_later(tp, del->br_startblock,
|
||||
del->br_blockcount, NULL,
|
||||
XFS_AG_RESV_NONE,
|
||||
((bflags & XFS_BMAPI_NODISCARD) ||
|
||||
del->br_state == XFS_EXT_UNWRITTEN));
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -5414,16 +5455,6 @@ __xfs_bunmapi(
|
|||
} else
|
||||
cur = NULL;
|
||||
|
||||
if (isrt) {
|
||||
/*
|
||||
* Synchronize by locking the bitmap inode.
|
||||
*/
|
||||
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
|
||||
xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
|
||||
xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
|
||||
xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
|
||||
}
|
||||
|
||||
extno = 0;
|
||||
while (end != (xfs_fileoff_t)-1 && end >= start &&
|
||||
(nexts == 0 || extno < nexts)) {
|
||||
|
|
@ -5584,18 +5615,16 @@ __xfs_bunmapi(
|
|||
|
||||
delete:
|
||||
if (wasdel) {
|
||||
error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
|
||||
&got, &del);
|
||||
xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
|
||||
} else {
|
||||
error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
|
||||
&del, &tmp_logflags, whichfork,
|
||||
flags);
|
||||
logflags |= tmp_logflags;
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
|
||||
if (error)
|
||||
goto error0;
|
||||
|
||||
end = del.br_startoff - 1;
|
||||
nodelete:
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ static inline bool xfs_bmap_is_real_extent(const struct xfs_bmbt_irec *irec)
|
|||
* Return true if the extent is a real, allocated extent, or false if it is a
|
||||
* delayed allocation, and unwritten extent or a hole.
|
||||
*/
|
||||
static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec)
|
||||
static inline bool xfs_bmap_is_written_extent(const struct xfs_bmbt_irec *irec)
|
||||
{
|
||||
return xfs_bmap_is_real_extent(irec) &&
|
||||
irec->br_state != XFS_EXT_UNWRITTEN;
|
||||
|
|
@ -176,9 +176,16 @@ int xfs_bmap_longest_free_extent(struct xfs_perag *pag,
|
|||
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
|
||||
xfs_filblks_t len);
|
||||
unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp);
|
||||
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
|
||||
int xfs_bmap_add_attrfork(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
int size, int rsvd);
|
||||
void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
|
||||
struct xfs_inode *ip, int whichfork);
|
||||
int xfs_bmap_local_to_extents(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
xfs_extlen_t total, int *logflagsp, int whichfork,
|
||||
void (*init_fn)(struct xfs_trans *tp, struct xfs_buf *bp,
|
||||
struct xfs_inode *ip, struct xfs_ifork *ifp,
|
||||
void *priv),
|
||||
void *priv);
|
||||
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
|
||||
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
|
||||
|
|
@ -195,7 +202,7 @@ int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
|
|||
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
xfs_fileoff_t bno, xfs_filblks_t len, uint32_t flags,
|
||||
xfs_extnum_t nexts, int *done);
|
||||
int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
|
||||
void xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
|
||||
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
|
||||
struct xfs_bmbt_irec *del);
|
||||
void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
|
||||
|
|
|
|||
|
|
@ -252,6 +252,51 @@ xfs_da3_node_verify(
|
|||
return NULL;
|
||||
}
|
||||
|
||||
xfs_failaddr_t
|
||||
xfs_da3_node_header_check(
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
struct xfs_da3_blkinfo *hdr3 = bp->b_addr;
|
||||
|
||||
if (hdr3->hdr.magic != cpu_to_be16(XFS_DA3_NODE_MAGIC))
|
||||
return __this_address;
|
||||
|
||||
if (be64_to_cpu(hdr3->owner) != owner)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
xfs_failaddr_t
|
||||
xfs_da3_header_check(
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
struct xfs_da_blkinfo *hdr = bp->b_addr;
|
||||
|
||||
if (!xfs_has_crc(mp))
|
||||
return NULL;
|
||||
|
||||
switch (hdr->magic) {
|
||||
case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
|
||||
return xfs_attr3_leaf_header_check(bp, owner);
|
||||
case cpu_to_be16(XFS_DA3_NODE_MAGIC):
|
||||
return xfs_da3_node_header_check(bp, owner);
|
||||
case cpu_to_be16(XFS_DIR3_LEAF1_MAGIC):
|
||||
case cpu_to_be16(XFS_DIR3_LEAFN_MAGIC):
|
||||
return xfs_dir3_leaf_header_check(bp, owner);
|
||||
}
|
||||
|
||||
ASSERT(0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_da3_node_write_verify(
|
||||
struct xfs_buf *bp)
|
||||
|
|
@ -486,7 +531,7 @@ xfs_da3_node_create(
|
|||
memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
|
||||
ichdr.magic = XFS_DA3_NODE_MAGIC;
|
||||
hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
|
||||
hdr3->info.owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
|
||||
} else {
|
||||
ichdr.magic = XFS_DA_NODE_MAGIC;
|
||||
|
|
@ -1199,6 +1244,7 @@ xfs_da3_root_join(
|
|||
struct xfs_da3_icnode_hdr oldroothdr;
|
||||
int error;
|
||||
struct xfs_inode *dp = state->args->dp;
|
||||
xfs_failaddr_t fa;
|
||||
|
||||
trace_xfs_da_root_join(state->args);
|
||||
|
||||
|
|
@ -1225,6 +1271,13 @@ xfs_da3_root_join(
|
|||
error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
|
||||
|
||||
/*
|
||||
|
|
@ -1259,6 +1312,7 @@ xfs_da3_node_toosmall(
|
|||
struct xfs_da_blkinfo *info;
|
||||
xfs_dablk_t blkno;
|
||||
struct xfs_buf *bp;
|
||||
xfs_failaddr_t fa;
|
||||
struct xfs_da3_icnode_hdr nodehdr;
|
||||
int count;
|
||||
int forward;
|
||||
|
|
@ -1333,6 +1387,13 @@ xfs_da3_node_toosmall(
|
|||
state->args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_node_header_check(bp, state->args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(state->args->trans, bp);
|
||||
xfs_da_mark_sick(state->args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
node = bp->b_addr;
|
||||
xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
|
||||
|
|
@ -1591,6 +1652,7 @@ xfs_da3_node_lookup_int(
|
|||
struct xfs_da_node_entry *btree;
|
||||
struct xfs_da3_icnode_hdr nodehdr;
|
||||
struct xfs_da_args *args;
|
||||
xfs_failaddr_t fa;
|
||||
xfs_dablk_t blkno;
|
||||
xfs_dahash_t hashval;
|
||||
xfs_dahash_t btreehashval;
|
||||
|
|
@ -1629,6 +1691,12 @@ xfs_da3_node_lookup_int(
|
|||
|
||||
if (magic == XFS_ATTR_LEAF_MAGIC ||
|
||||
magic == XFS_ATTR3_LEAF_MAGIC) {
|
||||
fa = xfs_attr3_leaf_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
blk->magic = XFS_ATTR_LEAF_MAGIC;
|
||||
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
|
||||
break;
|
||||
|
|
@ -1636,6 +1704,12 @@ xfs_da3_node_lookup_int(
|
|||
|
||||
if (magic == XFS_DIR2_LEAFN_MAGIC ||
|
||||
magic == XFS_DIR3_LEAFN_MAGIC) {
|
||||
fa = xfs_dir3_leaf_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
blk->magic = XFS_DIR2_LEAFN_MAGIC;
|
||||
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
|
||||
blk->bp, NULL);
|
||||
|
|
@ -1648,6 +1722,13 @@ xfs_da3_node_lookup_int(
|
|||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
fa = xfs_da3_node_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
blk->magic = XFS_DA_NODE_MAGIC;
|
||||
|
||||
/*
|
||||
|
|
@ -1820,6 +1901,7 @@ xfs_da3_blk_link(
|
|||
struct xfs_da_blkinfo *tmp_info;
|
||||
struct xfs_da_args *args;
|
||||
struct xfs_buf *bp;
|
||||
xfs_failaddr_t fa;
|
||||
int before = 0;
|
||||
int error;
|
||||
struct xfs_inode *dp = state->args->dp;
|
||||
|
|
@ -1863,6 +1945,13 @@ xfs_da3_blk_link(
|
|||
&bp, args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
tmp_info = bp->b_addr;
|
||||
ASSERT(tmp_info->magic == old_info->magic);
|
||||
|
|
@ -1884,6 +1973,13 @@ xfs_da3_blk_link(
|
|||
&bp, args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
tmp_info = bp->b_addr;
|
||||
ASSERT(tmp_info->magic == old_info->magic);
|
||||
|
|
@ -1913,6 +2009,7 @@ xfs_da3_blk_unlink(
|
|||
struct xfs_da_blkinfo *tmp_info;
|
||||
struct xfs_da_args *args;
|
||||
struct xfs_buf *bp;
|
||||
xfs_failaddr_t fa;
|
||||
int error;
|
||||
|
||||
/*
|
||||
|
|
@ -1943,6 +2040,13 @@ xfs_da3_blk_unlink(
|
|||
&bp, args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
tmp_info = bp->b_addr;
|
||||
ASSERT(tmp_info->magic == save_info->magic);
|
||||
|
|
@ -1960,6 +2064,13 @@ xfs_da3_blk_unlink(
|
|||
&bp, args->whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(bp, fa);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
tmp_info = bp->b_addr;
|
||||
ASSERT(tmp_info->magic == save_info->magic);
|
||||
|
|
@ -1996,6 +2107,7 @@ xfs_da3_path_shift(
|
|||
struct xfs_da_node_entry *btree;
|
||||
struct xfs_da3_icnode_hdr nodehdr;
|
||||
struct xfs_buf *bp;
|
||||
xfs_failaddr_t fa;
|
||||
xfs_dablk_t blkno = 0;
|
||||
int level;
|
||||
int error;
|
||||
|
|
@ -2074,6 +2186,12 @@ xfs_da3_path_shift(
|
|||
switch (be16_to_cpu(info->magic)) {
|
||||
case XFS_DA_NODE_MAGIC:
|
||||
case XFS_DA3_NODE_MAGIC:
|
||||
fa = xfs_da3_node_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
blk->magic = XFS_DA_NODE_MAGIC;
|
||||
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
|
||||
bp->b_addr);
|
||||
|
|
@ -2087,6 +2205,12 @@ xfs_da3_path_shift(
|
|||
break;
|
||||
case XFS_ATTR_LEAF_MAGIC:
|
||||
case XFS_ATTR3_LEAF_MAGIC:
|
||||
fa = xfs_attr3_leaf_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
blk->magic = XFS_ATTR_LEAF_MAGIC;
|
||||
ASSERT(level == path->active-1);
|
||||
blk->index = 0;
|
||||
|
|
@ -2094,6 +2218,12 @@ xfs_da3_path_shift(
|
|||
break;
|
||||
case XFS_DIR2_LEAFN_MAGIC:
|
||||
case XFS_DIR3_LEAFN_MAGIC:
|
||||
fa = xfs_dir3_leaf_header_check(blk->bp, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(blk->bp, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
blk->magic = XFS_DIR2_LEAFN_MAGIC;
|
||||
ASSERT(level == path->active-1);
|
||||
blk->index = 0;
|
||||
|
|
@ -2167,8 +2297,8 @@ xfs_da_grow_inode_int(
|
|||
struct xfs_inode *dp = args->dp;
|
||||
int w = args->whichfork;
|
||||
xfs_rfsblock_t nblks = dp->i_nblocks;
|
||||
struct xfs_bmbt_irec map, *mapp;
|
||||
int nmap, error, got, i, mapi;
|
||||
struct xfs_bmbt_irec map, *mapp = ↦
|
||||
int nmap, error, got, i, mapi = 1;
|
||||
|
||||
/*
|
||||
* Find a spot in the file space to put the new block.
|
||||
|
|
@ -2184,14 +2314,7 @@ xfs_da_grow_inode_int(
|
|||
error = xfs_bmapi_write(tp, dp, *bno, count,
|
||||
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
|
||||
args->total, &map, &nmap);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ASSERT(nmap <= 1);
|
||||
if (nmap == 1) {
|
||||
mapp = ↦
|
||||
mapi = 1;
|
||||
} else if (nmap == 0 && count > 1) {
|
||||
if (error == -ENOSPC && count > 1) {
|
||||
xfs_fileoff_t b;
|
||||
int c;
|
||||
|
||||
|
|
@ -2209,16 +2332,13 @@ xfs_da_grow_inode_int(
|
|||
args->total, &mapp[mapi], &nmap);
|
||||
if (error)
|
||||
goto out_free_map;
|
||||
if (nmap < 1)
|
||||
break;
|
||||
mapi += nmap;
|
||||
b = mapp[mapi - 1].br_startoff +
|
||||
mapp[mapi - 1].br_blockcount;
|
||||
}
|
||||
} else {
|
||||
mapi = 0;
|
||||
mapp = NULL;
|
||||
}
|
||||
if (error)
|
||||
goto out_free_map;
|
||||
|
||||
/*
|
||||
* Count the blocks we got, make sure it matches the total.
|
||||
|
|
@ -2290,6 +2410,7 @@ xfs_da3_swap_lastblock(
|
|||
struct xfs_buf *last_buf;
|
||||
struct xfs_buf *sib_buf;
|
||||
struct xfs_buf *par_buf;
|
||||
xfs_failaddr_t fa;
|
||||
xfs_dahash_t dead_hash;
|
||||
xfs_fileoff_t lastoff;
|
||||
xfs_dablk_t dead_blkno;
|
||||
|
|
@ -2326,6 +2447,14 @@ xfs_da3_swap_lastblock(
|
|||
error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
|
||||
if (error)
|
||||
return error;
|
||||
fa = xfs_da3_header_check(last_buf, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(last_buf, fa);
|
||||
xfs_trans_brelse(tp, last_buf);
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the last block into the dead buffer and log it.
|
||||
*/
|
||||
|
|
@ -2364,6 +2493,13 @@ xfs_da3_swap_lastblock(
|
|||
error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
|
||||
if (error)
|
||||
goto done;
|
||||
fa = xfs_da3_header_check(sib_buf, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(sib_buf, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
error = -EFSCORRUPTED;
|
||||
goto done;
|
||||
}
|
||||
sib_info = sib_buf->b_addr;
|
||||
if (XFS_IS_CORRUPT(mp,
|
||||
be32_to_cpu(sib_info->forw) != last_blkno ||
|
||||
|
|
@ -2385,6 +2521,13 @@ xfs_da3_swap_lastblock(
|
|||
error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
|
||||
if (error)
|
||||
goto done;
|
||||
fa = xfs_da3_header_check(sib_buf, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(sib_buf, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
error = -EFSCORRUPTED;
|
||||
goto done;
|
||||
}
|
||||
sib_info = sib_buf->b_addr;
|
||||
if (XFS_IS_CORRUPT(mp,
|
||||
be32_to_cpu(sib_info->back) != last_blkno ||
|
||||
|
|
@ -2408,6 +2551,13 @@ xfs_da3_swap_lastblock(
|
|||
error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
|
||||
if (error)
|
||||
goto done;
|
||||
fa = xfs_da3_node_header_check(par_buf, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(par_buf, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
error = -EFSCORRUPTED;
|
||||
goto done;
|
||||
}
|
||||
par_node = par_buf->b_addr;
|
||||
xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
|
||||
if (XFS_IS_CORRUPT(mp,
|
||||
|
|
@ -2457,6 +2607,13 @@ xfs_da3_swap_lastblock(
|
|||
error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
|
||||
if (error)
|
||||
goto done;
|
||||
fa = xfs_da3_node_header_check(par_buf, args->owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(par_buf, fa);
|
||||
xfs_da_mark_sick(args);
|
||||
error = -EFSCORRUPTED;
|
||||
goto done;
|
||||
}
|
||||
par_node = par_buf->b_addr;
|
||||
xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
|
||||
if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
|
||||
|
|
|
|||
|
|
@ -54,17 +54,24 @@ enum xfs_dacmp {
|
|||
*/
|
||||
typedef struct xfs_da_args {
|
||||
struct xfs_da_geometry *geo; /* da block geometry */
|
||||
const uint8_t *name; /* string (maybe not NULL terminated) */
|
||||
int namelen; /* length of string (maybe no NULL) */
|
||||
uint8_t filetype; /* filetype of inode for directories */
|
||||
const uint8_t *name; /* string (maybe not NULL terminated) */
|
||||
const uint8_t *new_name; /* new attr name */
|
||||
void *value; /* set of bytes (maybe contain NULLs) */
|
||||
int valuelen; /* length of value */
|
||||
unsigned int attr_filter; /* XFS_ATTR_{ROOT,SECURE,INCOMPLETE} */
|
||||
unsigned int attr_flags; /* XATTR_{CREATE,REPLACE} */
|
||||
xfs_dahash_t hashval; /* hash value of name */
|
||||
xfs_ino_t inumber; /* input/output inode number */
|
||||
void *new_value; /* new xattr value (may contain NULLs) */
|
||||
struct xfs_inode *dp; /* directory inode to manipulate */
|
||||
struct xfs_trans *trans; /* current trans (changes over time) */
|
||||
|
||||
xfs_ino_t inumber; /* input/output inode number */
|
||||
xfs_ino_t owner; /* inode that owns the dir/attr data */
|
||||
|
||||
int valuelen; /* length of value */
|
||||
int new_valuelen; /* length of new_value */
|
||||
uint8_t filetype; /* filetype of inode for directories */
|
||||
uint8_t op_flags; /* operation flags */
|
||||
uint8_t attr_filter; /* XFS_ATTR_{ROOT,SECURE,INCOMPLETE} */
|
||||
short namelen; /* length of string (maybe no NULL) */
|
||||
short new_namelen; /* length of new attr name */
|
||||
xfs_dahash_t hashval; /* hash value of name */
|
||||
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
|
||||
int whichfork; /* data or attribute fork */
|
||||
xfs_dablk_t blkno; /* blkno of attr leaf of interest */
|
||||
|
|
@ -77,7 +84,6 @@ typedef struct xfs_da_args {
|
|||
xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */
|
||||
int rmtblkcnt2; /* remote attr value block count */
|
||||
int rmtvaluelen2; /* remote attr value length in bytes */
|
||||
uint32_t op_flags; /* operation flags */
|
||||
enum xfs_dacmp cmpresult; /* name compare result for lookups */
|
||||
} xfs_da_args_t;
|
||||
|
||||
|
|
@ -89,10 +95,8 @@ typedef struct xfs_da_args {
|
|||
#define XFS_DA_OP_ADDNAME (1u << 2) /* this is an add operation */
|
||||
#define XFS_DA_OP_OKNOENT (1u << 3) /* lookup op, ENOENT ok, else die */
|
||||
#define XFS_DA_OP_CILOOKUP (1u << 4) /* lookup returns CI name if found */
|
||||
#define XFS_DA_OP_NOTIME (1u << 5) /* don't update inode timestamps */
|
||||
#define XFS_DA_OP_REMOVE (1u << 6) /* this is a remove operation */
|
||||
#define XFS_DA_OP_RECOVERY (1u << 7) /* Log recovery operation */
|
||||
#define XFS_DA_OP_LOGGED (1u << 8) /* Use intent items to track op */
|
||||
#define XFS_DA_OP_RECOVERY (1u << 5) /* Log recovery operation */
|
||||
#define XFS_DA_OP_LOGGED (1u << 6) /* Use intent items to track op */
|
||||
|
||||
#define XFS_DA_OP_FLAGS \
|
||||
{ XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
|
||||
|
|
@ -100,8 +104,6 @@ typedef struct xfs_da_args {
|
|||
{ XFS_DA_OP_ADDNAME, "ADDNAME" }, \
|
||||
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
|
||||
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \
|
||||
{ XFS_DA_OP_NOTIME, "NOTIME" }, \
|
||||
{ XFS_DA_OP_REMOVE, "REMOVE" }, \
|
||||
{ XFS_DA_OP_RECOVERY, "RECOVERY" }, \
|
||||
{ XFS_DA_OP_LOGGED, "LOGGED" }
|
||||
|
||||
|
|
@ -235,6 +237,8 @@ void xfs_da3_node_hdr_from_disk(struct xfs_mount *mp,
|
|||
struct xfs_da3_icnode_hdr *to, struct xfs_da_intnode *from);
|
||||
void xfs_da3_node_hdr_to_disk(struct xfs_mount *mp,
|
||||
struct xfs_da_intnode *to, struct xfs_da3_icnode_hdr *from);
|
||||
xfs_failaddr_t xfs_da3_header_check(struct xfs_buf *bp, xfs_ino_t owner);
|
||||
xfs_failaddr_t xfs_da3_node_header_check(struct xfs_buf *bp, xfs_ino_t owner);
|
||||
|
||||
extern struct kmem_cache *xfs_da_state_cache;
|
||||
|
||||
|
|
|
|||
|
|
@ -714,12 +714,30 @@ struct xfs_attr3_leafblock {
|
|||
#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
|
||||
#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
|
||||
#define XFS_ATTR_SECURE_BIT 2 /* limit access to secure attrs */
|
||||
#define XFS_ATTR_PARENT_BIT 3 /* parent pointer attrs */
|
||||
#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */
|
||||
#define XFS_ATTR_LOCAL (1u << XFS_ATTR_LOCAL_BIT)
|
||||
#define XFS_ATTR_ROOT (1u << XFS_ATTR_ROOT_BIT)
|
||||
#define XFS_ATTR_SECURE (1u << XFS_ATTR_SECURE_BIT)
|
||||
#define XFS_ATTR_PARENT (1u << XFS_ATTR_PARENT_BIT)
|
||||
#define XFS_ATTR_INCOMPLETE (1u << XFS_ATTR_INCOMPLETE_BIT)
|
||||
#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
|
||||
|
||||
#define XFS_ATTR_NSP_ONDISK_MASK (XFS_ATTR_ROOT | \
|
||||
XFS_ATTR_SECURE | \
|
||||
XFS_ATTR_PARENT)
|
||||
|
||||
/* Private attr namespaces not exposed to userspace */
|
||||
#define XFS_ATTR_PRIVATE_NSP_MASK (XFS_ATTR_PARENT)
|
||||
|
||||
#define XFS_ATTR_ONDISK_MASK (XFS_ATTR_NSP_ONDISK_MASK | \
|
||||
XFS_ATTR_LOCAL | \
|
||||
XFS_ATTR_INCOMPLETE)
|
||||
|
||||
#define XFS_ATTR_NAMESPACE_STR \
|
||||
{ XFS_ATTR_LOCAL, "local" }, \
|
||||
{ XFS_ATTR_ROOT, "root" }, \
|
||||
{ XFS_ATTR_SECURE, "secure" }, \
|
||||
{ XFS_ATTR_PARENT, "parent" }
|
||||
|
||||
/*
|
||||
* Alignment for namelist and valuelist entries (since they are mixed
|
||||
|
|
@ -862,9 +880,7 @@ struct xfs_attr3_rmt_hdr {
|
|||
|
||||
#define XFS_ATTR3_RMT_CRC_OFF offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
|
||||
|
||||
#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize) \
|
||||
((bufsize) - (xfs_has_crc((mp)) ? \
|
||||
sizeof(struct xfs_attr3_rmt_hdr) : 0))
|
||||
unsigned int xfs_attr3_rmt_buf_space(struct xfs_mount *mp);
|
||||
|
||||
/* Number of bytes in a directory block. */
|
||||
static inline unsigned int xfs_dir2_dirblock_bytes(struct xfs_sb *sbp)
|
||||
|
|
@ -875,4 +891,17 @@ static inline unsigned int xfs_dir2_dirblock_bytes(struct xfs_sb *sbp)
|
|||
xfs_failaddr_t xfs_da3_blkinfo_verify(struct xfs_buf *bp,
|
||||
struct xfs_da3_blkinfo *hdr3);
|
||||
|
||||
/*
|
||||
* Parent pointer attribute format definition
|
||||
*
|
||||
* The xattr name contains the dirent name.
|
||||
* The xattr value encodes the parent inode number and generation to ease
|
||||
* opening parents by handle.
|
||||
* The xattr hashval is xfs_dir2_namehash() ^ p_ino
|
||||
*/
|
||||
struct xfs_parent_rec {
|
||||
__be64 p_ino;
|
||||
__be32 p_gen;
|
||||
} __packed;
|
||||
|
||||
#endif /* __XFS_DA_FORMAT_H__ */
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@
|
|||
#include "xfs_da_btree.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
#include "xfs_exchmaps.h"
|
||||
|
||||
static struct kmem_cache *xfs_defer_pending_cache;
|
||||
|
||||
|
|
@ -1091,7 +1092,11 @@ xfs_defer_ops_continue(
|
|||
ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
|
||||
|
||||
/* Lock the captured resources to the new transaction. */
|
||||
if (dfc->dfc_held.dr_inos == 2)
|
||||
if (dfc->dfc_held.dr_inos > 2) {
|
||||
xfs_sort_inodes(dfc->dfc_held.dr_ip, dfc->dfc_held.dr_inos);
|
||||
xfs_lock_inodes(dfc->dfc_held.dr_ip, dfc->dfc_held.dr_inos,
|
||||
XFS_ILOCK_EXCL);
|
||||
} else if (dfc->dfc_held.dr_inos == 2)
|
||||
xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
|
||||
dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
|
||||
else if (dfc->dfc_held.dr_inos == 1)
|
||||
|
|
@ -1176,6 +1181,10 @@ xfs_defer_init_item_caches(void)
|
|||
error = xfs_attr_intent_init_cache();
|
||||
if (error)
|
||||
goto err;
|
||||
error = xfs_exchmaps_intent_init_cache();
|
||||
if (error)
|
||||
goto err;
|
||||
|
||||
return 0;
|
||||
err:
|
||||
xfs_defer_destroy_item_caches();
|
||||
|
|
@ -1186,6 +1195,7 @@ err:
|
|||
void
|
||||
xfs_defer_destroy_item_caches(void)
|
||||
{
|
||||
xfs_exchmaps_intent_destroy_cache();
|
||||
xfs_attr_intent_destroy_cache();
|
||||
xfs_extfree_intent_destroy_cache();
|
||||
xfs_bmap_intent_destroy_cache();
|
||||
|
|
|
|||
|
|
@ -72,12 +72,18 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
|
|||
extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
|
||||
extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
|
||||
extern const struct xfs_defer_op_type xfs_attr_defer_type;
|
||||
|
||||
extern const struct xfs_defer_op_type xfs_exchmaps_defer_type;
|
||||
|
||||
/*
|
||||
* Deferred operation item relogging limits.
|
||||
*/
|
||||
#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
|
||||
|
||||
/*
|
||||
* Rename w/ parent pointers can require up to 5 inodes with deferred ops to
|
||||
* be joined to the transaction: src_dp, target_dp, src_ip, target_ip, and wip.
|
||||
* These inodes are locked in sorted order by their inode numbers
|
||||
*/
|
||||
#define XFS_DEFER_OPS_NR_INODES 5
|
||||
#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
|
||||
|
||||
/* Resources that must be held across a transaction roll. */
|
||||
|
|
|
|||
|
|
@ -250,11 +250,68 @@ xfs_dir_init(
|
|||
args->geo = dp->i_mount->m_dir_geo;
|
||||
args->dp = dp;
|
||||
args->trans = tp;
|
||||
args->owner = dp->i_ino;
|
||||
error = xfs_dir2_sf_create(args, pdp->i_ino);
|
||||
kfree(args);
|
||||
return error;
|
||||
}
|
||||
|
||||
enum xfs_dir2_fmt
|
||||
xfs_dir2_format(
|
||||
struct xfs_da_args *args,
|
||||
int *error)
|
||||
{
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_da_geometry *geo = mp->m_dir_geo;
|
||||
xfs_fileoff_t eof;
|
||||
|
||||
xfs_assert_ilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
|
||||
|
||||
*error = 0;
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return XFS_DIR2_FMT_SF;
|
||||
|
||||
*error = xfs_bmap_last_offset(dp, &eof, XFS_DATA_FORK);
|
||||
if (*error)
|
||||
return XFS_DIR2_FMT_ERROR;
|
||||
|
||||
if (eof == XFS_B_TO_FSB(mp, geo->blksize)) {
|
||||
if (XFS_IS_CORRUPT(mp, dp->i_disk_size != geo->blksize)) {
|
||||
xfs_da_mark_sick(args);
|
||||
*error = -EFSCORRUPTED;
|
||||
return XFS_DIR2_FMT_ERROR;
|
||||
}
|
||||
return XFS_DIR2_FMT_BLOCK;
|
||||
}
|
||||
if (eof == geo->leafblk + geo->fsbcount)
|
||||
return XFS_DIR2_FMT_LEAF;
|
||||
return XFS_DIR2_FMT_NODE;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_dir_createname_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (!args->inumber)
|
||||
args->op_flags |= XFS_DA_OP_JUSTCHECK;
|
||||
|
||||
switch (xfs_dir2_format(args, &error)) {
|
||||
case XFS_DIR2_FMT_SF:
|
||||
return xfs_dir2_sf_addname(args);
|
||||
case XFS_DIR2_FMT_BLOCK:
|
||||
return xfs_dir2_block_addname(args);
|
||||
case XFS_DIR2_FMT_LEAF:
|
||||
return xfs_dir2_leaf_addname(args);
|
||||
case XFS_DIR2_FMT_NODE:
|
||||
return xfs_dir2_node_addname(args);
|
||||
default:
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Enter a name in a directory, or check for available space.
|
||||
* If inum is 0, only the available space test is performed.
|
||||
|
|
@ -269,7 +326,6 @@ xfs_dir_createname(
|
|||
{
|
||||
struct xfs_da_args *args;
|
||||
int rval;
|
||||
bool v;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
|
||||
|
|
@ -295,31 +351,9 @@ xfs_dir_createname(
|
|||
args->whichfork = XFS_DATA_FORK;
|
||||
args->trans = tp;
|
||||
args->op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
|
||||
if (!inum)
|
||||
args->op_flags |= XFS_DA_OP_JUSTCHECK;
|
||||
args->owner = dp->i_ino;
|
||||
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
rval = xfs_dir2_sf_addname(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isblock(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v) {
|
||||
rval = xfs_dir2_block_addname(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isleaf(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v)
|
||||
rval = xfs_dir2_leaf_addname(args);
|
||||
else
|
||||
rval = xfs_dir2_node_addname(args);
|
||||
|
||||
out_free:
|
||||
rval = xfs_dir_createname_args(args);
|
||||
kfree(args);
|
||||
return rval;
|
||||
}
|
||||
|
|
@ -350,6 +384,34 @@ xfs_dir_cilookup_result(
|
|||
return -EEXIST;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_dir_lookup_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
int error;
|
||||
|
||||
switch (xfs_dir2_format(args, &error)) {
|
||||
case XFS_DIR2_FMT_SF:
|
||||
error = xfs_dir2_sf_lookup(args);
|
||||
break;
|
||||
case XFS_DIR2_FMT_BLOCK:
|
||||
error = xfs_dir2_block_lookup(args);
|
||||
break;
|
||||
case XFS_DIR2_FMT_LEAF:
|
||||
error = xfs_dir2_leaf_lookup(args);
|
||||
break;
|
||||
case XFS_DIR2_FMT_NODE:
|
||||
error = xfs_dir2_node_lookup(args);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (error != -EEXIST)
|
||||
return error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Lookup a name in a directory, give back the inode number.
|
||||
* If ci_name is not NULL, returns the actual name in ci_name if it differs
|
||||
|
|
@ -366,7 +428,6 @@ xfs_dir_lookup(
|
|||
{
|
||||
struct xfs_da_args *args;
|
||||
int rval;
|
||||
bool v;
|
||||
int lock_mode;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
|
|
@ -383,34 +444,12 @@ xfs_dir_lookup(
|
|||
args->whichfork = XFS_DATA_FORK;
|
||||
args->trans = tp;
|
||||
args->op_flags = XFS_DA_OP_OKNOENT;
|
||||
args->owner = dp->i_ino;
|
||||
if (ci_name)
|
||||
args->op_flags |= XFS_DA_OP_CILOOKUP;
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
rval = xfs_dir2_sf_lookup(args);
|
||||
goto out_check_rval;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isblock(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v) {
|
||||
rval = xfs_dir2_block_lookup(args);
|
||||
goto out_check_rval;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isleaf(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v)
|
||||
rval = xfs_dir2_leaf_lookup(args);
|
||||
else
|
||||
rval = xfs_dir2_node_lookup(args);
|
||||
|
||||
out_check_rval:
|
||||
if (rval == -EEXIST)
|
||||
rval = 0;
|
||||
rval = xfs_dir_lookup_args(args);
|
||||
if (!rval) {
|
||||
*inum = args->inumber;
|
||||
if (ci_name) {
|
||||
|
|
@ -418,12 +457,31 @@ out_check_rval:
|
|||
ci_name->len = args->valuelen;
|
||||
}
|
||||
}
|
||||
out_free:
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
kfree(args);
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_dir_removename_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
int error;
|
||||
|
||||
switch (xfs_dir2_format(args, &error)) {
|
||||
case XFS_DIR2_FMT_SF:
|
||||
return xfs_dir2_sf_removename(args);
|
||||
case XFS_DIR2_FMT_BLOCK:
|
||||
return xfs_dir2_block_removename(args);
|
||||
case XFS_DIR2_FMT_LEAF:
|
||||
return xfs_dir2_leaf_removename(args);
|
||||
case XFS_DIR2_FMT_NODE:
|
||||
return xfs_dir2_node_removename(args);
|
||||
default:
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove an entry from a directory.
|
||||
*/
|
||||
|
|
@ -431,13 +489,12 @@ int
|
|||
xfs_dir_removename(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
struct xfs_name *name,
|
||||
const struct xfs_name *name,
|
||||
xfs_ino_t ino,
|
||||
xfs_extlen_t total) /* bmap's total block count */
|
||||
{
|
||||
struct xfs_da_args *args;
|
||||
int rval;
|
||||
bool v;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
|
||||
|
|
@ -456,32 +513,32 @@ xfs_dir_removename(
|
|||
args->total = total;
|
||||
args->whichfork = XFS_DATA_FORK;
|
||||
args->trans = tp;
|
||||
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
rval = xfs_dir2_sf_removename(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isblock(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v) {
|
||||
rval = xfs_dir2_block_removename(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isleaf(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v)
|
||||
rval = xfs_dir2_leaf_removename(args);
|
||||
else
|
||||
rval = xfs_dir2_node_removename(args);
|
||||
out_free:
|
||||
args->owner = dp->i_ino;
|
||||
rval = xfs_dir_removename_args(args);
|
||||
kfree(args);
|
||||
return rval;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_dir_replace_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
int error;
|
||||
|
||||
switch (xfs_dir2_format(args, &error)) {
|
||||
case XFS_DIR2_FMT_SF:
|
||||
return xfs_dir2_sf_replace(args);
|
||||
case XFS_DIR2_FMT_BLOCK:
|
||||
return xfs_dir2_block_replace(args);
|
||||
case XFS_DIR2_FMT_LEAF:
|
||||
return xfs_dir2_leaf_replace(args);
|
||||
case XFS_DIR2_FMT_NODE:
|
||||
return xfs_dir2_node_replace(args);
|
||||
default:
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Replace the inode number of a directory entry.
|
||||
*/
|
||||
|
|
@ -495,7 +552,6 @@ xfs_dir_replace(
|
|||
{
|
||||
struct xfs_da_args *args;
|
||||
int rval;
|
||||
bool v;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
|
||||
|
|
@ -517,28 +573,8 @@ xfs_dir_replace(
|
|||
args->total = total;
|
||||
args->whichfork = XFS_DATA_FORK;
|
||||
args->trans = tp;
|
||||
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
rval = xfs_dir2_sf_replace(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isblock(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v) {
|
||||
rval = xfs_dir2_block_replace(args);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
rval = xfs_dir2_isleaf(args, &v);
|
||||
if (rval)
|
||||
goto out_free;
|
||||
if (v)
|
||||
rval = xfs_dir2_leaf_replace(args);
|
||||
else
|
||||
rval = xfs_dir2_node_replace(args);
|
||||
out_free:
|
||||
args->owner = dp->i_ino;
|
||||
rval = xfs_dir_replace_args(args);
|
||||
kfree(args);
|
||||
return rval;
|
||||
}
|
||||
|
|
@ -606,57 +642,6 @@ xfs_dir2_grow_inode(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* See if the directory is a single-block form directory.
|
||||
*/
|
||||
int
|
||||
xfs_dir2_isblock(
|
||||
struct xfs_da_args *args,
|
||||
bool *isblock)
|
||||
{
|
||||
struct xfs_mount *mp = args->dp->i_mount;
|
||||
xfs_fileoff_t eof;
|
||||
int error;
|
||||
|
||||
error = xfs_bmap_last_offset(args->dp, &eof, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
*isblock = false;
|
||||
if (XFS_FSB_TO_B(mp, eof) != args->geo->blksize)
|
||||
return 0;
|
||||
|
||||
*isblock = true;
|
||||
if (XFS_IS_CORRUPT(mp, args->dp->i_disk_size != args->geo->blksize)) {
|
||||
xfs_da_mark_sick(args);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* See if the directory is a single-leaf form directory.
|
||||
*/
|
||||
int
|
||||
xfs_dir2_isleaf(
|
||||
struct xfs_da_args *args,
|
||||
bool *isleaf)
|
||||
{
|
||||
xfs_fileoff_t eof;
|
||||
int error;
|
||||
|
||||
error = xfs_bmap_last_offset(args->dp, &eof, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
*isleaf = false;
|
||||
if (eof != args->geo->leafblk + args->geo->fsbcount)
|
||||
return 0;
|
||||
|
||||
*isleaf = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove the given block from the directory.
|
||||
* This routine is used for data and free blocks, leaf/node are done
|
||||
|
|
|
|||
|
|
@ -36,6 +36,16 @@ xfs_dir2_samename(
|
|||
return !memcmp(n1->name, n2->name, n1->len);
|
||||
}
|
||||
|
||||
enum xfs_dir2_fmt {
|
||||
XFS_DIR2_FMT_SF,
|
||||
XFS_DIR2_FMT_BLOCK,
|
||||
XFS_DIR2_FMT_LEAF,
|
||||
XFS_DIR2_FMT_NODE,
|
||||
XFS_DIR2_FMT_ERROR,
|
||||
};
|
||||
|
||||
enum xfs_dir2_fmt xfs_dir2_format(struct xfs_da_args *args, int *error);
|
||||
|
||||
/*
|
||||
* Convert inode mode to directory entry filetype
|
||||
*/
|
||||
|
|
@ -58,7 +68,7 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
|
|||
const struct xfs_name *name, xfs_ino_t *inum,
|
||||
struct xfs_name *ci_name);
|
||||
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
struct xfs_name *name, xfs_ino_t ino,
|
||||
const struct xfs_name *name, xfs_ino_t ino,
|
||||
xfs_extlen_t tot);
|
||||
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
const struct xfs_name *name, xfs_ino_t inum,
|
||||
|
|
@ -66,6 +76,11 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
|
|||
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
struct xfs_name *name);
|
||||
|
||||
int xfs_dir_lookup_args(struct xfs_da_args *args);
|
||||
int xfs_dir_createname_args(struct xfs_da_args *args);
|
||||
int xfs_dir_removename_args(struct xfs_da_args *args);
|
||||
int xfs_dir_replace_args(struct xfs_da_args *args);
|
||||
|
||||
/*
|
||||
* Direct call from the bmap code, bypassing the generic directory layer.
|
||||
*/
|
||||
|
|
@ -74,8 +89,6 @@ extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
|
|||
/*
|
||||
* Interface routines used by userspace utilities
|
||||
*/
|
||||
extern int xfs_dir2_isblock(struct xfs_da_args *args, bool *isblock);
|
||||
extern int xfs_dir2_isleaf(struct xfs_da_args *args, bool *isleaf);
|
||||
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
|
||||
struct xfs_buf *bp);
|
||||
|
||||
|
|
@ -101,6 +114,10 @@ extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
|
|||
|
||||
extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
|
||||
|
||||
xfs_failaddr_t xfs_dir3_leaf_header_check(struct xfs_buf *bp, xfs_ino_t owner);
|
||||
xfs_failaddr_t xfs_dir3_data_header_check(struct xfs_buf *bp, xfs_ino_t owner);
|
||||
xfs_failaddr_t xfs_dir3_block_header_check(struct xfs_buf *bp, xfs_ino_t owner);
|
||||
|
||||
extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
|
||||
extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
|
||||
extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
|
||||
|
|
|
|||
|
|
@ -115,17 +115,20 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
|
|||
.verify_struct = xfs_dir3_block_verify,
|
||||
};
|
||||
|
||||
static xfs_failaddr_t
|
||||
xfs_failaddr_t
|
||||
xfs_dir3_block_header_check(
|
||||
struct xfs_inode *dp,
|
||||
struct xfs_buf *bp)
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
|
||||
|
||||
if (be64_to_cpu(hdr3->owner) != dp->i_ino)
|
||||
if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC))
|
||||
return __this_address;
|
||||
|
||||
if (be64_to_cpu(hdr3->owner) != owner)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
|
|
@ -136,6 +139,7 @@ int
|
|||
xfs_dir3_block_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
|
|
@ -148,7 +152,7 @@ xfs_dir3_block_read(
|
|||
return err;
|
||||
|
||||
/* Check things that we can't do in the verifier. */
|
||||
fa = xfs_dir3_block_header_check(dp, *bpp);
|
||||
fa = xfs_dir3_block_header_check(*bpp, owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
|
|
@ -163,12 +167,13 @@ xfs_dir3_block_read(
|
|||
|
||||
static void
|
||||
xfs_dir3_block_init(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *dp)
|
||||
struct xfs_da_args *args,
|
||||
struct xfs_buf *bp)
|
||||
{
|
||||
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
|
||||
struct xfs_trans *tp = args->trans;
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
|
||||
|
||||
bp->b_ops = &xfs_dir3_block_buf_ops;
|
||||
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_BLOCK_BUF);
|
||||
|
|
@ -177,7 +182,7 @@ xfs_dir3_block_init(
|
|||
memset(hdr3, 0, sizeof(*hdr3));
|
||||
hdr3->magic = cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
|
||||
hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
hdr3->owner = cpu_to_be64(dp->i_ino);
|
||||
hdr3->owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
|
||||
return;
|
||||
|
||||
|
|
@ -382,7 +387,7 @@ xfs_dir2_block_addname(
|
|||
tp = args->trans;
|
||||
|
||||
/* Read the (one and only) directory block into bp. */
|
||||
error = xfs_dir3_block_read(tp, dp, &bp);
|
||||
error = xfs_dir3_block_read(tp, dp, args->owner, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -697,7 +702,7 @@ xfs_dir2_block_lookup_int(
|
|||
dp = args->dp;
|
||||
tp = args->trans;
|
||||
|
||||
error = xfs_dir3_block_read(tp, dp, &bp);
|
||||
error = xfs_dir3_block_read(tp, dp, args->owner, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -981,7 +986,8 @@ xfs_dir2_leaf_to_block(
|
|||
* Read the data block if we don't already have it, give up if it fails.
|
||||
*/
|
||||
if (!dbp) {
|
||||
error = xfs_dir3_data_read(tp, dp, args->geo->datablk, 0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
args->geo->datablk, 0, &dbp);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
|
@ -1009,7 +1015,7 @@ xfs_dir2_leaf_to_block(
|
|||
/*
|
||||
* Start converting it to block form.
|
||||
*/
|
||||
xfs_dir3_block_init(mp, tp, dbp, dp);
|
||||
xfs_dir3_block_init(args, dbp);
|
||||
|
||||
needlog = 1;
|
||||
needscan = 0;
|
||||
|
|
@ -1129,7 +1135,7 @@ xfs_dir2_sf_to_block(
|
|||
error = xfs_dir3_data_init(args, blkno, &bp);
|
||||
if (error)
|
||||
goto out_free;
|
||||
xfs_dir3_block_init(mp, tp, bp, dp);
|
||||
xfs_dir3_block_init(args, bp);
|
||||
hdr = bp->b_addr;
|
||||
|
||||
/*
|
||||
|
|
@ -1169,7 +1175,7 @@ xfs_dir2_sf_to_block(
|
|||
* Create entry for .
|
||||
*/
|
||||
dep = bp->b_addr + offset;
|
||||
dep->inumber = cpu_to_be64(dp->i_ino);
|
||||
dep->inumber = cpu_to_be64(args->owner);
|
||||
dep->namelen = 1;
|
||||
dep->name[0] = '.';
|
||||
xfs_dir2_data_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
|
||||
|
|
|
|||
|
|
@ -395,17 +395,20 @@ static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
|
|||
.verify_write = xfs_dir3_data_write_verify,
|
||||
};
|
||||
|
||||
static xfs_failaddr_t
|
||||
xfs_failaddr_t
|
||||
xfs_dir3_data_header_check(
|
||||
struct xfs_inode *dp,
|
||||
struct xfs_buf *bp)
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
struct xfs_dir3_data_hdr *hdr3 = bp->b_addr;
|
||||
|
||||
if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
|
||||
if (hdr3->hdr.magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC))
|
||||
return __this_address;
|
||||
|
||||
if (be64_to_cpu(hdr3->hdr.owner) != owner)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
|
|
@ -416,6 +419,7 @@ int
|
|||
xfs_dir3_data_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t bno,
|
||||
unsigned int flags,
|
||||
struct xfs_buf **bpp)
|
||||
|
|
@ -429,7 +433,7 @@ xfs_dir3_data_read(
|
|||
return err;
|
||||
|
||||
/* Check things that we can't do in the verifier. */
|
||||
fa = xfs_dir3_data_header_check(dp, *bpp);
|
||||
fa = xfs_dir3_data_header_check(*bpp, owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
|
|
@ -725,7 +729,7 @@ xfs_dir3_data_init(
|
|||
memset(hdr3, 0, sizeof(*hdr3));
|
||||
hdr3->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
|
||||
hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
hdr3->owner = cpu_to_be64(dp->i_ino);
|
||||
hdr3->owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
|
||||
|
||||
} else
|
||||
|
|
|
|||
|
|
@ -208,6 +208,29 @@ xfs_dir3_leaf_verify(
|
|||
return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr, true);
|
||||
}
|
||||
|
||||
xfs_failaddr_t
|
||||
xfs_dir3_leaf_header_check(
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner)
|
||||
{
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
|
||||
if (xfs_has_crc(mp)) {
|
||||
struct xfs_dir3_leaf *hdr3 = bp->b_addr;
|
||||
|
||||
if (hdr3->hdr.info.hdr.magic !=
|
||||
cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) &&
|
||||
hdr3->hdr.info.hdr.magic !=
|
||||
cpu_to_be16(XFS_DIR3_LEAFN_MAGIC))
|
||||
return __this_address;
|
||||
|
||||
if (be64_to_cpu(hdr3->hdr.info.owner) != owner)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_dir3_leaf_read_verify(
|
||||
struct xfs_buf *bp)
|
||||
|
|
@ -271,32 +294,60 @@ int
|
|||
xfs_dir3_leaf_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
xfs_failaddr_t fa;
|
||||
int err;
|
||||
|
||||
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
|
||||
&xfs_dir3_leaf1_buf_ops);
|
||||
if (!err && tp && *bpp)
|
||||
if (err || !(*bpp))
|
||||
return err;
|
||||
|
||||
fa = xfs_dir3_leaf_header_check(*bpp, owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
*bpp = NULL;
|
||||
xfs_dirattr_mark_sick(dp, XFS_DATA_FORK);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (tp)
|
||||
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_dir3_leafn_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
xfs_failaddr_t fa;
|
||||
int err;
|
||||
|
||||
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
|
||||
&xfs_dir3_leafn_buf_ops);
|
||||
if (!err && tp && *bpp)
|
||||
if (err || !(*bpp))
|
||||
return err;
|
||||
|
||||
fa = xfs_dir3_leaf_header_check(*bpp, owner);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
*bpp = NULL;
|
||||
xfs_dirattr_mark_sick(dp, XFS_DATA_FORK);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (tp)
|
||||
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -304,12 +355,12 @@ xfs_dir3_leafn_read(
|
|||
*/
|
||||
static void
|
||||
xfs_dir3_leaf_init(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_da_args *args,
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner,
|
||||
uint16_t type)
|
||||
{
|
||||
struct xfs_mount *mp = args->dp->i_mount;
|
||||
struct xfs_trans *tp = args->trans;
|
||||
struct xfs_dir2_leaf *leaf = bp->b_addr;
|
||||
|
||||
ASSERT(type == XFS_DIR2_LEAF1_MAGIC || type == XFS_DIR2_LEAFN_MAGIC);
|
||||
|
|
@ -323,7 +374,7 @@ xfs_dir3_leaf_init(
|
|||
? cpu_to_be16(XFS_DIR3_LEAF1_MAGIC)
|
||||
: cpu_to_be16(XFS_DIR3_LEAFN_MAGIC);
|
||||
leaf3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
leaf3->info.owner = cpu_to_be64(owner);
|
||||
leaf3->info.owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid);
|
||||
} else {
|
||||
memset(leaf, 0, sizeof(*leaf));
|
||||
|
|
@ -356,7 +407,6 @@ xfs_dir3_leaf_get_buf(
|
|||
{
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_trans *tp = args->trans;
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_buf *bp;
|
||||
int error;
|
||||
|
||||
|
|
@ -369,7 +419,7 @@ xfs_dir3_leaf_get_buf(
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_dir3_leaf_init(mp, tp, bp, dp->i_ino, magic);
|
||||
xfs_dir3_leaf_init(args, bp, magic);
|
||||
xfs_dir3_leaf_log_header(args, bp);
|
||||
if (magic == XFS_DIR2_LEAF1_MAGIC)
|
||||
xfs_dir3_leaf_log_tail(args, bp);
|
||||
|
|
@ -647,7 +697,8 @@ xfs_dir2_leaf_addname(
|
|||
|
||||
trace_xfs_dir2_leaf_addname(args);
|
||||
|
||||
error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
|
||||
error = xfs_dir3_leaf_read(tp, dp, args->owner, args->geo->leafblk,
|
||||
&lbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -834,9 +885,9 @@ xfs_dir2_leaf_addname(
|
|||
* Already had space in some data block.
|
||||
* Just read that one in.
|
||||
*/
|
||||
error = xfs_dir3_data_read(tp, dp,
|
||||
xfs_dir2_db_to_da(args->geo, use_block),
|
||||
0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, use_block), 0,
|
||||
&dbp);
|
||||
if (error) {
|
||||
xfs_trans_brelse(tp, lbp);
|
||||
return error;
|
||||
|
|
@ -1238,7 +1289,8 @@ xfs_dir2_leaf_lookup_int(
|
|||
tp = args->trans;
|
||||
mp = dp->i_mount;
|
||||
|
||||
error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
|
||||
error = xfs_dir3_leaf_read(tp, dp, args->owner, args->geo->leafblk,
|
||||
&lbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1276,9 +1328,9 @@ xfs_dir2_leaf_lookup_int(
|
|||
if (newdb != curdb) {
|
||||
if (dbp)
|
||||
xfs_trans_brelse(tp, dbp);
|
||||
error = xfs_dir3_data_read(tp, dp,
|
||||
xfs_dir2_db_to_da(args->geo, newdb),
|
||||
0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, newdb), 0,
|
||||
&dbp);
|
||||
if (error) {
|
||||
xfs_trans_brelse(tp, lbp);
|
||||
return error;
|
||||
|
|
@ -1318,9 +1370,9 @@ xfs_dir2_leaf_lookup_int(
|
|||
ASSERT(cidb != -1);
|
||||
if (cidb != curdb) {
|
||||
xfs_trans_brelse(tp, dbp);
|
||||
error = xfs_dir3_data_read(tp, dp,
|
||||
xfs_dir2_db_to_da(args->geo, cidb),
|
||||
0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, cidb), 0,
|
||||
&dbp);
|
||||
if (error) {
|
||||
xfs_trans_brelse(tp, lbp);
|
||||
return error;
|
||||
|
|
@ -1614,7 +1666,8 @@ xfs_dir2_leaf_trim_data(
|
|||
/*
|
||||
* Read the offending data block. We need its buffer.
|
||||
*/
|
||||
error = xfs_dir3_data_read(tp, dp, xfs_dir2_db_to_da(geo, db), 0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(geo, db), 0, &dbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1753,7 +1806,8 @@ xfs_dir2_node_to_leaf(
|
|||
/*
|
||||
* Read the freespace block.
|
||||
*/
|
||||
error = xfs_dir2_free_read(tp, dp, args->geo->freeblk, &fbp);
|
||||
error = xfs_dir2_free_read(tp, dp, args->owner, args->geo->freeblk,
|
||||
&fbp);
|
||||
if (error)
|
||||
return error;
|
||||
xfs_dir2_free_hdr_from_disk(mp, &freehdr, fbp->b_addr);
|
||||
|
|
|
|||
|
|
@ -175,11 +175,11 @@ const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
|
|||
/* Everything ok in the free block header? */
|
||||
static xfs_failaddr_t
|
||||
xfs_dir3_free_header_check(
|
||||
struct xfs_inode *dp,
|
||||
xfs_dablk_t fbno,
|
||||
struct xfs_buf *bp)
|
||||
struct xfs_buf *bp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno)
|
||||
{
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_mount *mp = bp->b_mount;
|
||||
int maxbests = mp->m_dir_geo->free_max_bests;
|
||||
unsigned int firstdb;
|
||||
|
||||
|
|
@ -195,7 +195,7 @@ xfs_dir3_free_header_check(
|
|||
return __this_address;
|
||||
if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
|
||||
return __this_address;
|
||||
if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
|
||||
if (be64_to_cpu(hdr3->hdr.owner) != owner)
|
||||
return __this_address;
|
||||
} else {
|
||||
struct xfs_dir2_free_hdr *hdr = bp->b_addr;
|
||||
|
|
@ -214,6 +214,7 @@ static int
|
|||
__xfs_dir3_free_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno,
|
||||
unsigned int flags,
|
||||
struct xfs_buf **bpp)
|
||||
|
|
@ -227,7 +228,7 @@ __xfs_dir3_free_read(
|
|||
return err;
|
||||
|
||||
/* Check things that we can't do in the verifier. */
|
||||
fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
|
||||
fa = xfs_dir3_free_header_check(*bpp, owner, fbno);
|
||||
if (fa) {
|
||||
__xfs_buf_mark_corrupt(*bpp, fa);
|
||||
xfs_trans_brelse(tp, *bpp);
|
||||
|
|
@ -299,20 +300,23 @@ int
|
|||
xfs_dir2_free_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
return __xfs_dir3_free_read(tp, dp, fbno, 0, bpp);
|
||||
return __xfs_dir3_free_read(tp, dp, owner, fbno, 0, bpp);
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_dir2_free_try_read(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
xfs_ino_t owner,
|
||||
xfs_dablk_t fbno,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
return __xfs_dir3_free_read(tp, dp, fbno, XFS_DABUF_MAP_HOLE_OK, bpp);
|
||||
return __xfs_dir3_free_read(tp, dp, owner, fbno, XFS_DABUF_MAP_HOLE_OK,
|
||||
bpp);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
@ -349,7 +353,7 @@ xfs_dir3_free_get_buf(
|
|||
hdr.magic = XFS_DIR3_FREE_MAGIC;
|
||||
|
||||
hdr3->hdr.blkno = cpu_to_be64(xfs_buf_daddr(bp));
|
||||
hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
|
||||
hdr3->hdr.owner = cpu_to_be64(args->owner);
|
||||
uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_meta_uuid);
|
||||
} else
|
||||
hdr.magic = XFS_DIR2_FREE_MAGIC;
|
||||
|
|
@ -717,7 +721,7 @@ xfs_dir2_leafn_lookup_for_addname(
|
|||
if (curbp)
|
||||
xfs_trans_brelse(tp, curbp);
|
||||
|
||||
error = xfs_dir2_free_read(tp, dp,
|
||||
error = xfs_dir2_free_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo,
|
||||
newfdb),
|
||||
&curbp);
|
||||
|
|
@ -863,7 +867,7 @@ xfs_dir2_leafn_lookup_for_entry(
|
|||
ASSERT(state->extravalid);
|
||||
curbp = state->extrablk.bp;
|
||||
} else {
|
||||
error = xfs_dir3_data_read(tp, dp,
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo,
|
||||
newdb),
|
||||
0, &curbp);
|
||||
|
|
@ -1356,8 +1360,8 @@ xfs_dir2_leafn_remove(
|
|||
* read in the free block.
|
||||
*/
|
||||
fdb = xfs_dir2_db_to_fdb(geo, db);
|
||||
error = xfs_dir2_free_read(tp, dp, xfs_dir2_db_to_da(geo, fdb),
|
||||
&fbp);
|
||||
error = xfs_dir2_free_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(geo, fdb), &fbp);
|
||||
if (error)
|
||||
return error;
|
||||
free = fbp->b_addr;
|
||||
|
|
@ -1562,7 +1566,8 @@ xfs_dir2_leafn_toosmall(
|
|||
/*
|
||||
* Read the sibling leaf block.
|
||||
*/
|
||||
error = xfs_dir3_leafn_read(state->args->trans, dp, blkno, &bp);
|
||||
error = xfs_dir3_leafn_read(state->args->trans, dp,
|
||||
state->args->owner, blkno, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1715,7 +1720,7 @@ xfs_dir2_node_add_datablk(
|
|||
* that was just allocated.
|
||||
*/
|
||||
fbno = xfs_dir2_db_to_fdb(args->geo, *dbno);
|
||||
error = xfs_dir2_free_try_read(tp, dp,
|
||||
error = xfs_dir2_free_try_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, fbno), &fbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
|
@ -1862,7 +1867,7 @@ xfs_dir2_node_find_freeblk(
|
|||
* so this might not succeed. This should be really rare, so
|
||||
* there's no reason to avoid it.
|
||||
*/
|
||||
error = xfs_dir2_free_try_read(tp, dp,
|
||||
error = xfs_dir2_free_try_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, fbno),
|
||||
&fbp);
|
||||
if (error)
|
||||
|
|
@ -1948,9 +1953,8 @@ xfs_dir2_node_addname_int(
|
|||
&freehdr, &findex);
|
||||
} else {
|
||||
/* Read the data block in. */
|
||||
error = xfs_dir3_data_read(tp, dp,
|
||||
xfs_dir2_db_to_da(args->geo, dbno),
|
||||
0, &dbp);
|
||||
error = xfs_dir3_data_read(tp, dp, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, dbno), 0, &dbp);
|
||||
}
|
||||
if (error)
|
||||
return error;
|
||||
|
|
@ -2302,7 +2306,7 @@ xfs_dir2_node_trim_free(
|
|||
/*
|
||||
* Read the freespace block.
|
||||
*/
|
||||
error = xfs_dir2_free_try_read(tp, dp, fo, &bp);
|
||||
error = xfs_dir2_free_try_read(tp, dp, args->owner, fo, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -50,8 +50,8 @@ extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
|
|||
|
||||
|
||||
/* xfs_dir2_block.c */
|
||||
extern int xfs_dir3_block_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
struct xfs_buf **bpp);
|
||||
int xfs_dir3_block_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_ino_t owner, struct xfs_buf **bpp);
|
||||
extern int xfs_dir2_block_addname(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
|
||||
|
|
@ -78,7 +78,8 @@ extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
|
|||
extern xfs_failaddr_t __xfs_dir3_data_check(struct xfs_inode *dp,
|
||||
struct xfs_buf *bp);
|
||||
int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_dablk_t bno, unsigned int flags, struct xfs_buf **bpp);
|
||||
xfs_ino_t owner, xfs_dablk_t bno, unsigned int flags,
|
||||
struct xfs_buf **bpp);
|
||||
int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
|
||||
unsigned int flags);
|
||||
|
||||
|
|
@ -95,9 +96,9 @@ void xfs_dir2_leaf_hdr_from_disk(struct xfs_mount *mp,
|
|||
void xfs_dir2_leaf_hdr_to_disk(struct xfs_mount *mp, struct xfs_dir2_leaf *to,
|
||||
struct xfs_dir3_icleaf_hdr *from);
|
||||
int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
xfs_ino_t owner, xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
xfs_ino_t owner, xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
|
||||
struct xfs_buf *dbp);
|
||||
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
|
||||
|
|
@ -154,8 +155,8 @@ extern int xfs_dir2_node_removename(struct xfs_da_args *args);
|
|||
extern int xfs_dir2_node_replace(struct xfs_da_args *args);
|
||||
extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
|
||||
int *rvalp);
|
||||
extern int xfs_dir2_free_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
int xfs_dir2_free_read(struct xfs_trans *tp, struct xfs_inode *dp,
|
||||
xfs_ino_t owner, xfs_dablk_t fbno, struct xfs_buf **bpp);
|
||||
|
||||
/* xfs_dir2_sf.c */
|
||||
xfs_ino_t xfs_dir2_sf_get_ino(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr,
|
||||
|
|
|
|||
|
|
@ -63,7 +63,8 @@
|
|||
#define XFS_ERRTAG_ATTR_LEAF_TO_NODE 41
|
||||
#define XFS_ERRTAG_WB_DELAY_MS 42
|
||||
#define XFS_ERRTAG_WRITE_DELAY_MS 43
|
||||
#define XFS_ERRTAG_MAX 44
|
||||
#define XFS_ERRTAG_EXCHMAPS_FINISH_ONE 44
|
||||
#define XFS_ERRTAG_MAX 45
|
||||
|
||||
/*
|
||||
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
|
||||
|
|
@ -111,5 +112,6 @@
|
|||
#define XFS_RANDOM_ATTR_LEAF_TO_NODE 1
|
||||
#define XFS_RANDOM_WB_DELAY_MS 3000
|
||||
#define XFS_RANDOM_WRITE_DELAY_MS 3000
|
||||
#define XFS_RANDOM_EXCHMAPS_FINISH_ONE 1
|
||||
|
||||
#endif /* __XFS_ERRORTAG_H_ */
|
||||
|
|
|
|||
1235
fs/xfs/libxfs/xfs_exchmaps.c
Normal file
1235
fs/xfs/libxfs/xfs_exchmaps.c
Normal file
File diff suppressed because it is too large
Load diff
124
fs/xfs/libxfs/xfs_exchmaps.h
Normal file
124
fs/xfs/libxfs/xfs_exchmaps.h
Normal file
|
|
@ -0,0 +1,124 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (c) 2020-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_EXCHMAPS_H__
|
||||
#define __XFS_EXCHMAPS_H__
|
||||
|
||||
/* In-core deferred operation info about a file mapping exchange request. */
|
||||
struct xfs_exchmaps_intent {
|
||||
/* List of other incore deferred work. */
|
||||
struct list_head xmi_list;
|
||||
|
||||
/* Inodes participating in the operation. */
|
||||
struct xfs_inode *xmi_ip1;
|
||||
struct xfs_inode *xmi_ip2;
|
||||
|
||||
/* File offset range information. */
|
||||
xfs_fileoff_t xmi_startoff1;
|
||||
xfs_fileoff_t xmi_startoff2;
|
||||
xfs_filblks_t xmi_blockcount;
|
||||
|
||||
/* Set these file sizes after the operation, unless negative. */
|
||||
xfs_fsize_t xmi_isize1;
|
||||
xfs_fsize_t xmi_isize2;
|
||||
|
||||
uint64_t xmi_flags; /* XFS_EXCHMAPS_* flags */
|
||||
};
|
||||
|
||||
/* Try to convert inode2 from block to short format at the end, if possible. */
|
||||
#define __XFS_EXCHMAPS_INO2_SHORTFORM (1ULL << 63)
|
||||
|
||||
#define XFS_EXCHMAPS_INTERNAL_FLAGS (__XFS_EXCHMAPS_INO2_SHORTFORM)
|
||||
|
||||
/* flags that can be passed to xfs_exchmaps_{estimate,mappings} */
|
||||
#define XFS_EXCHMAPS_PARAMS (XFS_EXCHMAPS_ATTR_FORK | \
|
||||
XFS_EXCHMAPS_SET_SIZES | \
|
||||
XFS_EXCHMAPS_INO1_WRITTEN)
|
||||
|
||||
static inline int
|
||||
xfs_exchmaps_whichfork(const struct xfs_exchmaps_intent *xmi)
|
||||
{
|
||||
if (xmi->xmi_flags & XFS_EXCHMAPS_ATTR_FORK)
|
||||
return XFS_ATTR_FORK;
|
||||
return XFS_DATA_FORK;
|
||||
}
|
||||
|
||||
/* Parameters for a mapping exchange request. */
|
||||
struct xfs_exchmaps_req {
|
||||
/* Inodes participating in the operation. */
|
||||
struct xfs_inode *ip1;
|
||||
struct xfs_inode *ip2;
|
||||
|
||||
/* File offset range information. */
|
||||
xfs_fileoff_t startoff1;
|
||||
xfs_fileoff_t startoff2;
|
||||
xfs_filblks_t blockcount;
|
||||
|
||||
/* XFS_EXCHMAPS_* operation flags */
|
||||
uint64_t flags;
|
||||
|
||||
/*
|
||||
* Fields below this line are filled out by xfs_exchmaps_estimate;
|
||||
* callers should initialize this part of the struct to zero.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Data device blocks to be moved out of ip1, and free space needed to
|
||||
* handle the bmbt changes.
|
||||
*/
|
||||
xfs_filblks_t ip1_bcount;
|
||||
|
||||
/*
|
||||
* Data device blocks to be moved out of ip2, and free space needed to
|
||||
* handle the bmbt changes.
|
||||
*/
|
||||
xfs_filblks_t ip2_bcount;
|
||||
|
||||
/* rt blocks to be moved out of ip1. */
|
||||
xfs_filblks_t ip1_rtbcount;
|
||||
|
||||
/* rt blocks to be moved out of ip2. */
|
||||
xfs_filblks_t ip2_rtbcount;
|
||||
|
||||
/* Free space needed to handle the bmbt changes */
|
||||
unsigned long long resblks;
|
||||
|
||||
/* Number of exchanges needed to complete the operation */
|
||||
unsigned long long nr_exchanges;
|
||||
};
|
||||
|
||||
static inline int
|
||||
xfs_exchmaps_reqfork(const struct xfs_exchmaps_req *req)
|
||||
{
|
||||
if (req->flags & XFS_EXCHMAPS_ATTR_FORK)
|
||||
return XFS_ATTR_FORK;
|
||||
return XFS_DATA_FORK;
|
||||
}
|
||||
|
||||
int xfs_exchmaps_estimate_overhead(struct xfs_exchmaps_req *req);
|
||||
int xfs_exchmaps_estimate(struct xfs_exchmaps_req *req);
|
||||
|
||||
extern struct kmem_cache *xfs_exchmaps_intent_cache;
|
||||
|
||||
int __init xfs_exchmaps_intent_init_cache(void);
|
||||
void xfs_exchmaps_intent_destroy_cache(void);
|
||||
|
||||
struct xfs_exchmaps_intent *xfs_exchmaps_init_intent(
|
||||
const struct xfs_exchmaps_req *req);
|
||||
void xfs_exchmaps_ensure_reflink(struct xfs_trans *tp,
|
||||
const struct xfs_exchmaps_intent *xmi);
|
||||
void xfs_exchmaps_upgrade_extent_counts(struct xfs_trans *tp,
|
||||
const struct xfs_exchmaps_intent *xmi);
|
||||
|
||||
int xfs_exchmaps_finish_one(struct xfs_trans *tp,
|
||||
struct xfs_exchmaps_intent *xmi);
|
||||
|
||||
int xfs_exchmaps_check_forks(struct xfs_mount *mp,
|
||||
const struct xfs_exchmaps_req *req);
|
||||
|
||||
void xfs_exchange_mappings(struct xfs_trans *tp,
|
||||
const struct xfs_exchmaps_req *req);
|
||||
|
||||
#endif /* __XFS_EXCHMAPS_H__ */
|
||||
|
|
@ -367,19 +367,23 @@ xfs_sb_has_ro_compat_feature(
|
|||
return (sbp->sb_features_ro_compat & feature) != 0;
|
||||
}
|
||||
|
||||
#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
|
||||
#define XFS_SB_FEAT_INCOMPAT_SPINODES (1 << 1) /* sparse inode chunks */
|
||||
#define XFS_SB_FEAT_INCOMPAT_META_UUID (1 << 2) /* metadata UUID */
|
||||
#define XFS_SB_FEAT_INCOMPAT_BIGTIME (1 << 3) /* large timestamps */
|
||||
#define XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR (1 << 4) /* needs xfs_repair */
|
||||
#define XFS_SB_FEAT_INCOMPAT_NREXT64 (1 << 5) /* large extent counters */
|
||||
#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
|
||||
#define XFS_SB_FEAT_INCOMPAT_SPINODES (1 << 1) /* sparse inode chunks */
|
||||
#define XFS_SB_FEAT_INCOMPAT_META_UUID (1 << 2) /* metadata UUID */
|
||||
#define XFS_SB_FEAT_INCOMPAT_BIGTIME (1 << 3) /* large timestamps */
|
||||
#define XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR (1 << 4) /* needs xfs_repair */
|
||||
#define XFS_SB_FEAT_INCOMPAT_NREXT64 (1 << 5) /* large extent counters */
|
||||
#define XFS_SB_FEAT_INCOMPAT_EXCHRANGE (1 << 6) /* exchangerange supported */
|
||||
#define XFS_SB_FEAT_INCOMPAT_PARENT (1 << 7) /* parent pointers */
|
||||
#define XFS_SB_FEAT_INCOMPAT_ALL \
|
||||
(XFS_SB_FEAT_INCOMPAT_FTYPE| \
|
||||
XFS_SB_FEAT_INCOMPAT_SPINODES| \
|
||||
XFS_SB_FEAT_INCOMPAT_META_UUID| \
|
||||
XFS_SB_FEAT_INCOMPAT_BIGTIME| \
|
||||
XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR| \
|
||||
XFS_SB_FEAT_INCOMPAT_NREXT64)
|
||||
(XFS_SB_FEAT_INCOMPAT_FTYPE | \
|
||||
XFS_SB_FEAT_INCOMPAT_SPINODES | \
|
||||
XFS_SB_FEAT_INCOMPAT_META_UUID | \
|
||||
XFS_SB_FEAT_INCOMPAT_BIGTIME | \
|
||||
XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR | \
|
||||
XFS_SB_FEAT_INCOMPAT_NREXT64 | \
|
||||
XFS_SB_FEAT_INCOMPAT_EXCHRANGE | \
|
||||
XFS_SB_FEAT_INCOMPAT_PARENT)
|
||||
|
||||
#define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL
|
||||
static inline bool
|
||||
|
|
@ -897,6 +901,12 @@ static inline uint xfs_dinode_size(int version)
|
|||
*/
|
||||
#define XFS_MAXLINK ((1U << 31) - 1U)
|
||||
|
||||
/*
|
||||
* Any file that hits the maximum ondisk link count should be pinned to avoid
|
||||
* a use-after-free situation.
|
||||
*/
|
||||
#define XFS_NLINK_PINNED (~0U)
|
||||
|
||||
/*
|
||||
* Values for di_format
|
||||
*
|
||||
|
|
|
|||
|
|
@ -239,6 +239,8 @@ typedef struct xfs_fsop_resblks {
|
|||
#define XFS_FSOP_GEOM_FLAGS_BIGTIME (1 << 21) /* 64-bit nsec timestamps */
|
||||
#define XFS_FSOP_GEOM_FLAGS_INOBTCNT (1 << 22) /* inobt btree counter */
|
||||
#define XFS_FSOP_GEOM_FLAGS_NREXT64 (1 << 23) /* large extent counters */
|
||||
#define XFS_FSOP_GEOM_FLAGS_EXCHANGE_RANGE (1 << 24) /* exchange range */
|
||||
#define XFS_FSOP_GEOM_FLAGS_PARENT (1 << 25) /* linux parent pointers */
|
||||
|
||||
/*
|
||||
* Minimum and maximum sizes need for growth checks.
|
||||
|
|
@ -409,6 +411,7 @@ struct xfs_bulkstat {
|
|||
#define XFS_BS_SICK_XATTR (1 << 5) /* extended attributes */
|
||||
#define XFS_BS_SICK_SYMLINK (1 << 6) /* symbolic link remote target */
|
||||
#define XFS_BS_SICK_PARENT (1 << 7) /* parent pointers */
|
||||
#define XFS_BS_SICK_DIRTREE (1 << 8) /* directory tree structure */
|
||||
|
||||
/*
|
||||
* Project quota id helpers (previously projid was 16bit only
|
||||
|
|
@ -632,7 +635,9 @@ typedef struct xfs_fsop_attrmulti_handlereq {
|
|||
/*
|
||||
* per machine unique filesystem identifier types.
|
||||
*/
|
||||
typedef struct { __u32 val[2]; } xfs_fsid_t; /* file system id type */
|
||||
typedef struct xfs_fsid {
|
||||
__u32 val[2]; /* file system id type */
|
||||
} xfs_fsid_t;
|
||||
|
||||
typedef struct xfs_fid {
|
||||
__u16 fid_len; /* length of remainder */
|
||||
|
|
@ -715,9 +720,19 @@ struct xfs_scrub_metadata {
|
|||
#define XFS_SCRUB_TYPE_QUOTACHECK 25 /* quota counters */
|
||||
#define XFS_SCRUB_TYPE_NLINKS 26 /* inode link counts */
|
||||
#define XFS_SCRUB_TYPE_HEALTHY 27 /* everything checked out ok */
|
||||
#define XFS_SCRUB_TYPE_DIRTREE 28 /* directory tree structure */
|
||||
|
||||
/* Number of scrub subcommands. */
|
||||
#define XFS_SCRUB_TYPE_NR 28
|
||||
#define XFS_SCRUB_TYPE_NR 29
|
||||
|
||||
/*
|
||||
* This special type code only applies to the vectored scrub implementation.
|
||||
*
|
||||
* If any of the previous scrub vectors recorded runtime errors or have
|
||||
* sv_flags bits set that match the OFLAG bits in the barrier vector's
|
||||
* sv_flags, set the barrier's sv_ret to -ECANCELED and return to userspace.
|
||||
*/
|
||||
#define XFS_SCRUB_TYPE_BARRIER (0xFFFFFFFF)
|
||||
|
||||
/* i: Repair this metadata. */
|
||||
#define XFS_SCRUB_IFLAG_REPAIR (1u << 0)
|
||||
|
|
@ -763,6 +778,29 @@ struct xfs_scrub_metadata {
|
|||
XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED)
|
||||
#define XFS_SCRUB_FLAGS_ALL (XFS_SCRUB_FLAGS_IN | XFS_SCRUB_FLAGS_OUT)
|
||||
|
||||
/* Vectored scrub calls to reduce the number of kernel transitions. */
|
||||
|
||||
struct xfs_scrub_vec {
|
||||
__u32 sv_type; /* XFS_SCRUB_TYPE_* */
|
||||
__u32 sv_flags; /* XFS_SCRUB_FLAGS_* */
|
||||
__s32 sv_ret; /* 0 or a negative error code */
|
||||
__u32 sv_reserved; /* must be zero */
|
||||
};
|
||||
|
||||
/* Vectored metadata scrub control structure. */
|
||||
struct xfs_scrub_vec_head {
|
||||
__u64 svh_ino; /* inode number. */
|
||||
__u32 svh_gen; /* inode generation. */
|
||||
__u32 svh_agno; /* ag number. */
|
||||
__u32 svh_flags; /* XFS_SCRUB_VEC_FLAGS_* */
|
||||
__u16 svh_rest_us; /* wait this much time between vector items */
|
||||
__u16 svh_nr; /* number of svh_vectors */
|
||||
__u64 svh_reserved; /* must be zero */
|
||||
__u64 svh_vectors; /* pointer to buffer of xfs_scrub_vec */
|
||||
};
|
||||
|
||||
#define XFS_SCRUB_VEC_FLAGS_ALL (0)
|
||||
|
||||
/*
|
||||
* ioctl limits
|
||||
*/
|
||||
|
|
@ -772,6 +810,118 @@ struct xfs_scrub_metadata {
|
|||
# define XFS_XATTR_LIST_MAX 65536
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Exchange part of file1 with part of the file that this ioctl that is being
|
||||
* called against (which we'll call file2). Filesystems must be able to
|
||||
* restart and complete the operation even after the system goes down.
|
||||
*/
|
||||
struct xfs_exchange_range {
|
||||
__s32 file1_fd;
|
||||
__u32 pad; /* must be zeroes */
|
||||
__u64 file1_offset; /* file1 offset, bytes */
|
||||
__u64 file2_offset; /* file2 offset, bytes */
|
||||
__u64 length; /* bytes to exchange */
|
||||
|
||||
__u64 flags; /* see XFS_EXCHANGE_RANGE_* below */
|
||||
};
|
||||
|
||||
/*
|
||||
* Exchange file data all the way to the ends of both files, and then exchange
|
||||
* the file sizes. This flag can be used to replace a file's contents with a
|
||||
* different amount of data. length will be ignored.
|
||||
*/
|
||||
#define XFS_EXCHANGE_RANGE_TO_EOF (1ULL << 0)
|
||||
|
||||
/* Flush all changes in file data and file metadata to disk before returning. */
|
||||
#define XFS_EXCHANGE_RANGE_DSYNC (1ULL << 1)
|
||||
|
||||
/* Dry run; do all the parameter verification but do not change anything. */
|
||||
#define XFS_EXCHANGE_RANGE_DRY_RUN (1ULL << 2)
|
||||
|
||||
/*
|
||||
* Exchange only the parts of the two files where the file allocation units
|
||||
* mapped to file1's range have been written to. This can accelerate
|
||||
* scatter-gather atomic writes with a temp file if all writes are aligned to
|
||||
* the file allocation unit.
|
||||
*/
|
||||
#define XFS_EXCHANGE_RANGE_FILE1_WRITTEN (1ULL << 3)
|
||||
|
||||
#define XFS_EXCHANGE_RANGE_ALL_FLAGS (XFS_EXCHANGE_RANGE_TO_EOF | \
|
||||
XFS_EXCHANGE_RANGE_DSYNC | \
|
||||
XFS_EXCHANGE_RANGE_DRY_RUN | \
|
||||
XFS_EXCHANGE_RANGE_FILE1_WRITTEN)
|
||||
|
||||
/* Iterating parent pointers of files. */
|
||||
|
||||
/* target was the root directory */
|
||||
#define XFS_GETPARENTS_OFLAG_ROOT (1U << 0)
|
||||
|
||||
/* Cursor is done iterating pptrs */
|
||||
#define XFS_GETPARENTS_OFLAG_DONE (1U << 1)
|
||||
|
||||
#define XFS_GETPARENTS_OFLAGS_ALL (XFS_GETPARENTS_OFLAG_ROOT | \
|
||||
XFS_GETPARENTS_OFLAG_DONE)
|
||||
|
||||
#define XFS_GETPARENTS_IFLAGS_ALL (0)
|
||||
|
||||
struct xfs_getparents_rec {
|
||||
struct xfs_handle gpr_parent; /* Handle to parent */
|
||||
__u32 gpr_reclen; /* Length of entire record */
|
||||
__u32 gpr_reserved; /* zero */
|
||||
char gpr_name[]; /* Null-terminated filename */
|
||||
};
|
||||
|
||||
/* Iterate through this file's directory parent pointers */
|
||||
struct xfs_getparents {
|
||||
/*
|
||||
* Structure to track progress in iterating the parent pointers.
|
||||
* Must be initialized to zeroes before the first ioctl call, and
|
||||
* not touched by callers after that.
|
||||
*/
|
||||
struct xfs_attrlist_cursor gp_cursor;
|
||||
|
||||
/* Input flags: XFS_GETPARENTS_IFLAG* */
|
||||
__u16 gp_iflags;
|
||||
|
||||
/* Output flags: XFS_GETPARENTS_OFLAG* */
|
||||
__u16 gp_oflags;
|
||||
|
||||
/* Size of the gp_buffer in bytes */
|
||||
__u32 gp_bufsize;
|
||||
|
||||
/* Must be set to zero */
|
||||
__u64 gp_reserved;
|
||||
|
||||
/* Pointer to a buffer in which to place xfs_getparents_rec */
|
||||
__u64 gp_buffer;
|
||||
};
|
||||
|
||||
static inline struct xfs_getparents_rec *
|
||||
xfs_getparents_first_rec(struct xfs_getparents *gp)
|
||||
{
|
||||
return (struct xfs_getparents_rec *)(uintptr_t)gp->gp_buffer;
|
||||
}
|
||||
|
||||
static inline struct xfs_getparents_rec *
|
||||
xfs_getparents_next_rec(struct xfs_getparents *gp,
|
||||
struct xfs_getparents_rec *gpr)
|
||||
{
|
||||
void *next = ((void *)gpr + gpr->gpr_reclen);
|
||||
void *end = (void *)(uintptr_t)(gp->gp_buffer + gp->gp_bufsize);
|
||||
|
||||
if (next >= end)
|
||||
return NULL;
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
/* Iterate through this file handle's directory parent pointers. */
|
||||
struct xfs_getparents_by_handle {
|
||||
/* Handle to file whose parents we want. */
|
||||
struct xfs_handle gph_handle;
|
||||
|
||||
struct xfs_getparents gph_request;
|
||||
};
|
||||
|
||||
/*
|
||||
* ioctl commands that are used by Linux filesystems
|
||||
|
|
@ -808,6 +958,9 @@ struct xfs_scrub_metadata {
|
|||
/* XFS_IOC_GETFSMAP ------ hoisted 59 */
|
||||
#define XFS_IOC_SCRUB_METADATA _IOWR('X', 60, struct xfs_scrub_metadata)
|
||||
#define XFS_IOC_AG_GEOMETRY _IOWR('X', 61, struct xfs_ag_geometry)
|
||||
#define XFS_IOC_GETPARENTS _IOWR('X', 62, struct xfs_getparents)
|
||||
#define XFS_IOC_GETPARENTS_BY_HANDLE _IOWR('X', 63, struct xfs_getparents_by_handle)
|
||||
#define XFS_IOC_SCRUBV_METADATA _IOWR('X', 64, struct xfs_scrub_vec_head)
|
||||
|
||||
/*
|
||||
* ioctl commands that replace IRIX syssgi()'s
|
||||
|
|
@ -843,6 +996,7 @@ struct xfs_scrub_metadata {
|
|||
#define XFS_IOC_FSGEOMETRY _IOR ('X', 126, struct xfs_fsop_geom)
|
||||
#define XFS_IOC_BULKSTAT _IOR ('X', 127, struct xfs_bulkstat_req)
|
||||
#define XFS_IOC_INUMBERS _IOR ('X', 128, struct xfs_inumbers_req)
|
||||
#define XFS_IOC_EXCHANGE_RANGE _IOWR('X', 129, struct xfs_exchange_range)
|
||||
/* XFS_IOC_GETFSUUID ---------- deprecated 140 */
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -95,6 +95,7 @@ struct xfs_da_args;
|
|||
|
||||
/* Don't propagate sick status to ag health summary during inactivation */
|
||||
#define XFS_SICK_INO_FORGET (1 << 12)
|
||||
#define XFS_SICK_INO_DIRTREE (1 << 13) /* directory tree structure */
|
||||
|
||||
/* Primary evidence of health problems in a given group. */
|
||||
#define XFS_SICK_FS_PRIMARY (XFS_SICK_FS_COUNTERS | \
|
||||
|
|
@ -125,7 +126,8 @@ struct xfs_da_args;
|
|||
XFS_SICK_INO_DIR | \
|
||||
XFS_SICK_INO_XATTR | \
|
||||
XFS_SICK_INO_SYMLINK | \
|
||||
XFS_SICK_INO_PARENT)
|
||||
XFS_SICK_INO_PARENT | \
|
||||
XFS_SICK_INO_DIRTREE)
|
||||
|
||||
#define XFS_SICK_INO_ZAPPED (XFS_SICK_INO_BMBTD_ZAPPED | \
|
||||
XFS_SICK_INO_BMBTA_ZAPPED | \
|
||||
|
|
|
|||
|
|
@ -1057,6 +1057,33 @@ xfs_inobt_first_free_inode(
|
|||
return xfs_lowbit64(realfree);
|
||||
}
|
||||
|
||||
/*
|
||||
* If this AG has corrupt inodes, check if allocating this inode would fail
|
||||
* with corruption errors. Returns 0 if we're clear, or EAGAIN to try again
|
||||
* somewhere else.
|
||||
*/
|
||||
static int
|
||||
xfs_dialloc_check_ino(
|
||||
struct xfs_perag *pag,
|
||||
struct xfs_trans *tp,
|
||||
xfs_ino_t ino)
|
||||
{
|
||||
struct xfs_imap imap;
|
||||
struct xfs_buf *bp;
|
||||
int error;
|
||||
|
||||
error = xfs_imap(pag, tp, ino, &imap, 0);
|
||||
if (error)
|
||||
return -EAGAIN;
|
||||
|
||||
error = xfs_imap_to_bp(pag->pag_mount, tp, &imap, &bp);
|
||||
if (error)
|
||||
return -EAGAIN;
|
||||
|
||||
xfs_trans_brelse(tp, bp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate an inode using the inobt-only algorithm.
|
||||
*/
|
||||
|
|
@ -1309,6 +1336,13 @@ alloc_inode:
|
|||
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
|
||||
XFS_INODES_PER_CHUNK) == 0);
|
||||
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
|
||||
|
||||
if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
|
||||
error = xfs_dialloc_check_ino(pag, tp, ino);
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
|
||||
rec.ir_free &= ~XFS_INOBT_MASK(offset);
|
||||
rec.ir_freecount--;
|
||||
error = xfs_inobt_update(cur, &rec);
|
||||
|
|
@ -1584,6 +1618,12 @@ xfs_dialloc_ag(
|
|||
XFS_INODES_PER_CHUNK) == 0);
|
||||
ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, rec.ir_startino + offset);
|
||||
|
||||
if (xfs_ag_has_sickness(pag, XFS_SICK_AG_INODES)) {
|
||||
error = xfs_dialloc_check_ino(pag, tp, ino);
|
||||
if (error)
|
||||
goto error_cur;
|
||||
}
|
||||
|
||||
/*
|
||||
* Modify or remove the finobt record.
|
||||
*/
|
||||
|
|
@ -1699,7 +1739,7 @@ xfs_dialloc_good_ag(
|
|||
return false;
|
||||
|
||||
if (!xfs_perag_initialised_agi(pag)) {
|
||||
error = xfs_ialloc_read_agi(pag, tp, NULL);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, NULL);
|
||||
if (error)
|
||||
return false;
|
||||
}
|
||||
|
|
@ -1768,7 +1808,7 @@ xfs_dialloc_try_ag(
|
|||
* Then read in the AGI buffer and recheck with the AGI buffer
|
||||
* lock held.
|
||||
*/
|
||||
error = xfs_ialloc_read_agi(pag, *tpp, &agbp);
|
||||
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -2286,7 +2326,7 @@ xfs_difree(
|
|||
/*
|
||||
* Get the allocation group header.
|
||||
*/
|
||||
error = xfs_ialloc_read_agi(pag, tp, &agbp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
||||
if (error) {
|
||||
xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
|
||||
__func__, error);
|
||||
|
|
@ -2332,7 +2372,7 @@ xfs_imap_lookup(
|
|||
int error;
|
||||
int i;
|
||||
|
||||
error = xfs_ialloc_read_agi(pag, tp, &agbp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
||||
if (error) {
|
||||
xfs_alert(mp,
|
||||
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
|
||||
|
|
@ -2675,6 +2715,7 @@ int
|
|||
xfs_read_agi(
|
||||
struct xfs_perag *pag,
|
||||
struct xfs_trans *tp,
|
||||
xfs_buf_flags_t flags,
|
||||
struct xfs_buf **agibpp)
|
||||
{
|
||||
struct xfs_mount *mp = pag->pag_mount;
|
||||
|
|
@ -2684,7 +2725,7 @@ xfs_read_agi(
|
|||
|
||||
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
|
||||
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
|
||||
XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops);
|
||||
XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops);
|
||||
if (xfs_metadata_is_sick(error))
|
||||
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
|
||||
if (error)
|
||||
|
|
@ -2704,6 +2745,7 @@ int
|
|||
xfs_ialloc_read_agi(
|
||||
struct xfs_perag *pag,
|
||||
struct xfs_trans *tp,
|
||||
int flags,
|
||||
struct xfs_buf **agibpp)
|
||||
{
|
||||
struct xfs_buf *agibp;
|
||||
|
|
@ -2712,7 +2754,9 @@ xfs_ialloc_read_agi(
|
|||
|
||||
trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
|
||||
|
||||
error = xfs_read_agi(pag, tp, &agibp);
|
||||
error = xfs_read_agi(pag, tp,
|
||||
(flags & XFS_IALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
|
||||
&agibp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
|
|||
|
|
@ -63,10 +63,11 @@ xfs_ialloc_log_agi(
|
|||
struct xfs_buf *bp, /* allocation group header buffer */
|
||||
uint32_t fields); /* bitmask of fields to log */
|
||||
|
||||
int xfs_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
|
||||
int xfs_read_agi(struct xfs_perag *pag, struct xfs_trans *tp, xfs_buf_flags_t flags,
|
||||
struct xfs_buf **agibpp);
|
||||
int xfs_ialloc_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
|
||||
struct xfs_buf **agibpp);
|
||||
int flags, struct xfs_buf **agibpp);
|
||||
#define XFS_IALLOC_FLAG_TRYLOCK (1U << 0) /* use trylock for buffer locking */
|
||||
|
||||
/*
|
||||
* Lookup a record by ino in the btree given by cur.
|
||||
|
|
|
|||
|
|
@ -745,7 +745,7 @@ xfs_finobt_count_blocks(
|
|||
struct xfs_btree_cur *cur;
|
||||
int error;
|
||||
|
||||
error = xfs_ialloc_read_agi(pag, tp, &agbp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -768,7 +768,7 @@ xfs_finobt_read_blocks(
|
|||
struct xfs_agi *agi;
|
||||
int error;
|
||||
|
||||
error = xfs_ialloc_read_agi(pag, tp, &agbp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
|
|||
|
|
@ -491,6 +491,14 @@ xfs_dinode_verify(
|
|||
return __this_address;
|
||||
}
|
||||
|
||||
if (dip->di_version > 1) {
|
||||
if (dip->di_onlink)
|
||||
return __this_address;
|
||||
} else {
|
||||
if (dip->di_nlink)
|
||||
return __this_address;
|
||||
}
|
||||
|
||||
/* don't allow invalid i_size */
|
||||
di_size = be64_to_cpu(dip->di_size);
|
||||
if (di_size & (1ULL << 63))
|
||||
|
|
|
|||
|
|
@ -765,53 +765,46 @@ xfs_ifork_verify_local_attr(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the inode fork supports adding nr_to_add more extents.
|
||||
*
|
||||
* If it doesn't but we can upgrade it to large extent counters, do the upgrade.
|
||||
* If we can't upgrade or are already using big counters but still can't fit the
|
||||
* additional extents, return -EFBIG.
|
||||
*/
|
||||
int
|
||||
xfs_iext_count_may_overflow(
|
||||
xfs_iext_count_extend(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
int nr_to_add)
|
||||
uint nr_to_add)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
bool has_large =
|
||||
xfs_inode_has_large_extent_counts(ip);
|
||||
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
|
||||
uint64_t max_exts;
|
||||
uint64_t nr_exts;
|
||||
|
||||
ASSERT(nr_to_add <= XFS_MAX_EXTCNT_UPGRADE_NR);
|
||||
|
||||
if (whichfork == XFS_COW_FORK)
|
||||
return 0;
|
||||
|
||||
max_exts = xfs_iext_max_nextents(xfs_inode_has_large_extent_counts(ip),
|
||||
whichfork);
|
||||
|
||||
if (XFS_TEST_ERROR(false, ip->i_mount, XFS_ERRTAG_REDUCE_MAX_IEXTENTS))
|
||||
max_exts = 10;
|
||||
|
||||
/* no point in upgrading if if_nextents overflows */
|
||||
nr_exts = ifp->if_nextents + nr_to_add;
|
||||
if (nr_exts < ifp->if_nextents || nr_exts > max_exts)
|
||||
if (nr_exts < ifp->if_nextents)
|
||||
return -EFBIG;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Upgrade this inode's extent counter fields to be able to handle a potential
|
||||
* increase in the extent count by nr_to_add. Normally this is the same
|
||||
* quantity that caused xfs_iext_count_may_overflow() to return -EFBIG.
|
||||
*/
|
||||
int
|
||||
xfs_iext_count_upgrade(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
uint nr_to_add)
|
||||
{
|
||||
ASSERT(nr_to_add <= XFS_MAX_EXTCNT_UPGRADE_NR);
|
||||
|
||||
if (!xfs_has_large_extent_counts(ip->i_mount) ||
|
||||
xfs_inode_has_large_extent_counts(ip) ||
|
||||
XFS_TEST_ERROR(false, ip->i_mount, XFS_ERRTAG_REDUCE_MAX_IEXTENTS))
|
||||
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REDUCE_MAX_IEXTENTS) &&
|
||||
nr_exts > 10)
|
||||
return -EFBIG;
|
||||
|
||||
ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
if (nr_exts > xfs_iext_max_nextents(has_large, whichfork)) {
|
||||
if (has_large || !xfs_has_large_extent_counts(mp))
|
||||
return -EFBIG;
|
||||
ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -256,10 +256,8 @@ extern void xfs_ifork_init_cow(struct xfs_inode *ip);
|
|||
|
||||
int xfs_ifork_verify_local_data(struct xfs_inode *ip);
|
||||
int xfs_ifork_verify_local_attr(struct xfs_inode *ip);
|
||||
int xfs_iext_count_may_overflow(struct xfs_inode *ip, int whichfork,
|
||||
int nr_to_add);
|
||||
int xfs_iext_count_upgrade(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
uint nr_to_add);
|
||||
int xfs_iext_count_extend(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
int whichfork, uint nr_to_add);
|
||||
bool xfs_ifork_is_realtime(struct xfs_inode *ip, int whichfork);
|
||||
|
||||
/* returns true if the fork has extents but they are not read in yet. */
|
||||
|
|
|
|||
|
|
@ -115,10 +115,13 @@ struct xfs_unmount_log_format {
|
|||
#define XLOG_REG_TYPE_BUD_FORMAT 26
|
||||
#define XLOG_REG_TYPE_ATTRI_FORMAT 27
|
||||
#define XLOG_REG_TYPE_ATTRD_FORMAT 28
|
||||
#define XLOG_REG_TYPE_ATTR_NAME 29
|
||||
#define XLOG_REG_TYPE_ATTR_NAME 29
|
||||
#define XLOG_REG_TYPE_ATTR_VALUE 30
|
||||
#define XLOG_REG_TYPE_MAX 30
|
||||
|
||||
#define XLOG_REG_TYPE_XMI_FORMAT 31
|
||||
#define XLOG_REG_TYPE_XMD_FORMAT 32
|
||||
#define XLOG_REG_TYPE_ATTR_NEWNAME 33
|
||||
#define XLOG_REG_TYPE_ATTR_NEWVALUE 34
|
||||
#define XLOG_REG_TYPE_MAX 34
|
||||
|
||||
/*
|
||||
* Flags to log operation header
|
||||
|
|
@ -243,6 +246,8 @@ typedef struct xfs_trans_header {
|
|||
#define XFS_LI_BUD 0x1245
|
||||
#define XFS_LI_ATTRI 0x1246 /* attr set/remove intent*/
|
||||
#define XFS_LI_ATTRD 0x1247 /* attr set/remove done */
|
||||
#define XFS_LI_XMI 0x1248 /* mapping exchange intent */
|
||||
#define XFS_LI_XMD 0x1249 /* mapping exchange done */
|
||||
|
||||
#define XFS_LI_TYPE_DESC \
|
||||
{ XFS_LI_EFI, "XFS_LI_EFI" }, \
|
||||
|
|
@ -260,7 +265,9 @@ typedef struct xfs_trans_header {
|
|||
{ XFS_LI_BUI, "XFS_LI_BUI" }, \
|
||||
{ XFS_LI_BUD, "XFS_LI_BUD" }, \
|
||||
{ XFS_LI_ATTRI, "XFS_LI_ATTRI" }, \
|
||||
{ XFS_LI_ATTRD, "XFS_LI_ATTRD" }
|
||||
{ XFS_LI_ATTRD, "XFS_LI_ATTRD" }, \
|
||||
{ XFS_LI_XMI, "XFS_LI_XMI" }, \
|
||||
{ XFS_LI_XMD, "XFS_LI_XMD" }
|
||||
|
||||
/*
|
||||
* Inode Log Item Format definitions.
|
||||
|
|
@ -878,6 +885,61 @@ struct xfs_bud_log_format {
|
|||
uint64_t bud_bui_id; /* id of corresponding bui */
|
||||
};
|
||||
|
||||
/*
|
||||
* XMI/XMD (file mapping exchange) log format definitions
|
||||
*/
|
||||
|
||||
/* This is the structure used to lay out an mapping exchange log item. */
|
||||
struct xfs_xmi_log_format {
|
||||
uint16_t xmi_type; /* xmi log item type */
|
||||
uint16_t xmi_size; /* size of this item */
|
||||
uint32_t __pad; /* must be zero */
|
||||
uint64_t xmi_id; /* xmi identifier */
|
||||
|
||||
uint64_t xmi_inode1; /* inumber of first file */
|
||||
uint64_t xmi_inode2; /* inumber of second file */
|
||||
uint32_t xmi_igen1; /* generation of first file */
|
||||
uint32_t xmi_igen2; /* generation of second file */
|
||||
uint64_t xmi_startoff1; /* block offset into file1 */
|
||||
uint64_t xmi_startoff2; /* block offset into file2 */
|
||||
uint64_t xmi_blockcount; /* number of blocks */
|
||||
uint64_t xmi_flags; /* XFS_EXCHMAPS_* */
|
||||
uint64_t xmi_isize1; /* intended file1 size */
|
||||
uint64_t xmi_isize2; /* intended file2 size */
|
||||
};
|
||||
|
||||
/* Exchange mappings between extended attribute forks instead of data forks. */
|
||||
#define XFS_EXCHMAPS_ATTR_FORK (1ULL << 0)
|
||||
|
||||
/* Set the file sizes when finished. */
|
||||
#define XFS_EXCHMAPS_SET_SIZES (1ULL << 1)
|
||||
|
||||
/*
|
||||
* Exchange the mappings of the two files only if the file allocation units
|
||||
* mapped to file1's range have been written.
|
||||
*/
|
||||
#define XFS_EXCHMAPS_INO1_WRITTEN (1ULL << 2)
|
||||
|
||||
/* Clear the reflink flag from inode1 after the operation. */
|
||||
#define XFS_EXCHMAPS_CLEAR_INO1_REFLINK (1ULL << 3)
|
||||
|
||||
/* Clear the reflink flag from inode2 after the operation. */
|
||||
#define XFS_EXCHMAPS_CLEAR_INO2_REFLINK (1ULL << 4)
|
||||
|
||||
#define XFS_EXCHMAPS_LOGGED_FLAGS (XFS_EXCHMAPS_ATTR_FORK | \
|
||||
XFS_EXCHMAPS_SET_SIZES | \
|
||||
XFS_EXCHMAPS_INO1_WRITTEN | \
|
||||
XFS_EXCHMAPS_CLEAR_INO1_REFLINK | \
|
||||
XFS_EXCHMAPS_CLEAR_INO2_REFLINK)
|
||||
|
||||
/* This is the structure used to lay out an mapping exchange done log item. */
|
||||
struct xfs_xmd_log_format {
|
||||
uint16_t xmd_type; /* xmd log item type */
|
||||
uint16_t xmd_size; /* size of this item */
|
||||
uint32_t __pad;
|
||||
uint64_t xmd_xmi_id; /* id of corresponding xmi */
|
||||
};
|
||||
|
||||
/*
|
||||
* Dquot Log format definitions.
|
||||
*
|
||||
|
|
@ -966,6 +1028,9 @@ struct xfs_icreate_log {
|
|||
#define XFS_ATTRI_OP_FLAGS_SET 1 /* Set the attribute */
|
||||
#define XFS_ATTRI_OP_FLAGS_REMOVE 2 /* Remove the attribute */
|
||||
#define XFS_ATTRI_OP_FLAGS_REPLACE 3 /* Replace the attribute */
|
||||
#define XFS_ATTRI_OP_FLAGS_PPTR_SET 4 /* Set parent pointer */
|
||||
#define XFS_ATTRI_OP_FLAGS_PPTR_REMOVE 5 /* Remove parent pointer */
|
||||
#define XFS_ATTRI_OP_FLAGS_PPTR_REPLACE 6 /* Replace parent pointer */
|
||||
#define XFS_ATTRI_OP_FLAGS_TYPE_MASK 0xFF /* Flags type mask */
|
||||
|
||||
/*
|
||||
|
|
@ -974,6 +1039,7 @@ struct xfs_icreate_log {
|
|||
*/
|
||||
#define XFS_ATTRI_FILTER_MASK (XFS_ATTR_ROOT | \
|
||||
XFS_ATTR_SECURE | \
|
||||
XFS_ATTR_PARENT | \
|
||||
XFS_ATTR_INCOMPLETE)
|
||||
|
||||
/*
|
||||
|
|
@ -983,11 +1049,22 @@ struct xfs_icreate_log {
|
|||
struct xfs_attri_log_format {
|
||||
uint16_t alfi_type; /* attri log item type */
|
||||
uint16_t alfi_size; /* size of this item */
|
||||
uint32_t __pad; /* pad to 64 bit aligned */
|
||||
uint32_t alfi_igen; /* generation of alfi_ino for pptr ops */
|
||||
uint64_t alfi_id; /* attri identifier */
|
||||
uint64_t alfi_ino; /* the inode for this attr operation */
|
||||
uint32_t alfi_op_flags; /* marks the op as a set or remove */
|
||||
uint32_t alfi_name_len; /* attr name length */
|
||||
union {
|
||||
uint32_t alfi_name_len; /* attr name length */
|
||||
struct {
|
||||
/*
|
||||
* For PPTR_REPLACE, these are the lengths of the old
|
||||
* and new attr names. The new and old values must
|
||||
* have the same length.
|
||||
*/
|
||||
uint16_t alfi_old_name_len;
|
||||
uint16_t alfi_new_name_len;
|
||||
};
|
||||
};
|
||||
uint32_t alfi_value_len; /* attr value length */
|
||||
uint32_t alfi_attr_filter;/* attr filter flags */
|
||||
};
|
||||
|
|
|
|||
|
|
@ -75,6 +75,8 @@ extern const struct xlog_recover_item_ops xlog_cui_item_ops;
|
|||
extern const struct xlog_recover_item_ops xlog_cud_item_ops;
|
||||
extern const struct xlog_recover_item_ops xlog_attri_item_ops;
|
||||
extern const struct xlog_recover_item_ops xlog_attrd_item_ops;
|
||||
extern const struct xlog_recover_item_ops xlog_xmi_item_ops;
|
||||
extern const struct xlog_recover_item_ops xlog_xmd_item_ops;
|
||||
|
||||
/*
|
||||
* Macros, structures, prototypes for internal log manager use.
|
||||
|
|
@ -121,6 +123,8 @@ bool xlog_is_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
|
|||
|
||||
int xlog_recover_iget(struct xfs_mount *mp, xfs_ino_t ino,
|
||||
struct xfs_inode **ipp);
|
||||
int xlog_recover_iget_handle(struct xfs_mount *mp, xfs_ino_t ino, uint32_t gen,
|
||||
struct xfs_inode **ipp);
|
||||
void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
|
||||
uint64_t intent_id);
|
||||
int xlog_alloc_buf_cancel_table(struct xlog *log);
|
||||
|
|
|
|||
|
|
@ -16,6 +16,34 @@
|
|||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_trace.h"
|
||||
|
||||
/*
|
||||
* Shortly after enabling the large extents count feature in 2023, longstanding
|
||||
* bugs were found in the code that computes the minimum log size. Luckily,
|
||||
* the bugs resulted in over-estimates of that size, so there's no impact to
|
||||
* existing users. However, we don't want to reduce the minimum log size
|
||||
* because that can create the situation where a newer mkfs writes a new
|
||||
* filesystem that an older kernel won't mount.
|
||||
*
|
||||
* Several years prior, we also discovered that the transaction reservations
|
||||
* for rmap and reflink operations were unnecessarily large. That was fixed,
|
||||
* but the minimum log size computation was left alone to avoid the
|
||||
* compatibility problems noted above. Fix that too.
|
||||
*
|
||||
* Therefore, we only may correct the computation starting with filesystem
|
||||
* features that didn't exist in 2023. In other words, only turn this on if
|
||||
* the filesystem has parent pointers.
|
||||
*
|
||||
* This function can be called before the XFS_HAS_* flags have been set up,
|
||||
* (e.g. mkfs) so we must check the ondisk superblock.
|
||||
*/
|
||||
static inline bool
|
||||
xfs_want_minlogsize_fixes(
|
||||
struct xfs_sb *sb)
|
||||
{
|
||||
return xfs_sb_is_v5(sb) &&
|
||||
xfs_sb_has_incompat_feature(sb, XFS_SB_FEAT_INCOMPAT_PARENT);
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the maximum length in bytes that would be required for a local
|
||||
* attribute value as large attributes out of line are not logged.
|
||||
|
|
@ -31,6 +59,15 @@ xfs_log_calc_max_attrsetm_res(
|
|||
MAXNAMELEN - 1;
|
||||
nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
|
||||
nblks += XFS_B_TO_FSB(mp, size);
|
||||
|
||||
/*
|
||||
* If the feature set is new enough, correct a unit conversion error in
|
||||
* the xattr transaction reservation code that resulted in oversized
|
||||
* minimum log size computations.
|
||||
*/
|
||||
if (xfs_want_minlogsize_fixes(&mp->m_sb))
|
||||
size = XFS_B_TO_FSB(mp, size);
|
||||
|
||||
nblks += XFS_NEXTENTADD_SPACE_RES(mp, size, XFS_ATTR_FORK);
|
||||
|
||||
return M_RES(mp)->tr_attrsetm.tr_logres +
|
||||
|
|
@ -48,6 +85,15 @@ xfs_log_calc_trans_resv_for_minlogblocks(
|
|||
{
|
||||
unsigned int rmap_maxlevels = mp->m_rmap_maxlevels;
|
||||
|
||||
/*
|
||||
* If the feature set is new enough, drop the oversized minimum log
|
||||
* size computation introduced by the original reflink code.
|
||||
*/
|
||||
if (xfs_want_minlogsize_fixes(&mp->m_sb)) {
|
||||
xfs_trans_resv_calc(mp, resv);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* In the early days of rmap+reflink, we always set the rmap maxlevels
|
||||
* to 9 even if the AG was small enough that it would never grow to
|
||||
|
|
|
|||
|
|
@ -119,6 +119,7 @@ xfs_check_ondisk_structs(void)
|
|||
XFS_CHECK_OFFSET(xfs_dir2_sf_entry_t, offset, 1);
|
||||
XFS_CHECK_OFFSET(xfs_dir2_sf_entry_t, name, 3);
|
||||
XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_hdr_t, 10);
|
||||
XFS_CHECK_STRUCT_SIZE(struct xfs_parent_rec, 12);
|
||||
|
||||
/* log structures */
|
||||
XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88);
|
||||
|
|
@ -155,6 +156,11 @@ xfs_check_ondisk_structs(void)
|
|||
XFS_CHECK_OFFSET(struct xfs_efi_log_format_32, efi_extents, 16);
|
||||
XFS_CHECK_OFFSET(struct xfs_efi_log_format_64, efi_extents, 16);
|
||||
|
||||
/* parent pointer ioctls */
|
||||
XFS_CHECK_STRUCT_SIZE(struct xfs_getparents_rec, 32);
|
||||
XFS_CHECK_STRUCT_SIZE(struct xfs_getparents, 40);
|
||||
XFS_CHECK_STRUCT_SIZE(struct xfs_getparents_by_handle, 64);
|
||||
|
||||
/*
|
||||
* The v5 superblock format extended several v4 header structures with
|
||||
* additional data. While new fields are only accessible on v5
|
||||
|
|
|
|||
379
fs/xfs/libxfs/xfs_parent.c
Normal file
379
fs/xfs/libxfs/xfs_parent.c
Normal file
|
|
@ -0,0 +1,379 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle.
|
||||
* All rights reserved.
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_da_format.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_trace.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_attr_sf.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_defer.h"
|
||||
#include "xfs_log.h"
|
||||
#include "xfs_xattr.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_attr_item.h"
|
||||
#include "xfs_health.h"
|
||||
|
||||
struct kmem_cache *xfs_parent_args_cache;
|
||||
|
||||
/*
|
||||
* Parent pointer attribute handling.
|
||||
*
|
||||
* Because the attribute name is a filename component, it will never be longer
|
||||
* than 255 bytes and must not contain nulls or slashes. These are roughly the
|
||||
* same constraints that apply to attribute names.
|
||||
*
|
||||
* The attribute value must always be a struct xfs_parent_rec. This means the
|
||||
* attribute will never be in remote format because 12 bytes is nowhere near
|
||||
* xfs_attr_leaf_entsize_local_max() (~75% of block size).
|
||||
*
|
||||
* Creating a new parent attribute will always create a new attribute - there
|
||||
* should never, ever be an existing attribute in the tree for a new inode.
|
||||
* ENOSPC behavior is problematic - creating the inode without the parent
|
||||
* pointer is effectively a corruption, so we allow parent attribute creation
|
||||
* to dip into the reserve block pool to avoid unexpected ENOSPC errors from
|
||||
* occurring.
|
||||
*/
|
||||
|
||||
/* Return true if parent pointer attr name is valid. */
|
||||
bool
|
||||
xfs_parent_namecheck(
|
||||
unsigned int attr_flags,
|
||||
const void *name,
|
||||
size_t length)
|
||||
{
|
||||
/*
|
||||
* Parent pointers always use logged operations, so there should never
|
||||
* be incomplete xattrs.
|
||||
*/
|
||||
if (attr_flags & XFS_ATTR_INCOMPLETE)
|
||||
return false;
|
||||
|
||||
return xfs_dir2_namecheck(name, length);
|
||||
}
|
||||
|
||||
/* Return true if parent pointer attr value is valid. */
|
||||
bool
|
||||
xfs_parent_valuecheck(
|
||||
struct xfs_mount *mp,
|
||||
const void *value,
|
||||
size_t valuelen)
|
||||
{
|
||||
const struct xfs_parent_rec *rec = value;
|
||||
|
||||
if (!xfs_has_parent(mp))
|
||||
return false;
|
||||
|
||||
/* The xattr value must be a parent record. */
|
||||
if (valuelen != sizeof(struct xfs_parent_rec))
|
||||
return false;
|
||||
|
||||
/* The parent record must be local. */
|
||||
if (value == NULL)
|
||||
return false;
|
||||
|
||||
/* The parent inumber must be valid. */
|
||||
if (!xfs_verify_dir_ino(mp, be64_to_cpu(rec->p_ino)))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Compute the attribute name hash for a parent pointer. */
|
||||
xfs_dahash_t
|
||||
xfs_parent_hashval(
|
||||
struct xfs_mount *mp,
|
||||
const uint8_t *name,
|
||||
int namelen,
|
||||
xfs_ino_t parent_ino)
|
||||
{
|
||||
struct xfs_name xname = {
|
||||
.name = name,
|
||||
.len = namelen,
|
||||
};
|
||||
|
||||
/*
|
||||
* Use the same dirent name hash as would be used on the directory, but
|
||||
* mix in the parent inode number to avoid collisions on hardlinked
|
||||
* files with identical names but different parents.
|
||||
*/
|
||||
return xfs_dir2_hashname(mp, &xname) ^
|
||||
upper_32_bits(parent_ino) ^ lower_32_bits(parent_ino);
|
||||
}
|
||||
|
||||
/* Compute the attribute name hash from the xattr components. */
|
||||
xfs_dahash_t
|
||||
xfs_parent_hashattr(
|
||||
struct xfs_mount *mp,
|
||||
const uint8_t *name,
|
||||
int namelen,
|
||||
const void *value,
|
||||
int valuelen)
|
||||
{
|
||||
const struct xfs_parent_rec *rec = value;
|
||||
|
||||
/* Requires a local attr value in xfs_parent_rec format */
|
||||
if (valuelen != sizeof(struct xfs_parent_rec)) {
|
||||
ASSERT(valuelen == sizeof(struct xfs_parent_rec));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!value) {
|
||||
ASSERT(value != NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return xfs_parent_hashval(mp, name, namelen, be64_to_cpu(rec->p_ino));
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the parent pointer arguments structure. Caller must have zeroed
|
||||
* the contents of @args. @tp is only required for updates.
|
||||
*/
|
||||
static void
|
||||
xfs_parent_da_args_init(
|
||||
struct xfs_da_args *args,
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_parent_rec *rec,
|
||||
struct xfs_inode *child,
|
||||
xfs_ino_t owner,
|
||||
const struct xfs_name *parent_name)
|
||||
{
|
||||
args->geo = child->i_mount->m_attr_geo;
|
||||
args->whichfork = XFS_ATTR_FORK;
|
||||
args->attr_filter = XFS_ATTR_PARENT;
|
||||
args->op_flags = XFS_DA_OP_LOGGED | XFS_DA_OP_OKNOENT;
|
||||
args->trans = tp;
|
||||
args->dp = child;
|
||||
args->owner = owner;
|
||||
args->name = parent_name->name;
|
||||
args->namelen = parent_name->len;
|
||||
args->value = rec;
|
||||
args->valuelen = sizeof(struct xfs_parent_rec);
|
||||
xfs_attr_sethash(args);
|
||||
}
|
||||
|
||||
/* Make sure the incore state is ready for a parent pointer query/update. */
|
||||
static inline int
|
||||
xfs_parent_iread_extents(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *child)
|
||||
{
|
||||
/* Parent pointers require that the attr fork must exist. */
|
||||
if (XFS_IS_CORRUPT(child->i_mount, !xfs_inode_has_attr_fork(child))) {
|
||||
xfs_inode_mark_sick(child, XFS_SICK_INO_PARENT);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
return xfs_iread_extents(tp, child, XFS_ATTR_FORK);
|
||||
}
|
||||
|
||||
/* Add a parent pointer to reflect a dirent addition. */
|
||||
int
|
||||
xfs_parent_addname(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *dp,
|
||||
const struct xfs_name *parent_name,
|
||||
struct xfs_inode *child)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xfs_parent_iread_extents(tp, child);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_inode_to_parent_rec(&ppargs->rec, dp);
|
||||
xfs_parent_da_args_init(&ppargs->args, tp, &ppargs->rec, child,
|
||||
child->i_ino, parent_name);
|
||||
xfs_attr_defer_add(&ppargs->args, XFS_ATTR_DEFER_SET);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Remove a parent pointer to reflect a dirent removal. */
|
||||
int
|
||||
xfs_parent_removename(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *dp,
|
||||
const struct xfs_name *parent_name,
|
||||
struct xfs_inode *child)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xfs_parent_iread_extents(tp, child);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_inode_to_parent_rec(&ppargs->rec, dp);
|
||||
xfs_parent_da_args_init(&ppargs->args, tp, &ppargs->rec, child,
|
||||
child->i_ino, parent_name);
|
||||
xfs_attr_defer_add(&ppargs->args, XFS_ATTR_DEFER_REMOVE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Replace one parent pointer with another to reflect a rename. */
|
||||
int
|
||||
xfs_parent_replacename(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *old_dp,
|
||||
const struct xfs_name *old_name,
|
||||
struct xfs_inode *new_dp,
|
||||
const struct xfs_name *new_name,
|
||||
struct xfs_inode *child)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xfs_parent_iread_extents(tp, child);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_inode_to_parent_rec(&ppargs->rec, old_dp);
|
||||
xfs_parent_da_args_init(&ppargs->args, tp, &ppargs->rec, child,
|
||||
child->i_ino, old_name);
|
||||
|
||||
xfs_inode_to_parent_rec(&ppargs->new_rec, new_dp);
|
||||
ppargs->args.new_name = new_name->name;
|
||||
ppargs->args.new_namelen = new_name->len;
|
||||
ppargs->args.new_value = &ppargs->new_rec;
|
||||
ppargs->args.new_valuelen = sizeof(struct xfs_parent_rec);
|
||||
xfs_attr_defer_add(&ppargs->args, XFS_ATTR_DEFER_REPLACE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract parent pointer information from any parent pointer xattr into
|
||||
* @parent_ino/gen. The last two parameters can be NULL pointers.
|
||||
*
|
||||
* Returns 0 if this is not a parent pointer xattr at all; or -EFSCORRUPTED for
|
||||
* garbage.
|
||||
*/
|
||||
int
|
||||
xfs_parent_from_attr(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int attr_flags,
|
||||
const unsigned char *name,
|
||||
unsigned int namelen,
|
||||
const void *value,
|
||||
unsigned int valuelen,
|
||||
xfs_ino_t *parent_ino,
|
||||
uint32_t *parent_gen)
|
||||
{
|
||||
const struct xfs_parent_rec *rec = value;
|
||||
|
||||
ASSERT(attr_flags & XFS_ATTR_PARENT);
|
||||
|
||||
if (!xfs_parent_namecheck(attr_flags, name, namelen))
|
||||
return -EFSCORRUPTED;
|
||||
if (!xfs_parent_valuecheck(mp, value, valuelen))
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
if (parent_ino)
|
||||
*parent_ino = be64_to_cpu(rec->p_ino);
|
||||
if (parent_gen)
|
||||
*parent_gen = be32_to_cpu(rec->p_gen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Look up a parent pointer record (@parent_name -> @pptr) of @ip.
|
||||
*
|
||||
* Caller must hold at least ILOCK_SHARED. The scratchpad need not be
|
||||
* initialized.
|
||||
*
|
||||
* Returns 0 if the pointer is found, -ENOATTR if there is no match, or a
|
||||
* negative errno.
|
||||
*/
|
||||
int
|
||||
xfs_parent_lookup(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
const struct xfs_name *parent_name,
|
||||
struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch)
|
||||
{
|
||||
memset(scratch, 0, sizeof(struct xfs_da_args));
|
||||
xfs_parent_da_args_init(scratch, tp, pptr, ip, ip->i_ino, parent_name);
|
||||
return xfs_attr_get_ilocked(scratch);
|
||||
}
|
||||
|
||||
/* Sanity-check a parent pointer before we try to perform repairs. */
|
||||
static inline bool
|
||||
xfs_parent_sanity_check(
|
||||
struct xfs_mount *mp,
|
||||
const struct xfs_name *parent_name,
|
||||
const struct xfs_parent_rec *pptr)
|
||||
{
|
||||
if (!xfs_parent_namecheck(XFS_ATTR_PARENT, parent_name->name,
|
||||
parent_name->len))
|
||||
return false;
|
||||
|
||||
if (!xfs_parent_valuecheck(mp, pptr, sizeof(*pptr)))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Attach the parent pointer (@parent_name -> @pptr) to @ip immediately.
|
||||
* Caller must not have a transaction or hold the ILOCK. This is for
|
||||
* specialized repair functions only. The scratchpad need not be initialized.
|
||||
*/
|
||||
int
|
||||
xfs_parent_set(
|
||||
struct xfs_inode *ip,
|
||||
xfs_ino_t owner,
|
||||
const struct xfs_name *parent_name,
|
||||
struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch)
|
||||
{
|
||||
if (!xfs_parent_sanity_check(ip->i_mount, parent_name, pptr)) {
|
||||
ASSERT(0);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
memset(scratch, 0, sizeof(struct xfs_da_args));
|
||||
xfs_parent_da_args_init(scratch, NULL, pptr, ip, owner, parent_name);
|
||||
return xfs_attr_set(scratch, XFS_ATTRUPDATE_CREATE, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove the parent pointer (@parent_name -> @pptr) from @ip immediately.
|
||||
* Caller must not have a transaction or hold the ILOCK. This is for
|
||||
* specialized repair functions only. The scratchpad need not be initialized.
|
||||
*/
|
||||
int
|
||||
xfs_parent_unset(
|
||||
struct xfs_inode *ip,
|
||||
xfs_ino_t owner,
|
||||
const struct xfs_name *parent_name,
|
||||
struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch)
|
||||
{
|
||||
if (!xfs_parent_sanity_check(ip->i_mount, parent_name, pptr)) {
|
||||
ASSERT(0);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
memset(scratch, 0, sizeof(struct xfs_da_args));
|
||||
xfs_parent_da_args_init(scratch, NULL, pptr, ip, owner, parent_name);
|
||||
return xfs_attr_set(scratch, XFS_ATTRUPDATE_REMOVE, false);
|
||||
}
|
||||
110
fs/xfs/libxfs/xfs_parent.h
Normal file
110
fs/xfs/libxfs/xfs_parent.h
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle.
|
||||
* All Rights Reserved.
|
||||
*/
|
||||
#ifndef __XFS_PARENT_H__
|
||||
#define __XFS_PARENT_H__
|
||||
|
||||
/* Metadata validators */
|
||||
bool xfs_parent_namecheck(unsigned int attr_flags, const void *name,
|
||||
size_t length);
|
||||
bool xfs_parent_valuecheck(struct xfs_mount *mp, const void *value,
|
||||
size_t valuelen);
|
||||
|
||||
xfs_dahash_t xfs_parent_hashval(struct xfs_mount *mp, const uint8_t *name,
|
||||
int namelen, xfs_ino_t parent_ino);
|
||||
xfs_dahash_t xfs_parent_hashattr(struct xfs_mount *mp, const uint8_t *name,
|
||||
int namelen, const void *value, int valuelen);
|
||||
|
||||
/* Initializes a xfs_parent_rec to be stored as an attribute name. */
|
||||
static inline void
|
||||
xfs_parent_rec_init(
|
||||
struct xfs_parent_rec *rec,
|
||||
xfs_ino_t ino,
|
||||
uint32_t gen)
|
||||
{
|
||||
rec->p_ino = cpu_to_be64(ino);
|
||||
rec->p_gen = cpu_to_be32(gen);
|
||||
}
|
||||
|
||||
/* Initializes a xfs_parent_rec to be stored as an attribute name. */
|
||||
static inline void
|
||||
xfs_inode_to_parent_rec(
|
||||
struct xfs_parent_rec *rec,
|
||||
const struct xfs_inode *dp)
|
||||
{
|
||||
xfs_parent_rec_init(rec, dp->i_ino, VFS_IC(dp)->i_generation);
|
||||
}
|
||||
|
||||
extern struct kmem_cache *xfs_parent_args_cache;
|
||||
|
||||
/*
|
||||
* Parent pointer information needed to pass around the deferred xattr update
|
||||
* machinery.
|
||||
*/
|
||||
struct xfs_parent_args {
|
||||
struct xfs_parent_rec rec;
|
||||
struct xfs_parent_rec new_rec;
|
||||
struct xfs_da_args args;
|
||||
};
|
||||
|
||||
/*
|
||||
* Start a parent pointer update by allocating the context object we need to
|
||||
* perform a parent pointer update.
|
||||
*/
|
||||
static inline int
|
||||
xfs_parent_start(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_parent_args **ppargsp)
|
||||
{
|
||||
if (!xfs_has_parent(mp)) {
|
||||
*ppargsp = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*ppargsp = kmem_cache_zalloc(xfs_parent_args_cache, GFP_KERNEL);
|
||||
if (!*ppargsp)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Finish a parent pointer update by freeing the context object. */
|
||||
static inline void
|
||||
xfs_parent_finish(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_parent_args *ppargs)
|
||||
{
|
||||
if (ppargs)
|
||||
kmem_cache_free(xfs_parent_args_cache, ppargs);
|
||||
}
|
||||
|
||||
int xfs_parent_addname(struct xfs_trans *tp, struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *dp, const struct xfs_name *parent_name,
|
||||
struct xfs_inode *child);
|
||||
int xfs_parent_removename(struct xfs_trans *tp, struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *dp, const struct xfs_name *parent_name,
|
||||
struct xfs_inode *child);
|
||||
int xfs_parent_replacename(struct xfs_trans *tp,
|
||||
struct xfs_parent_args *ppargs,
|
||||
struct xfs_inode *old_dp, const struct xfs_name *old_name,
|
||||
struct xfs_inode *new_dp, const struct xfs_name *new_name,
|
||||
struct xfs_inode *child);
|
||||
|
||||
int xfs_parent_from_attr(struct xfs_mount *mp, unsigned int attr_flags,
|
||||
const unsigned char *name, unsigned int namelen,
|
||||
const void *value, unsigned int valuelen,
|
||||
xfs_ino_t *parent_ino, uint32_t *parent_gen);
|
||||
|
||||
/* Repair functions */
|
||||
int xfs_parent_lookup(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
const struct xfs_name *name, struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch);
|
||||
int xfs_parent_set(struct xfs_inode *ip, xfs_ino_t owner,
|
||||
const struct xfs_name *name, struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch);
|
||||
int xfs_parent_unset(struct xfs_inode *ip, xfs_ino_t owner,
|
||||
const struct xfs_name *name, struct xfs_parent_rec *pptr,
|
||||
struct xfs_da_args *scratch);
|
||||
|
||||
#endif /* __XFS_PARENT_H__ */
|
||||
|
|
@ -1168,3 +1168,60 @@ xfs_rtsummary_wordcount(
|
|||
blocks = xfs_rtsummary_blockcount(mp, rsumlevels, rbmblocks);
|
||||
return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock both realtime free space metadata inodes for a freespace update. If a
|
||||
* transaction is given, the inodes will be joined to the transaction and the
|
||||
* ILOCKs will be released on transaction commit.
|
||||
*/
|
||||
void
|
||||
xfs_rtbitmap_lock(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
|
||||
if (tp)
|
||||
xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
|
||||
|
||||
xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
|
||||
if (tp)
|
||||
xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
|
||||
}
|
||||
|
||||
/* Unlock both realtime free space metadata inodes after a freespace update. */
|
||||
void
|
||||
xfs_rtbitmap_unlock(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
xfs_iunlock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);
|
||||
xfs_iunlock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock the realtime free space metadata inodes for a freespace scan. Callers
|
||||
* must walk metadata blocks in order of increasing file offset.
|
||||
*/
|
||||
void
|
||||
xfs_rtbitmap_lock_shared(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int rbmlock_flags)
|
||||
{
|
||||
if (rbmlock_flags & XFS_RBMLOCK_BITMAP)
|
||||
xfs_ilock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
|
||||
|
||||
if (rbmlock_flags & XFS_RBMLOCK_SUMMARY)
|
||||
xfs_ilock(mp->m_rsumip, XFS_ILOCK_SHARED | XFS_ILOCK_RTSUM);
|
||||
}
|
||||
|
||||
/* Unlock the realtime free space metadata inodes after a freespace scan. */
|
||||
void
|
||||
xfs_rtbitmap_unlock_shared(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int rbmlock_flags)
|
||||
{
|
||||
if (rbmlock_flags & XFS_RBMLOCK_SUMMARY)
|
||||
xfs_iunlock(mp->m_rsumip, XFS_ILOCK_SHARED | XFS_ILOCK_RTSUM);
|
||||
|
||||
if (rbmlock_flags & XFS_RBMLOCK_BITMAP)
|
||||
xfs_iunlock(mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -360,6 +360,19 @@ xfs_filblks_t xfs_rtsummary_blockcount(struct xfs_mount *mp,
|
|||
unsigned int rsumlevels, xfs_extlen_t rbmblocks);
|
||||
unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
|
||||
unsigned int rsumlevels, xfs_extlen_t rbmblocks);
|
||||
|
||||
void xfs_rtbitmap_lock(struct xfs_trans *tp, struct xfs_mount *mp);
|
||||
void xfs_rtbitmap_unlock(struct xfs_mount *mp);
|
||||
|
||||
/* Lock the rt bitmap inode in shared mode */
|
||||
#define XFS_RBMLOCK_BITMAP (1U << 0)
|
||||
/* Lock the rt summary inode in shared mode */
|
||||
#define XFS_RBMLOCK_SUMMARY (1U << 1)
|
||||
|
||||
void xfs_rtbitmap_lock_shared(struct xfs_mount *mp,
|
||||
unsigned int rbmlock_flags);
|
||||
void xfs_rtbitmap_unlock_shared(struct xfs_mount *mp,
|
||||
unsigned int rbmlock_flags);
|
||||
#else /* CONFIG_XFS_RT */
|
||||
# define xfs_rtfree_extent(t,b,l) (-ENOSYS)
|
||||
# define xfs_rtfree_blocks(t,rb,rl) (-ENOSYS)
|
||||
|
|
@ -378,6 +391,10 @@ xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
|
|||
# define xfs_rtbitmap_wordcount(mp, r) (0)
|
||||
# define xfs_rtsummary_blockcount(mp, l, b) (0)
|
||||
# define xfs_rtsummary_wordcount(mp, l, b) (0)
|
||||
# define xfs_rtbitmap_lock(tp, mp) do { } while (0)
|
||||
# define xfs_rtbitmap_unlock(mp) do { } while (0)
|
||||
# define xfs_rtbitmap_lock_shared(mp, lf) do { } while (0)
|
||||
# define xfs_rtbitmap_unlock_shared(mp, lf) do { } while (0)
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
||||
#endif /* __XFS_RTBITMAP_H__ */
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@
|
|||
#include "xfs_health.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_exchrange.h"
|
||||
|
||||
/*
|
||||
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
|
||||
|
|
@ -175,6 +176,10 @@ xfs_sb_version_to_features(
|
|||
features |= XFS_FEAT_NEEDSREPAIR;
|
||||
if (sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_NREXT64)
|
||||
features |= XFS_FEAT_NREXT64;
|
||||
if (sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_EXCHRANGE)
|
||||
features |= XFS_FEAT_EXCHANGE_RANGE;
|
||||
if (sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_PARENT)
|
||||
features |= XFS_FEAT_PARENT;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
|
@ -1251,6 +1256,8 @@ xfs_fs_geometry(
|
|||
geo->flags |= XFS_FSOP_GEOM_FLAGS_BIGTIME;
|
||||
if (xfs_has_inobtcounts(mp))
|
||||
geo->flags |= XFS_FSOP_GEOM_FLAGS_INOBTCNT;
|
||||
if (xfs_has_parent(mp))
|
||||
geo->flags |= XFS_FSOP_GEOM_FLAGS_PARENT;
|
||||
if (xfs_has_sector(mp)) {
|
||||
geo->flags |= XFS_FSOP_GEOM_FLAGS_SECTOR;
|
||||
geo->logsectsize = sbp->sb_logsectsize;
|
||||
|
|
@ -1259,6 +1266,8 @@ xfs_fs_geometry(
|
|||
}
|
||||
if (xfs_has_large_extent_counts(mp))
|
||||
geo->flags |= XFS_FSOP_GEOM_FLAGS_NREXT64;
|
||||
if (xfs_has_exchange_range(mp))
|
||||
geo->flags |= XFS_FSOP_GEOM_FLAGS_EXCHANGE_RANGE;
|
||||
geo->rtsectsize = sbp->sb_blocksize;
|
||||
geo->dirblocksize = xfs_dir2_dirblock_bytes(sbp);
|
||||
|
||||
|
|
|
|||
|
|
@ -124,7 +124,6 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
|
|||
#define XFS_TRANS_RES_FDBLKS (1u << 6)
|
||||
/* Transaction contains an intent done log item */
|
||||
#define XFS_TRANS_HAS_INTENT_DONE (1u << 7)
|
||||
|
||||
/*
|
||||
* LOWMODE is used by the allocator to activate the lowspace algorithm - when
|
||||
* free space is running low the extent allocator may choose to allocate an
|
||||
|
|
@ -136,7 +135,10 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
|
|||
* for free space from AG 0. If the correct transaction reservations have been
|
||||
* made then this algorithm will eventually find all the space it needs.
|
||||
*/
|
||||
#define XFS_TRANS_LOWMODE 0x100 /* allocate in low space mode */
|
||||
#define XFS_TRANS_LOWMODE (1u << 8)
|
||||
|
||||
/* Transaction has locked the rtbitmap and rtsum inodes */
|
||||
#define XFS_TRANS_RTBITMAP_LOCKED (1u << 9)
|
||||
|
||||
/*
|
||||
* Field values for xfs_trans_mod_sb.
|
||||
|
|
|
|||
|
|
@ -169,7 +169,8 @@ xfs_symlink_local_to_remote(
|
|||
struct xfs_trans *tp,
|
||||
struct xfs_buf *bp,
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_ifork *ifp)
|
||||
struct xfs_ifork *ifp,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
char *buf;
|
||||
|
|
@ -310,6 +311,7 @@ int
|
|||
xfs_symlink_write_target(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
xfs_ino_t owner,
|
||||
const char *target_path,
|
||||
int pathlen,
|
||||
xfs_fsblock_t fs_blocks,
|
||||
|
|
@ -364,8 +366,7 @@ xfs_symlink_write_target(
|
|||
byte_cnt = min(byte_cnt, pathlen);
|
||||
|
||||
buf = bp->b_addr;
|
||||
buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset, byte_cnt,
|
||||
bp);
|
||||
buf += xfs_symlink_hdr_set(mp, owner, offset, byte_cnt, bp);
|
||||
|
||||
memcpy(buf, cur_chunk, byte_cnt);
|
||||
|
||||
|
|
@ -380,3 +381,50 @@ xfs_symlink_write_target(
|
|||
ASSERT(pathlen == 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Remove all the blocks from a symlink and invalidate buffers. */
|
||||
int
|
||||
xfs_symlink_remote_truncate(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_bmbt_irec mval[XFS_SYMLINK_MAPS];
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
struct xfs_buf *bp;
|
||||
int nmaps = XFS_SYMLINK_MAPS;
|
||||
int done = 0;
|
||||
int i;
|
||||
int error;
|
||||
|
||||
/* Read mappings and invalidate buffers. */
|
||||
error = xfs_bmapi_read(ip, 0, XFS_MAX_FILEOFF, mval, &nmaps, 0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
for (i = 0; i < nmaps; i++) {
|
||||
if (!xfs_bmap_is_real_extent(&mval[i]))
|
||||
break;
|
||||
|
||||
error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
|
||||
XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
|
||||
XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0,
|
||||
&bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_trans_binval(tp, bp);
|
||||
}
|
||||
|
||||
/* Unmap the remote blocks. */
|
||||
error = xfs_bunmapi(tp, ip, 0, XFS_MAX_FILEOFF, 0, nmaps, &done);
|
||||
if (error)
|
||||
return error;
|
||||
if (!done) {
|
||||
ASSERT(done);
|
||||
xfs_inode_mark_sick(ip, XFS_SICK_INO_SYMLINK);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,11 +16,13 @@ int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
|
|||
bool xfs_symlink_hdr_ok(xfs_ino_t ino, uint32_t offset,
|
||||
uint32_t size, struct xfs_buf *bp);
|
||||
void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
|
||||
struct xfs_inode *ip, struct xfs_ifork *ifp);
|
||||
struct xfs_inode *ip, struct xfs_ifork *ifp,
|
||||
void *priv);
|
||||
xfs_failaddr_t xfs_symlink_shortform_verify(void *sfp, int64_t size);
|
||||
int xfs_symlink_remote_read(struct xfs_inode *ip, char *link);
|
||||
int xfs_symlink_write_target(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
const char *target_path, int pathlen, xfs_fsblock_t fs_blocks,
|
||||
uint resblks);
|
||||
xfs_ino_t owner, const char *target_path, int pathlen,
|
||||
xfs_fsblock_t fs_blocks, uint resblks);
|
||||
int xfs_symlink_remote_truncate(struct xfs_trans *tp, struct xfs_inode *ip);
|
||||
|
||||
#endif /* __XFS_SYMLINK_REMOTE_H */
|
||||
|
|
|
|||
|
|
@ -20,6 +20,9 @@
|
|||
#include "xfs_qm.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "xfs_attr_item.h"
|
||||
#include "xfs_log.h"
|
||||
#include "xfs_da_format.h"
|
||||
|
||||
#define _ALLOC true
|
||||
#define _FREE false
|
||||
|
|
@ -422,29 +425,110 @@ xfs_calc_itruncate_reservation_minlogsize(
|
|||
return xfs_calc_itruncate_reservation(mp, true);
|
||||
}
|
||||
|
||||
static inline unsigned int xfs_calc_pptr_link_overhead(void)
|
||||
{
|
||||
return sizeof(struct xfs_attri_log_format) +
|
||||
xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
|
||||
xlog_calc_iovec_len(MAXNAMELEN - 1);
|
||||
}
|
||||
static inline unsigned int xfs_calc_pptr_unlink_overhead(void)
|
||||
{
|
||||
return sizeof(struct xfs_attri_log_format) +
|
||||
xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
|
||||
xlog_calc_iovec_len(MAXNAMELEN - 1);
|
||||
}
|
||||
static inline unsigned int xfs_calc_pptr_replace_overhead(void)
|
||||
{
|
||||
return sizeof(struct xfs_attri_log_format) +
|
||||
xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
|
||||
xlog_calc_iovec_len(MAXNAMELEN - 1) +
|
||||
xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
|
||||
xlog_calc_iovec_len(MAXNAMELEN - 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* In renaming a files we can modify:
|
||||
* the five inodes involved: 5 * inode size
|
||||
* the two directory btrees: 2 * (max depth + v2) * dir block size
|
||||
* the two directory bmap btrees: 2 * max depth * block size
|
||||
* And the bmap_finish transaction can free dir and bmap blocks (two sets
|
||||
* of bmap blocks) giving:
|
||||
* of bmap blocks) giving (t2):
|
||||
* the agf for the ags in which the blocks live: 3 * sector size
|
||||
* the agfl for the ags in which the blocks live: 3 * sector size
|
||||
* the superblock for the free block count: sector size
|
||||
* the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
|
||||
* If parent pointers are enabled (t3), then each transaction in the chain
|
||||
* must be capable of setting or removing the extended attribute
|
||||
* containing the parent information. It must also be able to handle
|
||||
* the three xattr intent items that track the progress of the parent
|
||||
* pointer update.
|
||||
*/
|
||||
STATIC uint
|
||||
xfs_calc_rename_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
max((xfs_calc_inode_res(mp, 5) +
|
||||
xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
unsigned int overhead = XFS_DQUOT_LOGRES(mp);
|
||||
struct xfs_trans_resv *resp = M_RES(mp);
|
||||
unsigned int t1, t2, t3 = 0;
|
||||
|
||||
t1 = xfs_calc_inode_res(mp, 5) +
|
||||
xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
|
||||
t2 = xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
unsigned int rename_overhead, exchange_overhead;
|
||||
|
||||
t3 = max(resp->tr_attrsetm.tr_logres,
|
||||
resp->tr_attrrm.tr_logres);
|
||||
|
||||
/*
|
||||
* For a standard rename, the three xattr intent log items
|
||||
* are (1) replacing the pptr for the source file; (2)
|
||||
* removing the pptr on the dest file; and (3) adding a
|
||||
* pptr for the whiteout file in the src dir.
|
||||
*
|
||||
* For an RENAME_EXCHANGE, there are two xattr intent
|
||||
* items to replace the pptr for both src and dest
|
||||
* files. Link counts don't change and there is no
|
||||
* whiteout.
|
||||
*
|
||||
* In the worst case we can end up relogging all log
|
||||
* intent items to allow the log tail to move ahead, so
|
||||
* they become overhead added to each transaction in a
|
||||
* processing chain.
|
||||
*/
|
||||
rename_overhead = xfs_calc_pptr_replace_overhead() +
|
||||
xfs_calc_pptr_unlink_overhead() +
|
||||
xfs_calc_pptr_link_overhead();
|
||||
exchange_overhead = 2 * xfs_calc_pptr_replace_overhead();
|
||||
|
||||
overhead += max(rename_overhead, exchange_overhead);
|
||||
}
|
||||
|
||||
return overhead + max3(t1, t2, t3);
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
xfs_rename_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
/* One for the rename, one more for freeing blocks */
|
||||
unsigned int ret = XFS_RENAME_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to remove or add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += max(resp->tr_attrsetm.tr_logcount,
|
||||
resp->tr_attrrm.tr_logcount);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -461,6 +545,23 @@ xfs_calc_iunlink_remove_reservation(
|
|||
2 * M_IGEO(mp)->inode_cluster_size;
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
xfs_link_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
unsigned int ret = XFS_LINK_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += resp->tr_attrsetm.tr_logcount;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* For creating a link to an inode:
|
||||
* the parent directory inode: inode size
|
||||
|
|
@ -477,14 +578,23 @@ STATIC uint
|
|||
xfs_calc_link_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
xfs_calc_iunlink_remove_reservation(mp) +
|
||||
max((xfs_calc_inode_res(mp, 2) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
unsigned int overhead = XFS_DQUOT_LOGRES(mp);
|
||||
struct xfs_trans_resv *resp = M_RES(mp);
|
||||
unsigned int t1, t2, t3 = 0;
|
||||
|
||||
overhead += xfs_calc_iunlink_remove_reservation(mp);
|
||||
t1 = xfs_calc_inode_res(mp, 2) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
|
||||
t2 = xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
t3 = resp->tr_attrsetm.tr_logres;
|
||||
overhead += xfs_calc_pptr_link_overhead();
|
||||
}
|
||||
|
||||
return overhead + max3(t1, t2, t3);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -499,6 +609,23 @@ xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
|
|||
M_IGEO(mp)->inode_cluster_size;
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
xfs_remove_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
unsigned int ret = XFS_REMOVE_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += resp->tr_attrrm.tr_logcount;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* For removing a directory entry we can modify:
|
||||
* the parent directory inode: inode size
|
||||
|
|
@ -515,14 +642,24 @@ STATIC uint
|
|||
xfs_calc_remove_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
xfs_calc_iunlink_add_reservation(mp) +
|
||||
max((xfs_calc_inode_res(mp, 2) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
|
||||
XFS_FSB_TO_B(mp, 1))),
|
||||
(xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1))));
|
||||
unsigned int overhead = XFS_DQUOT_LOGRES(mp);
|
||||
struct xfs_trans_resv *resp = M_RES(mp);
|
||||
unsigned int t1, t2, t3 = 0;
|
||||
|
||||
overhead += xfs_calc_iunlink_add_reservation(mp);
|
||||
|
||||
t1 = xfs_calc_inode_res(mp, 2) +
|
||||
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
|
||||
t2 = xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
|
||||
xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
|
||||
XFS_FSB_TO_B(mp, 1));
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
t3 = resp->tr_attrrm.tr_logres;
|
||||
overhead += xfs_calc_pptr_unlink_overhead();
|
||||
}
|
||||
|
||||
return overhead + max3(t1, t2, t3);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -571,12 +708,40 @@ xfs_calc_icreate_resv_alloc(
|
|||
xfs_calc_finobt_res(mp);
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
xfs_calc_icreate_reservation(xfs_mount_t *mp)
|
||||
static inline unsigned int
|
||||
xfs_icreate_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
return XFS_DQUOT_LOGRES(mp) +
|
||||
max(xfs_calc_icreate_resv_alloc(mp),
|
||||
xfs_calc_create_resv_modify(mp));
|
||||
unsigned int ret = XFS_CREATE_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += resp->tr_attrsetm.tr_logcount;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
xfs_calc_icreate_reservation(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
struct xfs_trans_resv *resp = M_RES(mp);
|
||||
unsigned int overhead = XFS_DQUOT_LOGRES(mp);
|
||||
unsigned int t1, t2, t3 = 0;
|
||||
|
||||
t1 = xfs_calc_icreate_resv_alloc(mp);
|
||||
t2 = xfs_calc_create_resv_modify(mp);
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
t3 = resp->tr_attrsetm.tr_logres;
|
||||
overhead += xfs_calc_pptr_link_overhead();
|
||||
}
|
||||
|
||||
return overhead + max3(t1, t2, t3);
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
|
|
@ -589,6 +754,23 @@ xfs_calc_create_tmpfile_reservation(
|
|||
return res + xfs_calc_iunlink_add_reservation(mp);
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
xfs_mkdir_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
unsigned int ret = XFS_MKDIR_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += resp->tr_attrsetm.tr_logcount;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Making a new directory is the same as creating a new file.
|
||||
*/
|
||||
|
|
@ -599,6 +781,22 @@ xfs_calc_mkdir_reservation(
|
|||
return xfs_calc_icreate_reservation(mp);
|
||||
}
|
||||
|
||||
static inline unsigned int
|
||||
xfs_symlink_log_count(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
unsigned int ret = XFS_SYMLINK_LOG_COUNT;
|
||||
|
||||
/*
|
||||
* Pre-reserve enough log reservation to handle the transaction
|
||||
* rolling needed to add one parent pointer.
|
||||
*/
|
||||
if (xfs_has_parent(mp))
|
||||
ret += resp->tr_attrsetm.tr_logcount;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Making a new symplink is the same as creating a new file, but
|
||||
|
|
@ -911,6 +1109,52 @@ xfs_calc_sb_reservation(
|
|||
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
|
||||
}
|
||||
|
||||
/*
|
||||
* Namespace reservations.
|
||||
*
|
||||
* These get tricky when parent pointers are enabled as we have attribute
|
||||
* modifications occurring from within these transactions. Rather than confuse
|
||||
* each of these reservation calculations with the conditional attribute
|
||||
* reservations, add them here in a clear and concise manner. This requires that
|
||||
* the attribute reservations have already been calculated.
|
||||
*
|
||||
* Note that we only include the static attribute reservation here; the runtime
|
||||
* reservation will have to be modified by the size of the attributes being
|
||||
* added/removed/modified. See the comments on the attribute reservation
|
||||
* calculations for more details.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_calc_namespace_reservations(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans_resv *resp)
|
||||
{
|
||||
ASSERT(resp->tr_attrsetm.tr_logres > 0);
|
||||
|
||||
resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
|
||||
resp->tr_rename.tr_logcount = xfs_rename_log_count(mp, resp);
|
||||
resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
|
||||
resp->tr_link.tr_logcount = xfs_link_log_count(mp, resp);
|
||||
resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
|
||||
resp->tr_remove.tr_logcount = xfs_remove_log_count(mp, resp);
|
||||
resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
|
||||
resp->tr_symlink.tr_logcount = xfs_symlink_log_count(mp, resp);
|
||||
resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
|
||||
resp->tr_create.tr_logcount = xfs_icreate_log_count(mp, resp);
|
||||
resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
|
||||
resp->tr_mkdir.tr_logcount = xfs_mkdir_log_count(mp, resp);
|
||||
resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
}
|
||||
|
||||
void
|
||||
xfs_trans_resv_calc(
|
||||
struct xfs_mount *mp,
|
||||
|
|
@ -930,35 +1174,11 @@ xfs_trans_resv_calc(
|
|||
resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
|
||||
resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
|
||||
resp->tr_rename.tr_logcount = XFS_RENAME_LOG_COUNT;
|
||||
resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
|
||||
resp->tr_link.tr_logcount = XFS_LINK_LOG_COUNT;
|
||||
resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
|
||||
resp->tr_remove.tr_logcount = XFS_REMOVE_LOG_COUNT;
|
||||
resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
|
||||
resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
|
||||
resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
|
||||
resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
|
||||
resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_create_tmpfile.tr_logres =
|
||||
xfs_calc_create_tmpfile_reservation(mp);
|
||||
resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
|
||||
resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
|
||||
resp->tr_mkdir.tr_logcount = XFS_MKDIR_LOG_COUNT;
|
||||
resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
|
||||
resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
|
||||
resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
|
@ -988,6 +1208,8 @@ xfs_trans_resv_calc(
|
|||
resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
|
||||
resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
|
||||
|
||||
xfs_calc_namespace_reservations(mp, resp);
|
||||
|
||||
/*
|
||||
* The following transactions are logged in logical format with
|
||||
* a default log count.
|
||||
|
|
|
|||
121
fs/xfs/libxfs/xfs_trans_space.c
Normal file
121
fs/xfs/libxfs/xfs_trans_space.c
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2000,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_da_format.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_trans_space.h"
|
||||
|
||||
/* Calculate the disk space required to add a parent pointer. */
|
||||
unsigned int
|
||||
xfs_parent_calc_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen)
|
||||
{
|
||||
/*
|
||||
* Parent pointers are always the first attr in an attr tree, and never
|
||||
* larger than a block
|
||||
*/
|
||||
return XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) +
|
||||
XFS_NEXTENTADD_SPACE_RES(mp, namelen, XFS_ATTR_FORK);
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_create_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen)
|
||||
{
|
||||
unsigned int ret;
|
||||
|
||||
ret = XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp, namelen);
|
||||
if (xfs_has_parent(mp))
|
||||
ret += xfs_parent_calc_space_res(mp, namelen);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_mkdir_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen)
|
||||
{
|
||||
return xfs_create_space_res(mp, namelen);
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_link_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen)
|
||||
{
|
||||
unsigned int ret;
|
||||
|
||||
ret = XFS_DIRENTER_SPACE_RES(mp, namelen);
|
||||
if (xfs_has_parent(mp))
|
||||
ret += xfs_parent_calc_space_res(mp, namelen);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_symlink_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen,
|
||||
unsigned int fsblocks)
|
||||
{
|
||||
unsigned int ret;
|
||||
|
||||
ret = XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp, namelen) +
|
||||
fsblocks;
|
||||
|
||||
if (xfs_has_parent(mp))
|
||||
ret += xfs_parent_calc_space_res(mp, namelen);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_remove_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int namelen)
|
||||
{
|
||||
unsigned int ret = XFS_DIRREMOVE_SPACE_RES(mp);
|
||||
|
||||
if (xfs_has_parent(mp))
|
||||
ret += xfs_parent_calc_space_res(mp, namelen);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_rename_space_res(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int src_namelen,
|
||||
bool target_exists,
|
||||
unsigned int target_namelen,
|
||||
bool has_whiteout)
|
||||
{
|
||||
unsigned int ret;
|
||||
|
||||
ret = XFS_DIRREMOVE_SPACE_RES(mp) +
|
||||
XFS_DIRENTER_SPACE_RES(mp, target_namelen);
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
if (has_whiteout)
|
||||
ret += xfs_parent_calc_space_res(mp, src_namelen);
|
||||
ret += 2 * xfs_parent_calc_space_res(mp, target_namelen);
|
||||
}
|
||||
|
||||
if (target_exists)
|
||||
ret += xfs_parent_calc_space_res(mp, target_namelen);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -10,6 +10,10 @@
|
|||
* Components of space reservations.
|
||||
*/
|
||||
|
||||
/* Worst case number of bmaps that can be held in a block. */
|
||||
#define XFS_MAX_CONTIG_BMAPS_PER_BLOCK(mp) \
|
||||
(((mp)->m_bmap_dmxr[0]) - ((mp)->m_bmap_dmnr[0]))
|
||||
|
||||
/* Worst case number of rmaps that can be held in a block. */
|
||||
#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
|
||||
(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
|
||||
|
|
@ -76,31 +80,32 @@
|
|||
/* This macro is not used - see inline code in xfs_attr_set */
|
||||
#define XFS_ATTRSET_SPACE_RES(mp, v) \
|
||||
(XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK) + XFS_B_TO_FSB(mp, v))
|
||||
#define XFS_CREATE_SPACE_RES(mp,nl) \
|
||||
(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
|
||||
#define XFS_DIOSTRAT_SPACE_RES(mp, v) \
|
||||
(XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + (v))
|
||||
#define XFS_GROWFS_SPACE_RES(mp) \
|
||||
(2 * (mp)->m_alloc_maxlevels)
|
||||
#define XFS_GROWFSRT_SPACE_RES(mp,b) \
|
||||
((b) + XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK))
|
||||
#define XFS_LINK_SPACE_RES(mp,nl) \
|
||||
XFS_DIRENTER_SPACE_RES(mp,nl)
|
||||
#define XFS_MKDIR_SPACE_RES(mp,nl) \
|
||||
(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
|
||||
#define XFS_QM_DQALLOC_SPACE_RES(mp) \
|
||||
(XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK) + \
|
||||
XFS_DQUOT_CLUSTER_SIZE_FSB)
|
||||
#define XFS_QM_QINOCREATE_SPACE_RES(mp) \
|
||||
XFS_IALLOC_SPACE_RES(mp)
|
||||
#define XFS_REMOVE_SPACE_RES(mp) \
|
||||
XFS_DIRREMOVE_SPACE_RES(mp)
|
||||
#define XFS_RENAME_SPACE_RES(mp,nl) \
|
||||
(XFS_DIRREMOVE_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
|
||||
#define XFS_SYMLINK_SPACE_RES(mp,nl,b) \
|
||||
(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b))
|
||||
#define XFS_IFREE_SPACE_RES(mp) \
|
||||
(xfs_has_finobt(mp) ? M_IGEO(mp)->inobt_maxlevels : 0)
|
||||
|
||||
unsigned int xfs_parent_calc_space_res(struct xfs_mount *mp,
|
||||
unsigned int namelen);
|
||||
|
||||
unsigned int xfs_create_space_res(struct xfs_mount *mp, unsigned int namelen);
|
||||
unsigned int xfs_mkdir_space_res(struct xfs_mount *mp, unsigned int namelen);
|
||||
unsigned int xfs_link_space_res(struct xfs_mount *mp, unsigned int namelen);
|
||||
unsigned int xfs_symlink_space_res(struct xfs_mount *mp, unsigned int namelen,
|
||||
unsigned int fsblocks);
|
||||
unsigned int xfs_remove_space_res(struct xfs_mount *mp, unsigned int namelen);
|
||||
|
||||
unsigned int xfs_rename_space_res(struct xfs_mount *mp,
|
||||
unsigned int src_namelen, bool target_exists,
|
||||
unsigned int target_namelen, bool has_whiteout);
|
||||
|
||||
#endif /* __XFS_TRANS_SPACE_H__ */
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
#include "xfs_ialloc.h"
|
||||
#include "xfs_rmap.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
|
||||
|
|
@ -165,8 +166,7 @@ xchk_superblock(
|
|||
xchk_block_set_corrupt(sc, bp);
|
||||
|
||||
/* Check sb_versionnum bits that are set at mkfs time. */
|
||||
vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
|
||||
XFS_SB_VERSION_NUMBITS |
|
||||
vernum_mask = cpu_to_be16(XFS_SB_VERSION_NUMBITS |
|
||||
XFS_SB_VERSION_ALIGNBIT |
|
||||
XFS_SB_VERSION_DALIGNBIT |
|
||||
XFS_SB_VERSION_SHAREDBIT |
|
||||
|
|
@ -865,6 +865,43 @@ xchk_agi_xref(
|
|||
/* scrub teardown will take care of sc->sa for us */
|
||||
}
|
||||
|
||||
/*
|
||||
* Check the unlinked buckets for links to bad inodes. We hold the AGI, so
|
||||
* there cannot be any threads updating unlinked list pointers in this AG.
|
||||
*/
|
||||
STATIC void
|
||||
xchk_iunlink(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_agi *agi)
|
||||
{
|
||||
unsigned int i;
|
||||
struct xfs_inode *ip;
|
||||
|
||||
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
|
||||
xfs_agino_t agino = be32_to_cpu(agi->agi_unlinked[i]);
|
||||
|
||||
while (agino != NULLAGINO) {
|
||||
if (agino % XFS_AGI_UNLINKED_BUCKETS != i) {
|
||||
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
|
||||
return;
|
||||
}
|
||||
|
||||
ip = xfs_iunlink_lookup(sc->sa.pag, agino);
|
||||
if (!ip) {
|
||||
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!xfs_inode_on_unlinked_list(ip)) {
|
||||
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
|
||||
return;
|
||||
}
|
||||
|
||||
agino = ip->i_next_unlinked;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Scrub the AGI. */
|
||||
int
|
||||
xchk_agi(
|
||||
|
|
@ -949,6 +986,8 @@ xchk_agi(
|
|||
if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
|
||||
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
|
||||
|
||||
xchk_iunlink(sc, agi);
|
||||
|
||||
xchk_agi_xref(sc);
|
||||
out:
|
||||
return error;
|
||||
|
|
|
|||
|
|
@ -21,13 +21,18 @@
|
|||
#include "xfs_rmap_btree.h"
|
||||
#include "xfs_refcount_btree.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_iunlink_item.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/bitmap.h"
|
||||
#include "scrub/agb_bitmap.h"
|
||||
#include "scrub/agino_bitmap.h"
|
||||
#include "scrub/reap.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
|
||||
/* Superblock */
|
||||
|
||||
|
|
@ -796,15 +801,57 @@ enum {
|
|||
XREP_AGI_MAX
|
||||
};
|
||||
|
||||
#define XREP_AGI_LOOKUP_BATCH 32
|
||||
|
||||
struct xrep_agi {
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
/* AGI buffer, tracked separately */
|
||||
struct xfs_buf *agi_bp;
|
||||
|
||||
/* context for finding btree roots */
|
||||
struct xrep_find_ag_btree fab[XREP_AGI_MAX];
|
||||
|
||||
/* old AGI contents in case we have to revert */
|
||||
struct xfs_agi old_agi;
|
||||
|
||||
/* bitmap of which inodes are unlinked */
|
||||
struct xagino_bitmap iunlink_bmp;
|
||||
|
||||
/* heads of the unlinked inode bucket lists */
|
||||
xfs_agino_t iunlink_heads[XFS_AGI_UNLINKED_BUCKETS];
|
||||
|
||||
/* scratchpad for batched lookups of the radix tree */
|
||||
struct xfs_inode *lookup_batch[XREP_AGI_LOOKUP_BATCH];
|
||||
|
||||
/* Map of ino -> next_ino for unlinked inode processing. */
|
||||
struct xfarray *iunlink_next;
|
||||
|
||||
/* Map of ino -> prev_ino for unlinked inode processing. */
|
||||
struct xfarray *iunlink_prev;
|
||||
};
|
||||
|
||||
static void
|
||||
xrep_agi_buf_cleanup(
|
||||
void *buf)
|
||||
{
|
||||
struct xrep_agi *ragi = buf;
|
||||
|
||||
xfarray_destroy(ragi->iunlink_prev);
|
||||
xfarray_destroy(ragi->iunlink_next);
|
||||
xagino_bitmap_destroy(&ragi->iunlink_bmp);
|
||||
}
|
||||
|
||||
/*
|
||||
* Given the inode btree roots described by *fab, find the roots, check them
|
||||
* for sanity, and pass the root data back out via *fab.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_agi_find_btrees(
|
||||
struct xfs_scrub *sc,
|
||||
struct xrep_find_ag_btree *fab)
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xrep_find_ag_btree *fab = ragi->fab;
|
||||
struct xfs_buf *agf_bp;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
int error;
|
||||
|
|
@ -837,10 +884,11 @@ xrep_agi_find_btrees(
|
|||
*/
|
||||
STATIC void
|
||||
xrep_agi_init_header(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_buf *agi_bp,
|
||||
struct xfs_agi *old_agi)
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_buf *agi_bp = ragi->agi_bp;
|
||||
struct xfs_agi *old_agi = &ragi->old_agi;
|
||||
struct xfs_agi *agi = agi_bp->b_addr;
|
||||
struct xfs_perag *pag = sc->sa.pag;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
|
@ -856,10 +904,6 @@ xrep_agi_init_header(
|
|||
if (xfs_has_crc(mp))
|
||||
uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
|
||||
|
||||
/* We don't know how to fix the unlinked list yet. */
|
||||
memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
|
||||
sizeof(agi->agi_unlinked));
|
||||
|
||||
/* Mark the incore AGF data stale until we're done fixing things. */
|
||||
ASSERT(xfs_perag_initialised_agi(pag));
|
||||
clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
|
||||
|
|
@ -868,10 +912,12 @@ xrep_agi_init_header(
|
|||
/* Set btree root information in an AGI. */
|
||||
STATIC void
|
||||
xrep_agi_set_roots(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_agi *agi,
|
||||
struct xrep_find_ag_btree *fab)
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_agi *agi = ragi->agi_bp->b_addr;
|
||||
struct xrep_find_ag_btree *fab = ragi->fab;
|
||||
|
||||
agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
|
||||
agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
|
||||
|
||||
|
|
@ -884,9 +930,10 @@ xrep_agi_set_roots(
|
|||
/* Update the AGI counters. */
|
||||
STATIC int
|
||||
xrep_agi_calc_from_btrees(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_buf *agi_bp)
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_buf *agi_bp = ragi->agi_bp;
|
||||
struct xfs_btree_cur *cur;
|
||||
struct xfs_agi *agi = agi_bp->b_addr;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
|
@ -928,12 +975,721 @@ err:
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Record a forwards unlinked chain pointer from agino -> next_agino in our
|
||||
* staging information.
|
||||
*/
|
||||
static inline int
|
||||
xrep_iunlink_store_next(
|
||||
struct xrep_agi *ragi,
|
||||
xfs_agino_t agino,
|
||||
xfs_agino_t next_agino)
|
||||
{
|
||||
ASSERT(next_agino != 0);
|
||||
|
||||
return xfarray_store(ragi->iunlink_next, agino, &next_agino);
|
||||
}
|
||||
|
||||
/*
|
||||
* Record a backwards unlinked chain pointer from prev_ino <- agino in our
|
||||
* staging information.
|
||||
*/
|
||||
static inline int
|
||||
xrep_iunlink_store_prev(
|
||||
struct xrep_agi *ragi,
|
||||
xfs_agino_t agino,
|
||||
xfs_agino_t prev_agino)
|
||||
{
|
||||
ASSERT(prev_agino != 0);
|
||||
|
||||
return xfarray_store(ragi->iunlink_prev, agino, &prev_agino);
|
||||
}
|
||||
|
||||
/*
|
||||
* Given an @agino, look up the next inode in the iunlink bucket. Returns
|
||||
* NULLAGINO if we're at the end of the chain, 0 if @agino is not in memory
|
||||
* like it should be, or a per-AG inode number.
|
||||
*/
|
||||
static inline xfs_agino_t
|
||||
xrep_iunlink_next(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_agino_t agino)
|
||||
{
|
||||
struct xfs_inode *ip;
|
||||
|
||||
ip = xfs_iunlink_lookup(sc->sa.pag, agino);
|
||||
if (!ip)
|
||||
return 0;
|
||||
|
||||
return ip->i_next_unlinked;
|
||||
}
|
||||
|
||||
/*
|
||||
* Load the inode @agino into memory, set its i_prev_unlinked, and drop the
|
||||
* inode so it can be inactivated. Returns NULLAGINO if we're at the end of
|
||||
* the chain or if we should stop walking the chain due to corruption; or a
|
||||
* per-AG inode number.
|
||||
*/
|
||||
STATIC xfs_agino_t
|
||||
xrep_iunlink_reload_next(
|
||||
struct xrep_agi *ragi,
|
||||
xfs_agino_t prev_agino,
|
||||
xfs_agino_t agino)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_inode *ip;
|
||||
xfs_ino_t ino;
|
||||
xfs_agino_t ret = NULLAGINO;
|
||||
int error;
|
||||
|
||||
ino = XFS_AGINO_TO_INO(sc->mp, sc->sa.pag->pag_agno, agino);
|
||||
error = xchk_iget(ragi->sc, ino, &ip);
|
||||
if (error)
|
||||
return ret;
|
||||
|
||||
trace_xrep_iunlink_reload_next(ip, prev_agino);
|
||||
|
||||
/* If this is a linked inode, stop processing the chain. */
|
||||
if (VFS_I(ip)->i_nlink != 0) {
|
||||
xrep_iunlink_store_next(ragi, agino, NULLAGINO);
|
||||
goto rele;
|
||||
}
|
||||
|
||||
ip->i_prev_unlinked = prev_agino;
|
||||
ret = ip->i_next_unlinked;
|
||||
|
||||
/*
|
||||
* Drop the inode reference that we just took. We hold the AGI, so
|
||||
* this inode cannot move off the unlinked list and hence cannot be
|
||||
* reclaimed.
|
||||
*/
|
||||
rele:
|
||||
xchk_irele(sc, ip);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk an AGI unlinked bucket's list to load incore any unlinked inodes that
|
||||
* still existed at mount time. This can happen if iunlink processing fails
|
||||
* during log recovery.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_iunlink_walk_ondisk_bucket(
|
||||
struct xrep_agi *ragi,
|
||||
unsigned int bucket)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
|
||||
xfs_agino_t prev_agino = NULLAGINO;
|
||||
xfs_agino_t next_agino;
|
||||
int error = 0;
|
||||
|
||||
next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
|
||||
while (next_agino != NULLAGINO) {
|
||||
xfs_agino_t agino = next_agino;
|
||||
|
||||
if (xchk_should_terminate(ragi->sc, &error))
|
||||
return error;
|
||||
|
||||
trace_xrep_iunlink_walk_ondisk_bucket(sc->sa.pag, bucket,
|
||||
prev_agino, agino);
|
||||
|
||||
if (bucket != agino % XFS_AGI_UNLINKED_BUCKETS)
|
||||
break;
|
||||
|
||||
next_agino = xrep_iunlink_next(sc, agino);
|
||||
if (!next_agino)
|
||||
next_agino = xrep_iunlink_reload_next(ragi, prev_agino,
|
||||
agino);
|
||||
|
||||
prev_agino = agino;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Decide if this is an unlinked inode in this AG. */
|
||||
STATIC bool
|
||||
xrep_iunlink_igrab(
|
||||
struct xfs_perag *pag,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_mount *mp = pag->pag_mount;
|
||||
|
||||
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
|
||||
return false;
|
||||
|
||||
if (!xfs_inode_on_unlinked_list(ip))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark the given inode in the lookup batch in our unlinked inode bitmap, and
|
||||
* remember if this inode is the start of the unlinked chain.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_iunlink_visit(
|
||||
struct xrep_agi *ragi,
|
||||
unsigned int batch_idx)
|
||||
{
|
||||
struct xfs_mount *mp = ragi->sc->mp;
|
||||
struct xfs_inode *ip = ragi->lookup_batch[batch_idx];
|
||||
xfs_agino_t agino;
|
||||
unsigned int bucket;
|
||||
int error;
|
||||
|
||||
ASSERT(XFS_INO_TO_AGNO(mp, ip->i_ino) == ragi->sc->sa.pag->pag_agno);
|
||||
ASSERT(xfs_inode_on_unlinked_list(ip));
|
||||
|
||||
agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
|
||||
bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
|
||||
|
||||
trace_xrep_iunlink_visit(ragi->sc->sa.pag, bucket,
|
||||
ragi->iunlink_heads[bucket], ip);
|
||||
|
||||
error = xagino_bitmap_set(&ragi->iunlink_bmp, agino, 1);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (ip->i_prev_unlinked == NULLAGINO) {
|
||||
if (ragi->iunlink_heads[bucket] == NULLAGINO)
|
||||
ragi->iunlink_heads[bucket] = agino;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find all incore unlinked inodes so that we can rebuild the unlinked buckets.
|
||||
* We hold the AGI so there should not be any modifications to the unlinked
|
||||
* list.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_iunlink_mark_incore(
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_perag *pag = ragi->sc->sa.pag;
|
||||
struct xfs_mount *mp = pag->pag_mount;
|
||||
uint32_t first_index = 0;
|
||||
bool done = false;
|
||||
unsigned int nr_found = 0;
|
||||
|
||||
do {
|
||||
unsigned int i;
|
||||
int error = 0;
|
||||
|
||||
if (xchk_should_terminate(ragi->sc, &error))
|
||||
return error;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
|
||||
(void **)&ragi->lookup_batch, first_index,
|
||||
XREP_AGI_LOOKUP_BATCH);
|
||||
if (!nr_found) {
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_found; i++) {
|
||||
struct xfs_inode *ip = ragi->lookup_batch[i];
|
||||
|
||||
if (done || !xrep_iunlink_igrab(pag, ip))
|
||||
ragi->lookup_batch[i] = NULL;
|
||||
|
||||
/*
|
||||
* Update the index for the next lookup. Catch
|
||||
* overflows into the next AG range which can occur if
|
||||
* we have inodes in the last block of the AG and we
|
||||
* are currently pointing to the last inode.
|
||||
*
|
||||
* Because we may see inodes that are from the wrong AG
|
||||
* due to RCU freeing and reallocation, only update the
|
||||
* index if it lies in this AG. It was a race that lead
|
||||
* us to see this inode, so another lookup from the
|
||||
* same index will not find it again.
|
||||
*/
|
||||
if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
|
||||
continue;
|
||||
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
|
||||
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
|
||||
done = true;
|
||||
}
|
||||
|
||||
/* unlock now we've grabbed the inodes. */
|
||||
rcu_read_unlock();
|
||||
|
||||
for (i = 0; i < nr_found; i++) {
|
||||
if (!ragi->lookup_batch[i])
|
||||
continue;
|
||||
error = xrep_iunlink_visit(ragi, i);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
} while (!done);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Mark all the unlinked ondisk inodes in this inobt record in iunlink_bmp. */
|
||||
STATIC int
|
||||
xrep_iunlink_mark_ondisk_rec(
|
||||
struct xfs_btree_cur *cur,
|
||||
const union xfs_btree_rec *rec,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_inobt_rec_incore irec;
|
||||
struct xrep_agi *ragi = priv;
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_mount *mp = cur->bc_mp;
|
||||
xfs_agino_t agino;
|
||||
unsigned int i;
|
||||
int error = 0;
|
||||
|
||||
xfs_inobt_btrec_to_irec(mp, rec, &irec);
|
||||
|
||||
for (i = 0, agino = irec.ir_startino;
|
||||
i < XFS_INODES_PER_CHUNK;
|
||||
i++, agino++) {
|
||||
struct xfs_inode *ip;
|
||||
unsigned int len = 1;
|
||||
|
||||
/* Skip free inodes */
|
||||
if (XFS_INOBT_MASK(i) & irec.ir_free)
|
||||
continue;
|
||||
/* Skip inodes we've seen before */
|
||||
if (xagino_bitmap_test(&ragi->iunlink_bmp, agino, &len))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Skip incore inodes; these were already picked up by
|
||||
* the _mark_incore step.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
ip = radix_tree_lookup(&sc->sa.pag->pag_ici_root, agino);
|
||||
rcu_read_unlock();
|
||||
if (ip)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Try to look up this inode. If we can't get it, just move
|
||||
* on because we haven't actually scrubbed the inobt or the
|
||||
* inodes yet.
|
||||
*/
|
||||
error = xchk_iget(ragi->sc,
|
||||
XFS_AGINO_TO_INO(mp, sc->sa.pag->pag_agno,
|
||||
agino),
|
||||
&ip);
|
||||
if (error)
|
||||
continue;
|
||||
|
||||
trace_xrep_iunlink_reload_ondisk(ip);
|
||||
|
||||
if (VFS_I(ip)->i_nlink == 0)
|
||||
error = xagino_bitmap_set(&ragi->iunlink_bmp, agino, 1);
|
||||
xchk_irele(sc, ip);
|
||||
if (error)
|
||||
break;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find ondisk inodes that are unlinked and not in cache, and mark them in
|
||||
* iunlink_bmp. We haven't checked the inobt yet, so we don't error out if
|
||||
* the btree is corrupt.
|
||||
*/
|
||||
STATIC void
|
||||
xrep_iunlink_mark_ondisk(
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_buf *agi_bp = ragi->agi_bp;
|
||||
struct xfs_btree_cur *cur;
|
||||
int error;
|
||||
|
||||
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
|
||||
error = xfs_btree_query_all(cur, xrep_iunlink_mark_ondisk_rec, ragi);
|
||||
xfs_btree_del_cursor(cur, error);
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk an iunlink bucket's inode list. For each inode that should be on this
|
||||
* chain, clear its entry in in iunlink_bmp because it's ok and we don't need
|
||||
* to touch it further.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_iunlink_resolve_bucket(
|
||||
struct xrep_agi *ragi,
|
||||
unsigned int bucket)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_inode *ip;
|
||||
xfs_agino_t prev_agino = NULLAGINO;
|
||||
xfs_agino_t next_agino = ragi->iunlink_heads[bucket];
|
||||
int error = 0;
|
||||
|
||||
while (next_agino != NULLAGINO) {
|
||||
if (xchk_should_terminate(ragi->sc, &error))
|
||||
return error;
|
||||
|
||||
/* Find the next inode in the chain. */
|
||||
ip = xfs_iunlink_lookup(sc->sa.pag, next_agino);
|
||||
if (!ip) {
|
||||
/* Inode not incore? Terminate the chain. */
|
||||
trace_xrep_iunlink_resolve_uncached(sc->sa.pag,
|
||||
bucket, prev_agino, next_agino);
|
||||
|
||||
next_agino = NULLAGINO;
|
||||
break;
|
||||
}
|
||||
|
||||
if (next_agino % XFS_AGI_UNLINKED_BUCKETS != bucket) {
|
||||
/*
|
||||
* Inode is in the wrong bucket. Advance the list,
|
||||
* but pretend we didn't see this inode.
|
||||
*/
|
||||
trace_xrep_iunlink_resolve_wronglist(sc->sa.pag,
|
||||
bucket, prev_agino, next_agino);
|
||||
|
||||
next_agino = ip->i_next_unlinked;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!xfs_inode_on_unlinked_list(ip)) {
|
||||
/*
|
||||
* Incore inode doesn't think this inode is on an
|
||||
* unlinked list. This is probably because we reloaded
|
||||
* it from disk. Advance the list, but pretend we
|
||||
* didn't see this inode; we'll fix that later.
|
||||
*/
|
||||
trace_xrep_iunlink_resolve_nolist(sc->sa.pag,
|
||||
bucket, prev_agino, next_agino);
|
||||
next_agino = ip->i_next_unlinked;
|
||||
continue;
|
||||
}
|
||||
|
||||
trace_xrep_iunlink_resolve_ok(sc->sa.pag, bucket, prev_agino,
|
||||
next_agino);
|
||||
|
||||
/*
|
||||
* Otherwise, this inode's unlinked pointers are ok. Clear it
|
||||
* from the unlinked bitmap since we're done with it, and make
|
||||
* sure the chain is still correct.
|
||||
*/
|
||||
error = xagino_bitmap_clear(&ragi->iunlink_bmp, next_agino, 1);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* Remember the previous inode's next pointer. */
|
||||
if (prev_agino != NULLAGINO) {
|
||||
error = xrep_iunlink_store_next(ragi, prev_agino,
|
||||
next_agino);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Remember this inode's previous pointer. */
|
||||
error = xrep_iunlink_store_prev(ragi, next_agino, prev_agino);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* Advance the list and remember this inode. */
|
||||
prev_agino = next_agino;
|
||||
next_agino = ip->i_next_unlinked;
|
||||
}
|
||||
|
||||
/* Update the previous inode's next pointer. */
|
||||
if (prev_agino != NULLAGINO) {
|
||||
error = xrep_iunlink_store_next(ragi, prev_agino, next_agino);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reinsert this unlinked inode into the head of the staged bucket list. */
|
||||
STATIC int
|
||||
xrep_iunlink_add_to_bucket(
|
||||
struct xrep_agi *ragi,
|
||||
xfs_agino_t agino)
|
||||
{
|
||||
xfs_agino_t current_head;
|
||||
unsigned int bucket;
|
||||
int error;
|
||||
|
||||
bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
|
||||
|
||||
/* Point this inode at the current head of the bucket list. */
|
||||
current_head = ragi->iunlink_heads[bucket];
|
||||
|
||||
trace_xrep_iunlink_add_to_bucket(ragi->sc->sa.pag, bucket, agino,
|
||||
current_head);
|
||||
|
||||
error = xrep_iunlink_store_next(ragi, agino, current_head);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* Remember the head inode's previous pointer. */
|
||||
if (current_head != NULLAGINO) {
|
||||
error = xrep_iunlink_store_prev(ragi, current_head, agino);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
ragi->iunlink_heads[bucket] = agino;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reinsert unlinked inodes into the staged iunlink buckets. */
|
||||
STATIC int
|
||||
xrep_iunlink_add_lost_inodes(
|
||||
uint32_t start,
|
||||
uint32_t len,
|
||||
void *priv)
|
||||
{
|
||||
struct xrep_agi *ragi = priv;
|
||||
int error;
|
||||
|
||||
for (; len > 0; start++, len--) {
|
||||
error = xrep_iunlink_add_to_bucket(ragi, start);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Figure out the iunlink bucket values and find inodes that need to be
|
||||
* reinserted into the list.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_iunlink_rebuild_buckets(
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Walk the ondisk AGI unlinked list to find inodes that are on the
|
||||
* list but aren't in memory. This can happen if a past log recovery
|
||||
* tried to clear the iunlinked list but failed. Our scan rebuilds the
|
||||
* unlinked list using incore inodes, so we must load and link them
|
||||
* properly.
|
||||
*/
|
||||
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
|
||||
error = xrep_iunlink_walk_ondisk_bucket(ragi, i);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Record all the incore unlinked inodes in iunlink_bmp that we didn't
|
||||
* find by walking the ondisk iunlink buckets. This shouldn't happen,
|
||||
* but we can't risk forgetting an inode somewhere.
|
||||
*/
|
||||
error = xrep_iunlink_mark_incore(ragi);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If there are ondisk inodes that are unlinked and are not been loaded
|
||||
* into cache, record them in iunlink_bmp.
|
||||
*/
|
||||
xrep_iunlink_mark_ondisk(ragi);
|
||||
|
||||
/*
|
||||
* Walk each iunlink bucket to (re)construct as much of the incore list
|
||||
* as would be correct. For each inode that survives this step, mark
|
||||
* it clear in iunlink_bmp; we're done with those inodes.
|
||||
*/
|
||||
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
|
||||
error = xrep_iunlink_resolve_bucket(ragi, i);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Any unlinked inodes that we didn't find through the bucket list
|
||||
* walk (or was ignored by the walk) must be inserted into the bucket
|
||||
* list. Stage this in memory for now.
|
||||
*/
|
||||
return xagino_bitmap_walk(&ragi->iunlink_bmp,
|
||||
xrep_iunlink_add_lost_inodes, ragi);
|
||||
}
|
||||
|
||||
/* Update i_next_iunlinked for the inode @agino. */
|
||||
STATIC int
|
||||
xrep_iunlink_relink_next(
|
||||
struct xrep_agi *ragi,
|
||||
xfarray_idx_t idx,
|
||||
xfs_agino_t next_agino)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_perag *pag = sc->sa.pag;
|
||||
struct xfs_inode *ip;
|
||||
xfarray_idx_t agino = idx - 1;
|
||||
bool want_rele = false;
|
||||
int error = 0;
|
||||
|
||||
ip = xfs_iunlink_lookup(pag, agino);
|
||||
if (!ip) {
|
||||
xfs_ino_t ino;
|
||||
xfs_agino_t prev_agino;
|
||||
|
||||
/*
|
||||
* No inode exists in cache. Load it off the disk so that we
|
||||
* can reinsert it into the incore unlinked list.
|
||||
*/
|
||||
ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
|
||||
error = xchk_iget(sc, ino, &ip);
|
||||
if (error)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
want_rele = true;
|
||||
|
||||
/* Set the backward pointer since this just came off disk. */
|
||||
error = xfarray_load(ragi->iunlink_prev, agino, &prev_agino);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
trace_xrep_iunlink_relink_prev(ip, prev_agino);
|
||||
ip->i_prev_unlinked = prev_agino;
|
||||
}
|
||||
|
||||
/* Update the forward pointer. */
|
||||
if (ip->i_next_unlinked != next_agino) {
|
||||
error = xfs_iunlink_log_inode(sc->tp, ip, pag, next_agino);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
trace_xrep_iunlink_relink_next(ip, next_agino);
|
||||
ip->i_next_unlinked = next_agino;
|
||||
}
|
||||
|
||||
out_rele:
|
||||
/*
|
||||
* The iunlink lookup doesn't igrab because we hold the AGI buffer lock
|
||||
* and the inode cannot be reclaimed. However, if we used iget to load
|
||||
* a missing inode, we must irele it here.
|
||||
*/
|
||||
if (want_rele)
|
||||
xchk_irele(sc, ip);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Update i_prev_iunlinked for the inode @agino. */
|
||||
STATIC int
|
||||
xrep_iunlink_relink_prev(
|
||||
struct xrep_agi *ragi,
|
||||
xfarray_idx_t idx,
|
||||
xfs_agino_t prev_agino)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_perag *pag = sc->sa.pag;
|
||||
struct xfs_inode *ip;
|
||||
xfarray_idx_t agino = idx - 1;
|
||||
bool want_rele = false;
|
||||
int error = 0;
|
||||
|
||||
ASSERT(prev_agino != 0);
|
||||
|
||||
ip = xfs_iunlink_lookup(pag, agino);
|
||||
if (!ip) {
|
||||
xfs_ino_t ino;
|
||||
xfs_agino_t next_agino;
|
||||
|
||||
/*
|
||||
* No inode exists in cache. Load it off the disk so that we
|
||||
* can reinsert it into the incore unlinked list.
|
||||
*/
|
||||
ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
|
||||
error = xchk_iget(sc, ino, &ip);
|
||||
if (error)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
want_rele = true;
|
||||
|
||||
/* Set the forward pointer since this just came off disk. */
|
||||
error = xfarray_load(ragi->iunlink_prev, agino, &next_agino);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
error = xfs_iunlink_log_inode(sc->tp, ip, pag, next_agino);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
trace_xrep_iunlink_relink_next(ip, next_agino);
|
||||
ip->i_next_unlinked = next_agino;
|
||||
}
|
||||
|
||||
/* Update the backward pointer. */
|
||||
if (ip->i_prev_unlinked != prev_agino) {
|
||||
trace_xrep_iunlink_relink_prev(ip, prev_agino);
|
||||
ip->i_prev_unlinked = prev_agino;
|
||||
}
|
||||
|
||||
out_rele:
|
||||
/*
|
||||
* The iunlink lookup doesn't igrab because we hold the AGI buffer lock
|
||||
* and the inode cannot be reclaimed. However, if we used iget to load
|
||||
* a missing inode, we must irele it here.
|
||||
*/
|
||||
if (want_rele)
|
||||
xchk_irele(sc, ip);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Log all the iunlink updates we need to finish regenerating the AGI. */
|
||||
STATIC int
|
||||
xrep_iunlink_commit(
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_agi *agi = ragi->agi_bp->b_addr;
|
||||
xfarray_idx_t idx = XFARRAY_CURSOR_INIT;
|
||||
xfs_agino_t agino;
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
/* Fix all the forward links */
|
||||
while ((error = xfarray_iter(ragi->iunlink_next, &idx, &agino)) == 1) {
|
||||
error = xrep_iunlink_relink_next(ragi, idx, agino);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Fix all the back links */
|
||||
idx = XFARRAY_CURSOR_INIT;
|
||||
while ((error = xfarray_iter(ragi->iunlink_prev, &idx, &agino)) == 1) {
|
||||
error = xrep_iunlink_relink_prev(ragi, idx, agino);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Copy the staged iunlink buckets to the new AGI. */
|
||||
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
|
||||
trace_xrep_iunlink_commit_bucket(ragi->sc->sa.pag, i,
|
||||
be32_to_cpu(ragi->old_agi.agi_unlinked[i]),
|
||||
ragi->iunlink_heads[i]);
|
||||
|
||||
agi->agi_unlinked[i] = cpu_to_be32(ragi->iunlink_heads[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Trigger reinitialization of the in-core data. */
|
||||
STATIC int
|
||||
xrep_agi_commit_new(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_buf *agi_bp)
|
||||
struct xrep_agi *ragi)
|
||||
{
|
||||
struct xfs_scrub *sc = ragi->sc;
|
||||
struct xfs_buf *agi_bp = ragi->agi_bp;
|
||||
struct xfs_perag *pag;
|
||||
struct xfs_agi *agi = agi_bp->b_addr;
|
||||
|
||||
|
|
@ -956,33 +1712,58 @@ xrep_agi_commit_new(
|
|||
/* Repair the AGI. */
|
||||
int
|
||||
xrep_agi(
|
||||
struct xfs_scrub *sc)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xrep_find_ag_btree fab[XREP_AGI_MAX] = {
|
||||
[XREP_AGI_INOBT] = {
|
||||
.rmap_owner = XFS_RMAP_OWN_INOBT,
|
||||
.buf_ops = &xfs_inobt_buf_ops,
|
||||
.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
|
||||
},
|
||||
[XREP_AGI_FINOBT] = {
|
||||
.rmap_owner = XFS_RMAP_OWN_INOBT,
|
||||
.buf_ops = &xfs_finobt_buf_ops,
|
||||
.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
|
||||
},
|
||||
[XREP_AGI_END] = {
|
||||
.buf_ops = NULL
|
||||
},
|
||||
};
|
||||
struct xfs_agi old_agi;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_buf *agi_bp;
|
||||
struct xfs_agi *agi;
|
||||
int error;
|
||||
struct xrep_agi *ragi;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
char *descr;
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
/* We require the rmapbt to rebuild anything. */
|
||||
if (!xfs_has_rmapbt(mp))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
sc->buf = kzalloc(sizeof(struct xrep_agi), XCHK_GFP_FLAGS);
|
||||
if (!sc->buf)
|
||||
return -ENOMEM;
|
||||
ragi = sc->buf;
|
||||
ragi->sc = sc;
|
||||
|
||||
ragi->fab[XREP_AGI_INOBT] = (struct xrep_find_ag_btree){
|
||||
.rmap_owner = XFS_RMAP_OWN_INOBT,
|
||||
.buf_ops = &xfs_inobt_buf_ops,
|
||||
.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
|
||||
};
|
||||
ragi->fab[XREP_AGI_FINOBT] = (struct xrep_find_ag_btree){
|
||||
.rmap_owner = XFS_RMAP_OWN_INOBT,
|
||||
.buf_ops = &xfs_finobt_buf_ops,
|
||||
.maxlevels = M_IGEO(sc->mp)->inobt_maxlevels,
|
||||
};
|
||||
ragi->fab[XREP_AGI_END] = (struct xrep_find_ag_btree){
|
||||
.buf_ops = NULL,
|
||||
};
|
||||
|
||||
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
|
||||
ragi->iunlink_heads[i] = NULLAGINO;
|
||||
|
||||
xagino_bitmap_init(&ragi->iunlink_bmp);
|
||||
sc->buf_cleanup = xrep_agi_buf_cleanup;
|
||||
|
||||
descr = xchk_xfile_ag_descr(sc, "iunlinked next pointers");
|
||||
error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
|
||||
&ragi->iunlink_next);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
descr = xchk_xfile_ag_descr(sc, "iunlinked prev pointers");
|
||||
error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
|
||||
&ragi->iunlink_prev);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* Make sure we have the AGI buffer, as scrub might have decided it
|
||||
* was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
|
||||
|
|
@ -990,14 +1771,17 @@ xrep_agi(
|
|||
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
|
||||
XFS_AG_DADDR(mp, sc->sa.pag->pag_agno,
|
||||
XFS_AGI_DADDR(mp)),
|
||||
XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
|
||||
XFS_FSS_TO_BB(mp, 1), 0, &ragi->agi_bp, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
agi_bp->b_ops = &xfs_agi_buf_ops;
|
||||
agi = agi_bp->b_addr;
|
||||
ragi->agi_bp->b_ops = &xfs_agi_buf_ops;
|
||||
|
||||
/* Find the AGI btree roots. */
|
||||
error = xrep_agi_find_btrees(sc, fab);
|
||||
error = xrep_agi_find_btrees(ragi);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xrep_iunlink_rebuild_buckets(ragi);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -1006,18 +1790,21 @@ xrep_agi(
|
|||
return error;
|
||||
|
||||
/* Start rewriting the header and implant the btrees we found. */
|
||||
xrep_agi_init_header(sc, agi_bp, &old_agi);
|
||||
xrep_agi_set_roots(sc, agi, fab);
|
||||
error = xrep_agi_calc_from_btrees(sc, agi_bp);
|
||||
xrep_agi_init_header(ragi);
|
||||
xrep_agi_set_roots(ragi);
|
||||
error = xrep_agi_calc_from_btrees(ragi);
|
||||
if (error)
|
||||
goto out_revert;
|
||||
error = xrep_iunlink_commit(ragi);
|
||||
if (error)
|
||||
goto out_revert;
|
||||
|
||||
/* Reinitialize in-core state. */
|
||||
return xrep_agi_commit_new(sc, agi_bp);
|
||||
return xrep_agi_commit_new(ragi);
|
||||
|
||||
out_revert:
|
||||
/* Mark the incore AGI state stale and revert the AGI. */
|
||||
clear_bit(XFS_AGSTATE_AGI_INIT, &sc->sa.pag->pag_opstate);
|
||||
memcpy(agi, &old_agi, sizeof(old_agi));
|
||||
memcpy(ragi->agi_bp->b_addr, &ragi->old_agi, sizeof(struct xfs_agi));
|
||||
return error;
|
||||
}
|
||||
|
|
|
|||
49
fs/xfs/scrub/agino_bitmap.h
Normal file
49
fs/xfs/scrub/agino_bitmap.h
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2018-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_AGINO_BITMAP_H__
|
||||
#define __XFS_SCRUB_AGINO_BITMAP_H__
|
||||
|
||||
/* Bitmaps, but for type-checked for xfs_agino_t */
|
||||
|
||||
struct xagino_bitmap {
|
||||
struct xbitmap32 aginobitmap;
|
||||
};
|
||||
|
||||
static inline void xagino_bitmap_init(struct xagino_bitmap *bitmap)
|
||||
{
|
||||
xbitmap32_init(&bitmap->aginobitmap);
|
||||
}
|
||||
|
||||
static inline void xagino_bitmap_destroy(struct xagino_bitmap *bitmap)
|
||||
{
|
||||
xbitmap32_destroy(&bitmap->aginobitmap);
|
||||
}
|
||||
|
||||
static inline int xagino_bitmap_clear(struct xagino_bitmap *bitmap,
|
||||
xfs_agino_t agino, unsigned int len)
|
||||
{
|
||||
return xbitmap32_clear(&bitmap->aginobitmap, agino, len);
|
||||
}
|
||||
|
||||
static inline int xagino_bitmap_set(struct xagino_bitmap *bitmap,
|
||||
xfs_agino_t agino, unsigned int len)
|
||||
{
|
||||
return xbitmap32_set(&bitmap->aginobitmap, agino, len);
|
||||
}
|
||||
|
||||
static inline bool xagino_bitmap_test(struct xagino_bitmap *bitmap,
|
||||
xfs_agino_t agino, unsigned int *len)
|
||||
{
|
||||
return xbitmap32_test(&bitmap->aginobitmap, agino, len);
|
||||
}
|
||||
|
||||
static inline int xagino_bitmap_walk(struct xagino_bitmap *bitmap,
|
||||
xbitmap32_walk_fn fn, void *priv)
|
||||
{
|
||||
return xbitmap32_walk(&bitmap->aginobitmap, fn, priv);
|
||||
}
|
||||
|
||||
#endif /* __XFS_SCRUB_AGINO_BITMAP_H__ */
|
||||
|
|
@ -778,7 +778,7 @@ xrep_abt_build_new_trees(
|
|||
|
||||
error = xrep_bnobt_sort_records(ra);
|
||||
if (error)
|
||||
return error;
|
||||
goto err_levels;
|
||||
|
||||
/* Load the free space by block number tree. */
|
||||
ra->array_cur = XFARRAY_CURSOR_INIT;
|
||||
|
|
|
|||
|
|
@ -10,16 +10,20 @@
|
|||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_da_format.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_attr_leaf.h"
|
||||
#include "xfs_attr_sf.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/dabtree.h"
|
||||
#include "scrub/attr.h"
|
||||
#include "scrub/listxattr.h"
|
||||
#include "scrub/repair.h"
|
||||
|
||||
/* Free the buffers linked from the xattr buffer. */
|
||||
static void
|
||||
|
|
@ -35,6 +39,8 @@ xchk_xattr_buf_cleanup(
|
|||
kvfree(ab->value);
|
||||
ab->value = NULL;
|
||||
ab->value_sz = 0;
|
||||
kvfree(ab->name);
|
||||
ab->name = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -65,7 +71,7 @@ xchk_xattr_want_freemap(
|
|||
* reallocating the buffer if necessary. Buffer contents are not preserved
|
||||
* across a reallocation.
|
||||
*/
|
||||
static int
|
||||
int
|
||||
xchk_setup_xattr_buf(
|
||||
struct xfs_scrub *sc,
|
||||
size_t value_size)
|
||||
|
|
@ -95,6 +101,12 @@ xchk_setup_xattr_buf(
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (xchk_could_repair(sc)) {
|
||||
ab->name = kvmalloc(XATTR_NAME_MAX + 1, XCHK_GFP_FLAGS);
|
||||
if (!ab->name)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
resize_value:
|
||||
if (ab->value_sz >= value_size)
|
||||
return 0;
|
||||
|
|
@ -121,6 +133,12 @@ xchk_setup_xattr(
|
|||
{
|
||||
int error;
|
||||
|
||||
if (xchk_could_repair(sc)) {
|
||||
error = xrep_setup_xattr(sc);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* We failed to get memory while checking attrs, so this time try to
|
||||
* get all the memory we're ever going to need. Allocate the buffer
|
||||
|
|
@ -137,106 +155,105 @@ xchk_setup_xattr(
|
|||
|
||||
/* Extended Attributes */
|
||||
|
||||
struct xchk_xattr {
|
||||
struct xfs_attr_list_context context;
|
||||
struct xfs_scrub *sc;
|
||||
};
|
||||
|
||||
/*
|
||||
* Check that an extended attribute key can be looked up by hash.
|
||||
*
|
||||
* We use the XFS attribute list iterator (i.e. xfs_attr_list_ilocked)
|
||||
* to call this function for every attribute key in an inode. Once
|
||||
* we're here, we load the attribute value to see if any errors happen,
|
||||
* or if we get more or less data than we expected.
|
||||
* We use the extended attribute walk helper to call this function for every
|
||||
* attribute key in an inode. Once we're here, we load the attribute value to
|
||||
* see if any errors happen, or if we get more or less data than we expected.
|
||||
*/
|
||||
static void
|
||||
xchk_xattr_listent(
|
||||
struct xfs_attr_list_context *context,
|
||||
int flags,
|
||||
unsigned char *name,
|
||||
int namelen,
|
||||
int valuelen)
|
||||
static int
|
||||
xchk_xattr_actor(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int attr_flags,
|
||||
const unsigned char *name,
|
||||
unsigned int namelen,
|
||||
const void *value,
|
||||
unsigned int valuelen,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_da_args args = {
|
||||
.op_flags = XFS_DA_OP_NOTIME,
|
||||
.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK,
|
||||
.geo = context->dp->i_mount->m_attr_geo,
|
||||
.attr_filter = attr_flags & XFS_ATTR_NSP_ONDISK_MASK,
|
||||
.geo = sc->mp->m_attr_geo,
|
||||
.whichfork = XFS_ATTR_FORK,
|
||||
.dp = context->dp,
|
||||
.dp = ip,
|
||||
.name = name,
|
||||
.namelen = namelen,
|
||||
.hashval = xfs_da_hashname(name, namelen),
|
||||
.trans = context->tp,
|
||||
.trans = sc->tp,
|
||||
.valuelen = valuelen,
|
||||
.owner = ip->i_ino,
|
||||
};
|
||||
struct xchk_xattr_buf *ab;
|
||||
struct xchk_xattr *sx;
|
||||
int error = 0;
|
||||
|
||||
sx = container_of(context, struct xchk_xattr, context);
|
||||
ab = sx->sc->buf;
|
||||
ab = sc->buf;
|
||||
|
||||
if (xchk_should_terminate(sx->sc, &error)) {
|
||||
context->seen_enough = error;
|
||||
return;
|
||||
if (xchk_should_terminate(sc, &error))
|
||||
return error;
|
||||
|
||||
if (attr_flags & ~XFS_ATTR_ONDISK_MASK) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
if (flags & XFS_ATTR_INCOMPLETE) {
|
||||
if (attr_flags & XFS_ATTR_INCOMPLETE) {
|
||||
/* Incomplete attr key, just mark the inode for preening. */
|
||||
xchk_ino_set_preen(sx->sc, context->dp->i_ino);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Only one namespace bit allowed. */
|
||||
if (hweight32(flags & XFS_ATTR_NSP_ONDISK_MASK) > 1) {
|
||||
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
|
||||
goto fail_xref;
|
||||
xchk_ino_set_preen(sc, ip->i_ino);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Does this name make sense? */
|
||||
if (!xfs_attr_namecheck(name, namelen)) {
|
||||
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
|
||||
goto fail_xref;
|
||||
if (!xfs_attr_namecheck(attr_flags, name, namelen)) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
/* Check parent pointer record. */
|
||||
if ((attr_flags & XFS_ATTR_PARENT) &&
|
||||
!xfs_parent_valuecheck(sc->mp, value, valuelen)) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Local xattr values are stored in the attr leaf block, so we don't
|
||||
* need to retrieve the value from a remote block to detect corruption
|
||||
* problems.
|
||||
* Try to allocate enough memory to extract the attr value. If that
|
||||
* doesn't work, return -EDEADLOCK as a signal to try again with a
|
||||
* maximally sized buffer.
|
||||
*/
|
||||
if (flags & XFS_ATTR_LOCAL)
|
||||
goto fail_xref;
|
||||
|
||||
/*
|
||||
* Try to allocate enough memory to extrat the attr value. If that
|
||||
* doesn't work, we overload the seen_enough variable to convey
|
||||
* the error message back to the main scrub function.
|
||||
*/
|
||||
error = xchk_setup_xattr_buf(sx->sc, valuelen);
|
||||
error = xchk_setup_xattr_buf(sc, valuelen);
|
||||
if (error == -ENOMEM)
|
||||
error = -EDEADLOCK;
|
||||
if (error) {
|
||||
context->seen_enough = error;
|
||||
return;
|
||||
}
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* Parent pointers are matched on attr name and value, so we must
|
||||
* supply the xfs_parent_rec here when confirming that the dabtree
|
||||
* indexing works correctly.
|
||||
*/
|
||||
if (attr_flags & XFS_ATTR_PARENT)
|
||||
memcpy(ab->value, value, valuelen);
|
||||
|
||||
args.value = ab->value;
|
||||
|
||||
/*
|
||||
* Get the attr value to ensure that lookup can find this attribute
|
||||
* through the dabtree indexing and that remote value retrieval also
|
||||
* works correctly.
|
||||
*/
|
||||
xfs_attr_sethash(&args);
|
||||
error = xfs_attr_get_ilocked(&args);
|
||||
/* ENODATA means the hash lookup failed and the attr is bad */
|
||||
if (error == -ENODATA)
|
||||
error = -EFSCORRUPTED;
|
||||
if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
|
||||
if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, args.blkno,
|
||||
&error))
|
||||
goto fail_xref;
|
||||
return error;
|
||||
if (args.valuelen != valuelen)
|
||||
xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
|
||||
args.blkno);
|
||||
fail_xref:
|
||||
if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
context->seen_enough = 1;
|
||||
return;
|
||||
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -246,7 +263,7 @@ fail_xref:
|
|||
* Within a char, the lowest bit of the char represents the byte with
|
||||
* the smallest address
|
||||
*/
|
||||
STATIC bool
|
||||
bool
|
||||
xchk_xattr_set_map(
|
||||
struct xfs_scrub *sc,
|
||||
unsigned long *map,
|
||||
|
|
@ -403,6 +420,17 @@ xchk_xattr_block(
|
|||
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
|
||||
hdrsize = xfs_attr3_leaf_hdr_size(leaf);
|
||||
|
||||
/*
|
||||
* Empty xattr leaf blocks mapped at block 0 are probably a byproduct
|
||||
* of a race between setxattr and a log shutdown. Anywhere else in the
|
||||
* attr fork is a corruption.
|
||||
*/
|
||||
if (leafhdr.count == 0) {
|
||||
if (blk->blkno == 0)
|
||||
xchk_da_set_preen(ds, level);
|
||||
else
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
}
|
||||
if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
if (leafhdr.firstused > mp->m_attr_geo->blksize)
|
||||
|
|
@ -411,6 +439,8 @@ xchk_xattr_block(
|
|||
xchk_da_set_corrupt(ds, level);
|
||||
if (!xchk_xattr_set_map(ds->sc, ab->usedmap, 0, hdrsize))
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
if (leafhdr.holes)
|
||||
xchk_da_set_preen(ds, level);
|
||||
|
||||
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
goto out;
|
||||
|
|
@ -463,7 +493,6 @@ xchk_xattr_rec(
|
|||
xfs_dahash_t hash;
|
||||
int nameidx;
|
||||
int hdrsize;
|
||||
unsigned int badflags;
|
||||
int error;
|
||||
|
||||
ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
|
||||
|
|
@ -493,10 +522,15 @@ xchk_xattr_rec(
|
|||
|
||||
/* Retrieve the entry and check it. */
|
||||
hash = be32_to_cpu(ent->hashval);
|
||||
badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
|
||||
XFS_ATTR_INCOMPLETE);
|
||||
if ((ent->flags & badflags) != 0)
|
||||
if (ent->flags & ~XFS_ATTR_ONDISK_MASK) {
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
return 0;
|
||||
}
|
||||
if (!xfs_attr_check_namespace(ent->flags)) {
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ent->flags & XFS_ATTR_LOCAL) {
|
||||
lentry = (struct xfs_attr_leaf_name_local *)
|
||||
(((char *)bp->b_addr) + nameidx);
|
||||
|
|
@ -504,7 +538,10 @@ xchk_xattr_rec(
|
|||
xchk_da_set_corrupt(ds, level);
|
||||
goto out;
|
||||
}
|
||||
calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
|
||||
calc_hash = xfs_attr_hashval(mp, ent->flags, lentry->nameval,
|
||||
lentry->namelen,
|
||||
lentry->nameval + lentry->namelen,
|
||||
be16_to_cpu(lentry->valuelen));
|
||||
} else {
|
||||
rentry = (struct xfs_attr_leaf_name_remote *)
|
||||
(((char *)bp->b_addr) + nameidx);
|
||||
|
|
@ -512,7 +549,13 @@ xchk_xattr_rec(
|
|||
xchk_da_set_corrupt(ds, level);
|
||||
goto out;
|
||||
}
|
||||
calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
|
||||
if (ent->flags & XFS_ATTR_PARENT) {
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
goto out;
|
||||
}
|
||||
calc_hash = xfs_attr_hashval(mp, ent->flags, rentry->name,
|
||||
rentry->namelen, NULL,
|
||||
be32_to_cpu(rentry->valuelen));
|
||||
}
|
||||
if (calc_hash != hash)
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
|
|
@ -556,6 +599,15 @@ xchk_xattr_check_sf(
|
|||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Shortform entries do not set LOCAL or INCOMPLETE, so the
|
||||
* only valid flag bits here are for namespaces.
|
||||
*/
|
||||
if (sfe->flags & ~XFS_ATTR_NSP_ONDISK_MASK) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!xchk_xattr_set_map(sc, ab->usedmap,
|
||||
(char *)sfe - (char *)sf,
|
||||
sizeof(struct xfs_attr_sf_entry))) {
|
||||
|
|
@ -588,16 +640,6 @@ int
|
|||
xchk_xattr(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xchk_xattr sx = {
|
||||
.sc = sc,
|
||||
.context = {
|
||||
.dp = sc->ip,
|
||||
.tp = sc->tp,
|
||||
.resynch = 1,
|
||||
.put_listent = xchk_xattr_listent,
|
||||
.allow_incomplete = true,
|
||||
},
|
||||
};
|
||||
xfs_dablk_t last_checked = -1U;
|
||||
int error = 0;
|
||||
|
||||
|
|
@ -626,12 +668,6 @@ xchk_xattr(
|
|||
/*
|
||||
* Look up every xattr in this file by name and hash.
|
||||
*
|
||||
* Use the backend implementation of xfs_attr_list to call
|
||||
* xchk_xattr_listent on every attribute key in this inode.
|
||||
* In other words, we use the same iterator/callback mechanism
|
||||
* that listattr uses to scrub extended attributes, though in our
|
||||
* _listent function, we check the value of the attribute.
|
||||
*
|
||||
* The VFS only locks i_rwsem when modifying attrs, so keep all
|
||||
* three locks held because that's the only way to ensure we're
|
||||
* the only thread poking into the da btree. We traverse the da
|
||||
|
|
@ -639,13 +675,9 @@ xchk_xattr(
|
|||
* iteration, which doesn't really follow the usual buffer
|
||||
* locking order.
|
||||
*/
|
||||
error = xfs_attr_list_ilocked(&sx.context);
|
||||
error = xchk_xattr_walk(sc, sc->ip, xchk_xattr_actor, NULL, NULL);
|
||||
if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
|
||||
return error;
|
||||
|
||||
/* Did our listent function try to return any errors? */
|
||||
if (sx.context.seen_enough < 0)
|
||||
return sx.context.seen_enough;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,9 +16,16 @@ struct xchk_xattr_buf {
|
|||
/* Bitmap of free space in xattr leaf blocks. */
|
||||
unsigned long *freemap;
|
||||
|
||||
/* Memory buffer used to hold salvaged xattr names. */
|
||||
unsigned char *name;
|
||||
|
||||
/* Memory buffer used to extract xattr values. */
|
||||
void *value;
|
||||
size_t value_sz;
|
||||
};
|
||||
|
||||
bool xchk_xattr_set_map(struct xfs_scrub *sc, unsigned long *map,
|
||||
unsigned int start, unsigned int len);
|
||||
int xchk_setup_xattr_buf(struct xfs_scrub *sc, size_t value_size);
|
||||
|
||||
#endif /* __XFS_SCRUB_ATTR_H__ */
|
||||
|
|
|
|||
1663
fs/xfs/scrub/attr_repair.c
Normal file
1663
fs/xfs/scrub/attr_repair.c
Normal file
File diff suppressed because it is too large
Load diff
15
fs/xfs/scrub/attr_repair.h
Normal file
15
fs/xfs/scrub/attr_repair.h
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2018-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_ATTR_REPAIR_H__
|
||||
#define __XFS_SCRUB_ATTR_REPAIR_H__
|
||||
|
||||
struct xrep_tempexch;
|
||||
|
||||
int xrep_xattr_swap(struct xfs_scrub *sc, struct xrep_tempexch *tx);
|
||||
int xrep_xattr_reset_fork(struct xfs_scrub *sc);
|
||||
int xrep_xattr_reset_tempfile_fork(struct xfs_scrub *sc);
|
||||
|
||||
#endif /* __XFS_SCRUB_ATTR_REPAIR_H__ */
|
||||
|
|
@ -40,22 +40,23 @@ struct xbitmap64_node {
|
|||
* These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
|
||||
* forward-declare them anyway for clarity.
|
||||
*/
|
||||
static inline void
|
||||
static inline __maybe_unused void
|
||||
xbitmap64_tree_insert(struct xbitmap64_node *node, struct rb_root_cached *root);
|
||||
|
||||
static inline void
|
||||
static inline __maybe_unused void
|
||||
xbitmap64_tree_remove(struct xbitmap64_node *node, struct rb_root_cached *root);
|
||||
|
||||
static inline struct xbitmap64_node *
|
||||
static inline __maybe_unused struct xbitmap64_node *
|
||||
xbitmap64_tree_iter_first(struct rb_root_cached *root, uint64_t start,
|
||||
uint64_t last);
|
||||
|
||||
static inline struct xbitmap64_node *
|
||||
static inline __maybe_unused struct xbitmap64_node *
|
||||
xbitmap64_tree_iter_next(struct xbitmap64_node *node, uint64_t start,
|
||||
uint64_t last);
|
||||
|
||||
INTERVAL_TREE_DEFINE(struct xbitmap64_node, bn_rbnode, uint64_t,
|
||||
__bn_subtree_last, START, LAST, static inline, xbitmap64_tree)
|
||||
__bn_subtree_last, START, LAST, static inline __maybe_unused,
|
||||
xbitmap64_tree)
|
||||
|
||||
/* Iterate each interval of a bitmap. Do not change the bitmap. */
|
||||
#define for_each_xbitmap64_extent(bn, bitmap) \
|
||||
|
|
@ -314,22 +315,23 @@ struct xbitmap32_node {
|
|||
* These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
|
||||
* forward-declare them anyway for clarity.
|
||||
*/
|
||||
static inline void
|
||||
static inline __maybe_unused void
|
||||
xbitmap32_tree_insert(struct xbitmap32_node *node, struct rb_root_cached *root);
|
||||
|
||||
static inline void
|
||||
static inline __maybe_unused void
|
||||
xbitmap32_tree_remove(struct xbitmap32_node *node, struct rb_root_cached *root);
|
||||
|
||||
static inline struct xbitmap32_node *
|
||||
static inline __maybe_unused struct xbitmap32_node *
|
||||
xbitmap32_tree_iter_first(struct rb_root_cached *root, uint32_t start,
|
||||
uint32_t last);
|
||||
|
||||
static inline struct xbitmap32_node *
|
||||
static inline __maybe_unused struct xbitmap32_node *
|
||||
xbitmap32_tree_iter_next(struct xbitmap32_node *node, uint32_t start,
|
||||
uint32_t last);
|
||||
|
||||
INTERVAL_TREE_DEFINE(struct xbitmap32_node, bn_rbnode, uint32_t,
|
||||
__bn_subtree_last, START, LAST, static inline, xbitmap32_tree)
|
||||
__bn_subtree_last, START, LAST, static inline __maybe_unused,
|
||||
xbitmap32_tree)
|
||||
|
||||
/* Iterate each interval of a bitmap. Do not change the bitmap. */
|
||||
#define for_each_xbitmap32_extent(bn, bitmap) \
|
||||
|
|
|
|||
|
|
@ -31,6 +31,8 @@
|
|||
#include "xfs_ag.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_quota.h"
|
||||
#include "xfs_exchmaps.h"
|
||||
#include "xfs_rtbitmap.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/trace.h"
|
||||
|
|
@ -445,7 +447,7 @@ xchk_perag_read_headers(
|
|||
{
|
||||
int error;
|
||||
|
||||
error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
|
||||
error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
|
||||
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
|
||||
return error;
|
||||
|
||||
|
|
@ -781,7 +783,7 @@ xchk_iget(
|
|||
{
|
||||
ASSERT(sc->tp != NULL);
|
||||
|
||||
return xfs_iget(sc->mp, sc->tp, inum, XFS_IGET_UNTRUSTED, 0, ipp);
|
||||
return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -827,13 +829,13 @@ again:
|
|||
* in the iget cache miss path.
|
||||
*/
|
||||
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
|
||||
error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
|
||||
error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
|
||||
xfs_perag_put(pag);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_iget(mp, tp, inum,
|
||||
XFS_IGET_NORETRY | XFS_IGET_UNTRUSTED, 0, ipp);
|
||||
error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
|
||||
ipp);
|
||||
if (error == -EAGAIN) {
|
||||
/*
|
||||
* The inode may be in core but temporarily unavailable and may
|
||||
|
|
@ -1060,12 +1062,6 @@ xchk_irele(
|
|||
spin_lock(&VFS_I(ip)->i_lock);
|
||||
VFS_I(ip)->i_state &= ~I_DONTCACHE;
|
||||
spin_unlock(&VFS_I(ip)->i_lock);
|
||||
} else if (atomic_read(&VFS_I(ip)->i_count) == 1) {
|
||||
/*
|
||||
* If this is the last reference to the inode and the caller
|
||||
* permits it, set DONTCACHE to avoid thrashing.
|
||||
*/
|
||||
d_mark_dontcache(VFS_I(ip));
|
||||
}
|
||||
|
||||
xfs_irele(ip);
|
||||
|
|
@ -1202,27 +1198,12 @@ xchk_metadata_inode_subtype(
|
|||
struct xfs_scrub *sc,
|
||||
unsigned int scrub_type)
|
||||
{
|
||||
__u32 smtype = sc->sm->sm_type;
|
||||
unsigned int sick_mask = sc->sick_mask;
|
||||
struct xfs_scrub_subord *sub;
|
||||
int error;
|
||||
|
||||
sc->sm->sm_type = scrub_type;
|
||||
|
||||
switch (scrub_type) {
|
||||
case XFS_SCRUB_TYPE_INODE:
|
||||
error = xchk_inode(sc);
|
||||
break;
|
||||
case XFS_SCRUB_TYPE_BMBTD:
|
||||
error = xchk_bmap_data(sc);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
error = -EFSCORRUPTED;
|
||||
break;
|
||||
}
|
||||
|
||||
sc->sick_mask = sick_mask;
|
||||
sc->sm->sm_type = smtype;
|
||||
sub = xchk_scrub_create_subord(sc, scrub_type);
|
||||
error = sub->sc.ops->scrub(&sub->sc);
|
||||
xchk_scrub_free_subord(sub);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,31 +6,6 @@
|
|||
#ifndef __XFS_SCRUB_COMMON_H__
|
||||
#define __XFS_SCRUB_COMMON_H__
|
||||
|
||||
/*
|
||||
* We /could/ terminate a scrub/repair operation early. If we're not
|
||||
* in a good place to continue (fatal signal, etc.) then bail out.
|
||||
* Note that we're careful not to make any judgements about *error.
|
||||
*/
|
||||
static inline bool
|
||||
xchk_should_terminate(
|
||||
struct xfs_scrub *sc,
|
||||
int *error)
|
||||
{
|
||||
/*
|
||||
* If preemption is disabled, we need to yield to the scheduler every
|
||||
* few seconds so that we don't run afoul of the soft lockup watchdog
|
||||
* or RCU stall detector.
|
||||
*/
|
||||
cond_resched();
|
||||
|
||||
if (fatal_signal_pending(current)) {
|
||||
if (*error == 0)
|
||||
*error = -EINTR;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
|
||||
int xchk_trans_alloc_empty(struct xfs_scrub *sc);
|
||||
void xchk_trans_cancel(struct xfs_scrub *sc);
|
||||
|
|
@ -92,6 +67,7 @@ int xchk_setup_directory(struct xfs_scrub *sc);
|
|||
int xchk_setup_xattr(struct xfs_scrub *sc);
|
||||
int xchk_setup_symlink(struct xfs_scrub *sc);
|
||||
int xchk_setup_parent(struct xfs_scrub *sc);
|
||||
int xchk_setup_dirtree(struct xfs_scrub *sc);
|
||||
#ifdef CONFIG_XFS_RT
|
||||
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
|
||||
int xchk_setup_rtsummary(struct xfs_scrub *sc);
|
||||
|
|
@ -212,6 +188,7 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
|
|||
}
|
||||
|
||||
bool xchk_dir_looks_zapped(struct xfs_inode *dp);
|
||||
bool xchk_pptr_looks_zapped(struct xfs_inode *ip);
|
||||
|
||||
#ifdef CONFIG_XFS_ONLINE_REPAIR
|
||||
/* Decide if a repair is required. */
|
||||
|
|
|
|||
37
fs/xfs/scrub/dab_bitmap.h
Normal file
37
fs/xfs/scrub/dab_bitmap.h
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_DAB_BITMAP_H__
|
||||
#define __XFS_SCRUB_DAB_BITMAP_H__
|
||||
|
||||
/* Bitmaps, but for type-checked for xfs_dablk_t */
|
||||
|
||||
struct xdab_bitmap {
|
||||
struct xbitmap32 dabitmap;
|
||||
};
|
||||
|
||||
static inline void xdab_bitmap_init(struct xdab_bitmap *bitmap)
|
||||
{
|
||||
xbitmap32_init(&bitmap->dabitmap);
|
||||
}
|
||||
|
||||
static inline void xdab_bitmap_destroy(struct xdab_bitmap *bitmap)
|
||||
{
|
||||
xbitmap32_destroy(&bitmap->dabitmap);
|
||||
}
|
||||
|
||||
static inline int xdab_bitmap_set(struct xdab_bitmap *bitmap,
|
||||
xfs_dablk_t dabno, xfs_extlen_t len)
|
||||
{
|
||||
return xbitmap32_set(&bitmap->dabitmap, dabno, len);
|
||||
}
|
||||
|
||||
static inline bool xdab_bitmap_test(struct xdab_bitmap *bitmap,
|
||||
xfs_dablk_t dabno, xfs_extlen_t *len)
|
||||
{
|
||||
return xbitmap32_test(&bitmap->dabitmap, dabno, len);
|
||||
}
|
||||
|
||||
#endif /* __XFS_SCRUB_DAB_BITMAP_H__ */
|
||||
|
|
@ -78,6 +78,22 @@ xchk_da_set_corrupt(
|
|||
__return_address);
|
||||
}
|
||||
|
||||
/* Flag a da btree node in need of optimization. */
|
||||
void
|
||||
xchk_da_set_preen(
|
||||
struct xchk_da_btree *ds,
|
||||
int level)
|
||||
{
|
||||
struct xfs_scrub *sc = ds->sc;
|
||||
|
||||
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
|
||||
trace_xchk_fblock_preen(sc, ds->dargs.whichfork,
|
||||
xfs_dir2_da_to_db(ds->dargs.geo,
|
||||
ds->state->path.blk[level].blkno),
|
||||
__return_address);
|
||||
}
|
||||
|
||||
/* Find an entry at a certain level in a da btree. */
|
||||
static struct xfs_da_node_entry *
|
||||
xchk_da_btree_node_entry(
|
||||
struct xchk_da_btree *ds,
|
||||
|
|
@ -320,6 +336,7 @@ xchk_da_btree_block(
|
|||
struct xfs_da3_blkinfo *hdr3;
|
||||
struct xfs_da_args *dargs = &ds->dargs;
|
||||
struct xfs_inode *ip = ds->dargs.dp;
|
||||
xfs_failaddr_t fa;
|
||||
xfs_ino_t owner;
|
||||
int *pmaxrecs;
|
||||
struct xfs_da3_icnode_hdr nodehdr;
|
||||
|
|
@ -442,6 +459,12 @@ xchk_da_btree_block(
|
|||
goto out_freebp;
|
||||
}
|
||||
|
||||
fa = xfs_da3_header_check(blk->bp, dargs->owner);
|
||||
if (fa) {
|
||||
xchk_da_set_corrupt(ds, level);
|
||||
goto out_freebp;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we've been handed a block that is below the dabtree root, does
|
||||
* its hashval match what the parent block expected to see?
|
||||
|
|
@ -494,6 +517,7 @@ xchk_da_btree(
|
|||
ds->dargs.whichfork = whichfork;
|
||||
ds->dargs.trans = sc->tp;
|
||||
ds->dargs.op_flags = XFS_DA_OP_OKNOENT;
|
||||
ds->dargs.owner = sc->ip->i_ino;
|
||||
ds->state = xfs_da_state_alloc(&ds->dargs);
|
||||
ds->sc = sc;
|
||||
ds->private = private;
|
||||
|
|
|
|||
|
|
@ -35,6 +35,9 @@ bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
|
|||
|
||||
/* Check for da btree corruption. */
|
||||
void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
|
||||
void xchk_da_set_preen(struct xchk_da_btree *ds, int level);
|
||||
|
||||
void xchk_da_set_preen(struct xchk_da_btree *ds, int level);
|
||||
|
||||
int xchk_da_btree_hash(struct xchk_da_btree *ds, int level, __be32 *hashp);
|
||||
int xchk_da_btree(struct xfs_scrub *sc, int whichfork,
|
||||
|
|
|
|||
|
|
@ -16,22 +16,70 @@
|
|||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_health.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/dabtree.h"
|
||||
#include "scrub/readdir.h"
|
||||
#include "scrub/health.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/xfblob.h"
|
||||
|
||||
/* Set us up to scrub directories. */
|
||||
int
|
||||
xchk_setup_directory(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (xchk_could_repair(sc)) {
|
||||
error = xrep_setup_directory(sc);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return xchk_setup_inode_contents(sc, 0);
|
||||
}
|
||||
|
||||
/* Directories */
|
||||
|
||||
/* Deferred directory entry that we saved for later. */
|
||||
struct xchk_dirent {
|
||||
/* Cookie for retrieval of the dirent name. */
|
||||
xfblob_cookie name_cookie;
|
||||
|
||||
/* Child inode number. */
|
||||
xfs_ino_t ino;
|
||||
|
||||
/* Length of the pptr name. */
|
||||
uint8_t namelen;
|
||||
};
|
||||
|
||||
struct xchk_dir {
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
/* information for parent pointer validation. */
|
||||
struct xfs_parent_rec pptr_rec;
|
||||
struct xfs_da_args pptr_args;
|
||||
|
||||
/* Fixed-size array of xchk_dirent structures. */
|
||||
struct xfarray *dir_entries;
|
||||
|
||||
/* Blobs containing dirent names. */
|
||||
struct xfblob *dir_names;
|
||||
|
||||
/* If we've cycled the ILOCK, we must revalidate deferred dirents. */
|
||||
bool need_revalidate;
|
||||
|
||||
/* Name buffer for dirent revalidation. */
|
||||
struct xfs_name xname;
|
||||
uint8_t namebuf[MAXNAMELEN];
|
||||
};
|
||||
|
||||
/* Scrub a directory entry. */
|
||||
|
||||
/* Check that an inode's mode matches a given XFS_DIR3_FT_* type. */
|
||||
|
|
@ -54,6 +102,108 @@ xchk_dir_check_ftype(
|
|||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to lock a child file for checking parent pointers. Returns the inode
|
||||
* flags for the locks we now hold, or zero if we failed.
|
||||
*/
|
||||
STATIC unsigned int
|
||||
xchk_dir_lock_child(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
|
||||
return 0;
|
||||
|
||||
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
|
||||
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!xfs_inode_has_attr_fork(ip) || !xfs_need_iread_extents(&ip->i_af))
|
||||
return XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED;
|
||||
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
|
||||
if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
|
||||
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL;
|
||||
}
|
||||
|
||||
/* Check the backwards link (parent pointer) associated with this dirent. */
|
||||
STATIC int
|
||||
xchk_dir_parent_pointer(
|
||||
struct xchk_dir *sd,
|
||||
const struct xfs_name *name,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_scrub *sc = sd->sc;
|
||||
int error;
|
||||
|
||||
xfs_inode_to_parent_rec(&sd->pptr_rec, sc->ip);
|
||||
error = xfs_parent_lookup(sc->tp, ip, name, &sd->pptr_rec,
|
||||
&sd->pptr_args);
|
||||
if (error == -ENOATTR)
|
||||
xchk_fblock_xref_set_corrupt(sc, XFS_DATA_FORK, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Look for a parent pointer matching this dirent, if the child isn't busy. */
|
||||
STATIC int
|
||||
xchk_dir_check_pptr_fast(
|
||||
struct xchk_dir *sd,
|
||||
xfs_dir2_dataptr_t dapos,
|
||||
const struct xfs_name *name,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_scrub *sc = sd->sc;
|
||||
unsigned int lockmode;
|
||||
int error;
|
||||
|
||||
/* dot and dotdot entries do not have parent pointers */
|
||||
if (xfs_dir2_samename(name, &xfs_name_dot) ||
|
||||
xfs_dir2_samename(name, &xfs_name_dotdot))
|
||||
return 0;
|
||||
|
||||
/* No self-referential non-dot or dotdot dirents. */
|
||||
if (ip == sc->ip) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
/* Try to lock the inode. */
|
||||
lockmode = xchk_dir_lock_child(sc, ip);
|
||||
if (!lockmode) {
|
||||
struct xchk_dirent save_de = {
|
||||
.namelen = name->len,
|
||||
.ino = ip->i_ino,
|
||||
};
|
||||
|
||||
/* Couldn't lock the inode, so save the dirent for later. */
|
||||
trace_xchk_dir_defer(sc->ip, name, ip->i_ino);
|
||||
|
||||
error = xfblob_storename(sd->dir_names, &save_de.name_cookie,
|
||||
name);
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
|
||||
&error))
|
||||
return error;
|
||||
|
||||
error = xfarray_append(sd->dir_entries, &save_de);
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
|
||||
&error))
|
||||
return error;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = xchk_dir_parent_pointer(sd, name, ip);
|
||||
xfs_iunlock(ip, lockmode);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scrub a single directory entry.
|
||||
*
|
||||
|
|
@ -71,6 +221,7 @@ xchk_dir_actor(
|
|||
{
|
||||
struct xfs_mount *mp = dp->i_mount;
|
||||
struct xfs_inode *ip;
|
||||
struct xchk_dir *sd = priv;
|
||||
xfs_ino_t lookup_ino;
|
||||
xfs_dablk_t offset;
|
||||
int error = 0;
|
||||
|
|
@ -137,6 +288,14 @@ xchk_dir_actor(
|
|||
goto out;
|
||||
|
||||
xchk_dir_check_ftype(sc, offset, ip, name->type);
|
||||
|
||||
if (xfs_has_parent(mp)) {
|
||||
error = xchk_dir_check_pptr_fast(sd, dapos, name, ip);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
}
|
||||
|
||||
out_rele:
|
||||
xchk_irele(sc, ip);
|
||||
out:
|
||||
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
|
|
@ -196,8 +355,8 @@ xchk_dir_rec(
|
|||
xchk_da_set_corrupt(ds, level);
|
||||
goto out;
|
||||
}
|
||||
error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno,
|
||||
XFS_DABUF_MAP_HOLE_OK, &bp);
|
||||
error = xfs_dir3_data_read(ds->dargs.trans, dp, ds->dargs.owner,
|
||||
rec_bno, XFS_DABUF_MAP_HOLE_OK, &bp);
|
||||
if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
|
||||
&error))
|
||||
goto out;
|
||||
|
|
@ -315,10 +474,11 @@ xchk_directory_data_bestfree(
|
|||
/* dir block format */
|
||||
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
|
||||
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
|
||||
error = xfs_dir3_block_read(sc->tp, sc->ip, sc->ip->i_ino, &bp);
|
||||
} else {
|
||||
/* dir data format */
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, 0, &bp);
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip, sc->ip->i_ino, lblk,
|
||||
0, &bp);
|
||||
}
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
|
||||
goto out;
|
||||
|
|
@ -470,7 +630,7 @@ xchk_directory_leaf1_bestfree(
|
|||
int error;
|
||||
|
||||
/* Read the free space block. */
|
||||
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
|
||||
error = xfs_dir3_leaf_read(sc->tp, sc->ip, sc->ip->i_ino, lblk, &bp);
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
|
||||
return error;
|
||||
xchk_buffer_recheck(sc, bp);
|
||||
|
|
@ -531,10 +691,9 @@ xchk_directory_leaf1_bestfree(
|
|||
/* Check all the bestfree entries. */
|
||||
for (i = 0; i < bestcount; i++, bestp++) {
|
||||
best = be16_to_cpu(*bestp);
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip,
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip, args->owner,
|
||||
xfs_dir2_db_to_da(args->geo, i),
|
||||
XFS_DABUF_MAP_HOLE_OK,
|
||||
&dbp);
|
||||
XFS_DABUF_MAP_HOLE_OK, &dbp);
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
|
||||
&error))
|
||||
break;
|
||||
|
|
@ -577,7 +736,7 @@ xchk_directory_free_bestfree(
|
|||
int error;
|
||||
|
||||
/* Read the free space block */
|
||||
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
|
||||
error = xfs_dir2_free_read(sc->tp, sc->ip, sc->ip->i_ino, lblk, &bp);
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
|
||||
return error;
|
||||
xchk_buffer_recheck(sc, bp);
|
||||
|
|
@ -597,7 +756,7 @@ xchk_directory_free_bestfree(
|
|||
stale++;
|
||||
continue;
|
||||
}
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip,
|
||||
error = xfs_dir3_data_read(sc->tp, sc->ip, args->owner,
|
||||
(freehdr.firstdb + i) * args->geo->fsbcount,
|
||||
0, &dbp);
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
|
||||
|
|
@ -621,10 +780,11 @@ xchk_directory_blocks(
|
|||
{
|
||||
struct xfs_bmbt_irec got;
|
||||
struct xfs_da_args args = {
|
||||
.dp = sc ->ip,
|
||||
.dp = sc->ip,
|
||||
.whichfork = XFS_DATA_FORK,
|
||||
.geo = sc->mp->m_dir_geo,
|
||||
.trans = sc->tp,
|
||||
.owner = sc->ip->i_ino,
|
||||
};
|
||||
struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
|
@ -648,7 +808,8 @@ xchk_directory_blocks(
|
|||
free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);
|
||||
|
||||
/* Is this a block dir? */
|
||||
error = xfs_dir2_isblock(&args, &is_block);
|
||||
if (xfs_dir2_format(&args, &error) == XFS_DIR2_FMT_BLOCK)
|
||||
is_block = true;
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
|
||||
goto out;
|
||||
|
||||
|
|
@ -752,11 +913,148 @@ out:
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Revalidate a dirent that we collected in the past but couldn't check because
|
||||
* of lock contention. Returns 0 if the dirent is still valid, -ENOENT if it
|
||||
* has gone away on us, or a negative errno.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_dir_revalidate_dirent(
|
||||
struct xchk_dir *sd,
|
||||
const struct xfs_name *xname,
|
||||
xfs_ino_t ino)
|
||||
{
|
||||
struct xfs_scrub *sc = sd->sc;
|
||||
xfs_ino_t child_ino;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Look up the directory entry. If we get -ENOENT, the directory entry
|
||||
* went away and there's nothing to revalidate. Return any other
|
||||
* error.
|
||||
*/
|
||||
error = xchk_dir_lookup(sc, sc->ip, xname, &child_ino);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* The inode number changed, nothing to revalidate. */
|
||||
if (ino != child_ino)
|
||||
return -ENOENT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check a directory entry's parent pointers the slow way, which means we cycle
|
||||
* locks a bunch and put up with revalidation until we get it done.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_dir_slow_dirent(
|
||||
struct xchk_dir *sd,
|
||||
struct xchk_dirent *dirent,
|
||||
const struct xfs_name *xname)
|
||||
{
|
||||
struct xfs_scrub *sc = sd->sc;
|
||||
struct xfs_inode *ip;
|
||||
unsigned int lockmode;
|
||||
int error;
|
||||
|
||||
/* Check that the deferred dirent still exists. */
|
||||
if (sd->need_revalidate) {
|
||||
error = xchk_dir_revalidate_dirent(sd, xname, dirent->ino);
|
||||
if (error == -ENOENT)
|
||||
return 0;
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
|
||||
&error))
|
||||
return error;
|
||||
}
|
||||
|
||||
error = xchk_iget(sc, dirent->ino, &ip);
|
||||
if (error == -EINVAL || error == -ENOENT) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
|
||||
return 0;
|
||||
}
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If we can grab both IOLOCK and ILOCK of the alleged child, we can
|
||||
* proceed with the validation.
|
||||
*/
|
||||
lockmode = xchk_dir_lock_child(sc, ip);
|
||||
if (lockmode) {
|
||||
trace_xchk_dir_slowpath(sc->ip, xname, ip->i_ino);
|
||||
goto check_pptr;
|
||||
}
|
||||
|
||||
/*
|
||||
* We couldn't lock the child file. Drop all the locks and try to
|
||||
* get them again, one at a time.
|
||||
*/
|
||||
xchk_iunlock(sc, sc->ilock_flags);
|
||||
sd->need_revalidate = true;
|
||||
|
||||
trace_xchk_dir_ultraslowpath(sc->ip, xname, ip->i_ino);
|
||||
|
||||
error = xchk_dir_trylock_for_pptrs(sc, ip, &lockmode);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
/* Revalidate, since we just cycled the locks. */
|
||||
error = xchk_dir_revalidate_dirent(sd, xname, dirent->ino);
|
||||
if (error == -ENOENT) {
|
||||
error = 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
|
||||
goto out_unlock;
|
||||
|
||||
check_pptr:
|
||||
error = xchk_dir_parent_pointer(sd, xname, ip);
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, lockmode);
|
||||
out_rele:
|
||||
xchk_irele(sc, ip);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Check all the dirents that we deferred the first time around. */
|
||||
STATIC int
|
||||
xchk_dir_finish_slow_dirents(
|
||||
struct xchk_dir *sd)
|
||||
{
|
||||
xfarray_idx_t array_cur;
|
||||
int error;
|
||||
|
||||
foreach_xfarray_idx(sd->dir_entries, array_cur) {
|
||||
struct xchk_dirent dirent;
|
||||
|
||||
if (sd->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
return 0;
|
||||
|
||||
error = xfarray_load(sd->dir_entries, array_cur, &dirent);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfblob_loadname(sd->dir_names, dirent.name_cookie,
|
||||
&sd->xname, dirent.namelen);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xchk_dir_slow_dirent(sd, &dirent, &sd->xname);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Scrub a whole directory. */
|
||||
int
|
||||
xchk_directory(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xchk_dir *sd;
|
||||
int error;
|
||||
|
||||
if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
|
||||
|
|
@ -789,9 +1087,60 @@ xchk_directory(
|
|||
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
return 0;
|
||||
|
||||
sd = kvzalloc(sizeof(struct xchk_dir), XCHK_GFP_FLAGS);
|
||||
if (!sd)
|
||||
return -ENOMEM;
|
||||
sd->sc = sc;
|
||||
sd->xname.name = sd->namebuf;
|
||||
|
||||
if (xfs_has_parent(sc->mp)) {
|
||||
char *descr;
|
||||
|
||||
/*
|
||||
* Set up some staging memory for dirents that we can't check
|
||||
* due to locking contention.
|
||||
*/
|
||||
descr = xchk_xfile_ino_descr(sc, "slow directory entries");
|
||||
error = xfarray_create(descr, 0, sizeof(struct xchk_dirent),
|
||||
&sd->dir_entries);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
goto out_sd;
|
||||
|
||||
descr = xchk_xfile_ino_descr(sc, "slow directory entry names");
|
||||
error = xfblob_create(descr, &sd->dir_names);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
goto out_entries;
|
||||
}
|
||||
|
||||
/* Look up every name in this directory by hash. */
|
||||
error = xchk_dir_walk(sc, sc->ip, xchk_dir_actor, NULL);
|
||||
if (error && error != -ECANCELED)
|
||||
error = xchk_dir_walk(sc, sc->ip, xchk_dir_actor, sd);
|
||||
if (error == -ECANCELED)
|
||||
error = 0;
|
||||
if (error)
|
||||
goto out_names;
|
||||
|
||||
if (xfs_has_parent(sc->mp)) {
|
||||
error = xchk_dir_finish_slow_dirents(sd);
|
||||
if (error == -ETIMEDOUT) {
|
||||
/* Couldn't grab a lock, scrub was marked incomplete */
|
||||
error = 0;
|
||||
goto out_names;
|
||||
}
|
||||
if (error)
|
||||
goto out_names;
|
||||
}
|
||||
|
||||
out_names:
|
||||
if (sd->dir_names)
|
||||
xfblob_destroy(sd->dir_names);
|
||||
out_entries:
|
||||
if (sd->dir_entries)
|
||||
xfarray_destroy(sd->dir_entries);
|
||||
out_sd:
|
||||
kvfree(sd);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/* If the dir is clean, it is clearly not zapped. */
|
||||
|
|
|
|||
1958
fs/xfs/scrub/dir_repair.c
Normal file
1958
fs/xfs/scrub/dir_repair.c
Normal file
File diff suppressed because it is too large
Load diff
985
fs/xfs/scrub/dirtree.c
Normal file
985
fs/xfs/scrub/dirtree.c
Normal file
|
|
@ -0,0 +1,985 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/bitmap.h"
|
||||
#include "scrub/ino_bitmap.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/xfblob.h"
|
||||
#include "scrub/listxattr.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/orphanage.h"
|
||||
#include "scrub/dirtree.h"
|
||||
|
||||
/*
|
||||
* Directory Tree Structure Validation
|
||||
* ===================================
|
||||
*
|
||||
* Validating the tree qualities of the directory tree structure can be
|
||||
* difficult. If the tree is frozen, running a depth (or breadth) first search
|
||||
* and marking a bitmap suffices to determine if there is a cycle. XORing the
|
||||
* mark bitmap with the inode bitmap afterwards tells us if there are
|
||||
* disconnected cycles. If the tree is not frozen, directory updates can move
|
||||
* subtrees across the scanner wavefront, which complicates the design greatly.
|
||||
*
|
||||
* Directory parent pointers change that by enabling an incremental approach to
|
||||
* validation of the tree structure. Instead of using one thread to scan the
|
||||
* entire filesystem, we instead can have multiple threads walking individual
|
||||
* subdirectories upwards to the root. In a perfect world, the IOLOCK would
|
||||
* suffice to stabilize two directories in a parent -> child relationship.
|
||||
* Unfortunately, the VFS does not take the IOLOCK when moving a child
|
||||
* subdirectory, so we instead synchronize on ILOCK and use dirent update hooks
|
||||
* to detect a race. If a race occurs in a path, we restart the scan.
|
||||
*
|
||||
* If the walk terminates without reaching the root, we know the path is
|
||||
* disconnected and ought to be attached to the lost and found. If on the walk
|
||||
* we find the same subdir that we're scanning, we know this is a cycle and
|
||||
* should delete an incoming edge. If we find multiple paths to the root, we
|
||||
* know to delete an incoming edge.
|
||||
*
|
||||
* There are two big hitches with this approach: first, all file link counts
|
||||
* must be correct to prevent other writers from doing the wrong thing with the
|
||||
* directory tree structure. Second, because we're walking upwards in a tree
|
||||
* of arbitrary depth, we cannot hold all the ILOCKs. Instead, we will use a
|
||||
* directory update hook to invalidate the scan results if one of the paths
|
||||
* we've scanned has changed.
|
||||
*/
|
||||
|
||||
/*
 * Clean up the dirtree checking resources.  Called via sc->buf_cleanup when
 * the scrub context tears down, so it must tolerate a partially initialized
 * scan.
 */
STATIC void
xchk_dirtree_buf_cleanup(
	void			*buf)
{
	struct xchk_dirtree	*dl = buf;
	struct xchk_dirpath	*path, *n;

	/*
	 * The dirent update hook is only installed once the scan proper has
	 * started and recorded scan_ino; don't detach a hook that was never
	 * attached.
	 */
	if (dl->scan_ino != NULLFSINO)
		xfs_dir_hook_del(dl->sc->mp, &dl->dhook);

	/* Free every in-core path record and its seen-inode bitmap. */
	xchk_dirtree_for_each_path_safe(dl, path, n) {
		list_del_init(&path->list);
		xino_bitmap_destroy(&path->seen_inodes);
		kfree(path);
	}

	xfblob_destroy(dl->path_names);
	xfarray_destroy(dl->path_steps);
	mutex_destroy(&dl->lock);
}
|
||||
|
||||
/*
 * Set us up to look for directory loops.
 *
 * Allocates the xchk_dirtree scan context, the xfarray of path steps and the
 * xfblob of path names, then loads the inode to be checked.  On success the
 * context is stashed in sc->buf with xchk_dirtree_buf_cleanup registered to
 * tear everything down.  Returns 0 or a negative errno.
 */
int
xchk_setup_dirtree(
	struct xfs_scrub	*sc)
{
	struct xchk_dirtree	*dl;
	char			*descr;
	int			error;

	/* The live-update hook below requires the dirent fsgate. */
	xchk_fsgates_enable(sc, XCHK_FSGATES_DIRENTS);

	if (xchk_could_repair(sc)) {
		error = xrep_setup_dirtree(sc);
		if (error)
			return error;
	}

	dl = kvzalloc(sizeof(struct xchk_dirtree), XCHK_GFP_FLAGS);
	if (!dl)
		return -ENOMEM;
	dl->sc = sc;
	/* Point the scratch xfs_names at their embedded buffers. */
	dl->xname.name = dl->namebuf;
	dl->hook_xname.name = dl->hook_namebuf;
	INIT_LIST_HEAD(&dl->path_list);
	/* NULLFSINO here means "scan not started"; see buf_cleanup. */
	dl->root_ino = NULLFSINO;
	dl->scan_ino = NULLFSINO;
	dl->parent_ino = NULLFSINO;

	mutex_init(&dl->lock);

	descr = xchk_xfile_ino_descr(sc, "dirtree path steps");
	error = xfarray_create(descr, 0, sizeof(struct xchk_dirpath_step),
			&dl->path_steps);
	kfree(descr);
	if (error)
		goto out_dl;

	descr = xchk_xfile_ino_descr(sc, "dirtree path names");
	error = xfblob_create(descr, &dl->path_names);
	kfree(descr);
	if (error)
		goto out_steps;

	error = xchk_setup_inode_contents(sc, 0);
	if (error)
		goto out_names;

	sc->buf = dl;
	sc->buf_cleanup = xchk_dirtree_buf_cleanup;
	return 0;

out_names:
	xfblob_destroy(dl->path_names);
out_steps:
	xfarray_destroy(dl->path_steps);
out_dl:
	mutex_destroy(&dl->lock);
	kvfree(dl);
	return error;
}
|
||||
|
||||
/*
|
||||
* Add the parent pointer described by @dl->pptr to the given path as a new
|
||||
* step. Returns -ELNRNG if the path is too deep.
|
||||
*/
|
||||
int
|
||||
xchk_dirpath_append(
|
||||
struct xchk_dirtree *dl,
|
||||
struct xfs_inode *ip,
|
||||
struct xchk_dirpath *path,
|
||||
const struct xfs_name *name,
|
||||
const struct xfs_parent_rec *pptr)
|
||||
{
|
||||
struct xchk_dirpath_step step = {
|
||||
.pptr_rec = *pptr, /* struct copy */
|
||||
.name_len = name->len,
|
||||
};
|
||||
int error;
|
||||
|
||||
/*
|
||||
* If this path is more than 2 billion steps long, this directory tree
|
||||
* is too far gone to fix.
|
||||
*/
|
||||
if (path->nr_steps >= XFS_MAXLINK)
|
||||
return -ELNRNG;
|
||||
|
||||
error = xfblob_storename(dl->path_names, &step.name_cookie, name);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xino_bitmap_set(&path->seen_inodes, ip->i_ino);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfarray_append(dl->path_steps, &step);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
path->nr_steps++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Create an xchk_path for each parent pointer of the directory that we're
 * scanning.  For each path created, we will eventually try to walk towards
 * the root with the goal of deleting all parents except for one that leads
 * to the root.
 *
 * This is an xchk_xattr_walk callback, so it is invoked once per xattr of
 * @ip and must skip everything that is not a parent pointer.
 *
 * Returns -EFSCORRUPTED to signal that the inode being scanned has a corrupt
 * parent pointer and hence there's no point in continuing; or -ENOSR if there
 * are too many parent pointers for this directory.
 */
STATIC int
xchk_dirtree_create_path(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		attr_flags,
	const unsigned char	*name,
	unsigned int		namelen,
	const void		*value,
	unsigned int		valuelen,
	void			*priv)
{
	struct xfs_name		xname = {
		.name		= name,
		.len		= namelen,
	};
	struct xchk_dirtree	*dl = priv;
	struct xchk_dirpath	*path;
	const struct xfs_parent_rec *rec = value;
	int			error;

	/* Only parent pointer xattrs seed a path. */
	if (!(attr_flags & XFS_ATTR_PARENT))
		return 0;

	/* Validate the parent pointer record before trusting it. */
	error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
			valuelen, NULL, NULL);
	if (error)
		return error;

	/*
	 * If there are more than 2 billion actual parent pointers for this
	 * subdirectory, this fs is too far gone to fix.
	 */
	if (dl->nr_paths >= XFS_MAXLINK)
		return -ENOSR;

	trace_xchk_dirtree_create_path(sc, ip, dl->nr_paths, &xname, rec);

	/*
	 * Create a new xchk_path structure to remember this parent pointer
	 * and record the first name step.
	 */
	path = kmalloc(sizeof(struct xchk_dirpath), XCHK_GFP_FLAGS);
	if (!path)
		return -ENOMEM;

	INIT_LIST_HEAD(&path->list);
	xino_bitmap_init(&path->seen_inodes);
	path->nr_steps = 0;
	path->outcome = XCHK_DIRPATH_SCANNING;

	/* Record the directory being scanned as step zero of this path. */
	error = xchk_dirpath_append(dl, sc->ip, path, &xname, rec);
	if (error)
		goto out_path;

	path->first_step = xfarray_length(dl->path_steps) - 1;
	path->second_step = XFARRAY_NULLIDX;
	path->path_nr = dl->nr_paths;

	list_add_tail(&path->list, &dl->path_list);
	dl->nr_paths++;
	return 0;
out_path:
	kfree(path);
	return error;
}
|
||||
|
||||
/*
 * Validate that the first step of this path still has a corresponding
 * parent pointer in @sc->ip.  We probably dropped @sc->ip's ILOCK while
 * walking towards the roots, which is why this is necessary.
 *
 * This function has a side effect of loading the first parent pointer of this
 * path into the parent pointer scratch pad (@dl->xname/@dl->pptr_rec).  This
 * prepares us to walk up the directory tree towards the root.  Returns
 * -ESTALE if the scan data is now out of date.
 */
STATIC int
xchk_dirpath_revalidate(
	struct xchk_dirtree	*dl,
	struct xchk_dirpath	*path)
{
	struct xfs_scrub	*sc = dl->sc;
	int			error;

	/*
	 * Look up the parent pointer that corresponds to the start of this
	 * path.  If the parent pointer has disappeared on us, dump all the
	 * scan results and try again.
	 */
	error = xfs_parent_lookup(sc->tp, sc->ip, &dl->xname, &dl->pptr_rec,
			&dl->pptr_args);
	if (error == -ENOATTR) {
		trace_xchk_dirpath_disappeared(dl->sc, sc->ip, path->path_nr,
				path->first_step, &dl->xname, &dl->pptr_rec);
		/* Flag the whole scan stale so the caller restarts it. */
		dl->stale = true;
		return -ESTALE;
	}

	return error;
}
|
||||
|
||||
/*
 * Walk the parent pointers of a directory at the end of a path and record
 * the parent that we find in @dl->xname/pptr_rec.
 *
 * This is an xchk_xattr_walk callback.  Returns -EMLINK if a second parent
 * pointer is found, since a subdirectory should have exactly one parent;
 * the caller treats that as corruption rather than a hard error.
 */
STATIC int
xchk_dirpath_find_next_step(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		attr_flags,
	const unsigned char	*name,
	unsigned int		namelen,
	const void		*value,
	unsigned int		valuelen,
	void			*priv)
{
	struct xchk_dirtree	*dl = priv;
	const struct xfs_parent_rec *rec = value;
	int			error;

	/* Ignore xattrs that are not parent pointers. */
	if (!(attr_flags & XFS_ATTR_PARENT))
		return 0;

	/* Validate the parent pointer record before trusting it. */
	error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
			valuelen, NULL, NULL);
	if (error)
		return error;

	/*
	 * If we've already set @dl->pptr_rec, then this directory has multiple
	 * parents.  Signal this back to the caller via -EMLINK.
	 */
	if (dl->parents_found > 0)
		return -EMLINK;

	dl->parents_found++;
	/* Stash name and record in the scratch pad for the next step up. */
	memcpy(dl->namebuf, name, namelen);
	dl->xname.len = namelen;
	dl->pptr_rec = *rec; /* struct copy */
	return 0;
}
|
||||
|
||||
/* Set and log the outcome of a path walk. */
|
||||
static inline void
|
||||
xchk_dirpath_set_outcome(
|
||||
struct xchk_dirtree *dl,
|
||||
struct xchk_dirpath *path,
|
||||
enum xchk_dirpath_outcome outcome)
|
||||
{
|
||||
trace_xchk_dirpath_set_outcome(dl->sc, path->path_nr, path->nr_steps,
|
||||
outcome);
|
||||
|
||||
path->outcome = outcome;
|
||||
}
|
||||
|
||||
/*
 * Scan the directory at the end of this path for its parent directory link.
 * If we find one, extend the path.  Returns -ESTALE if the scan data out of
 * date.  Returns -EFSCORRUPTED if the parent pointer is bad; or -ELNRNG if
 * the path got too deep.
 *
 * Lock ordering here is iget -> attr ILOCK on the parent -> dl->lock; the
 * scan lock is dropped around the xattr walk and retaken afterwards, so
 * dl->stale must be rechecked after every reacquisition.
 */
STATIC int
xchk_dirpath_step_up(
	struct xchk_dirtree	*dl,
	struct xchk_dirpath	*path)
{
	struct xfs_scrub	*sc = dl->sc;
	struct xfs_inode	*dp;
	xfs_ino_t		parent_ino = be64_to_cpu(dl->pptr_rec.p_ino);
	unsigned int		lock_mode;
	int			error;

	/* Grab and lock the parent directory. */
	error = xchk_iget(sc, parent_ino, &dp);
	if (error)
		return error;

	lock_mode = xfs_ilock_attr_map_shared(dp);
	mutex_lock(&dl->lock);

	/* A live update may have invalidated the scan while we blocked. */
	if (dl->stale) {
		error = -ESTALE;
		goto out_scanlock;
	}

	/* We've reached the root directory; the path is ok. */
	if (parent_ino == dl->root_ino) {
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_OK);
		error = 0;
		goto out_scanlock;
	}

	/*
	 * The inode being scanned is its own distant ancestor!  Get rid of
	 * this path.
	 */
	if (parent_ino == sc->ip->i_ino) {
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
		error = 0;
		goto out_scanlock;
	}

	/*
	 * We've seen this inode before during the path walk.  There's a loop
	 * above us in the directory tree.  This probably means that we cannot
	 * continue, but let's keep walking paths to get a full picture.
	 */
	if (xino_bitmap_test(&path->seen_inodes, parent_ino)) {
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_LOOP);
		error = 0;
		goto out_scanlock;
	}

	/* The handle encoded in the parent pointer must match. */
	if (VFS_I(dp)->i_generation != be32_to_cpu(dl->pptr_rec.p_gen)) {
		trace_xchk_dirpath_badgen(dl->sc, dp, path->path_nr,
				path->nr_steps, &dl->xname, &dl->pptr_rec);
		error = -EFSCORRUPTED;
		goto out_scanlock;
	}

	/* Parent pointer must point up to a directory. */
	if (!S_ISDIR(VFS_I(dp)->i_mode)) {
		trace_xchk_dirpath_nondir_parent(dl->sc, dp, path->path_nr,
				path->nr_steps, &dl->xname, &dl->pptr_rec);
		error = -EFSCORRUPTED;
		goto out_scanlock;
	}

	/* Parent cannot be an unlinked directory. */
	if (VFS_I(dp)->i_nlink == 0) {
		trace_xchk_dirpath_unlinked_parent(dl->sc, dp, path->path_nr,
				path->nr_steps, &dl->xname, &dl->pptr_rec);
		error = -EFSCORRUPTED;
		goto out_scanlock;
	}

	/*
	 * If the extended attributes look as though they have been zapped by
	 * the inode record repair code, we cannot scan for parent pointers.
	 */
	if (xchk_pptr_looks_zapped(dp)) {
		error = -EBUSY;
		xchk_set_incomplete(sc);
		goto out_scanlock;
	}

	/*
	 * Walk the parent pointers of @dp to find the parent of this directory
	 * to find the next step in our walk.  If we find that @dp has exactly
	 * one parent, the parent pointer information will be stored in
	 * @dl->pptr_rec.  This prepares us for the next step of the walk.
	 * Drop the scan lock during the walk to avoid holding it across the
	 * xattr iteration.
	 */
	mutex_unlock(&dl->lock);
	dl->parents_found = 0;
	error = xchk_xattr_walk(sc, dp, xchk_dirpath_find_next_step, NULL, dl);
	mutex_lock(&dl->lock);
	if (error == -EFSCORRUPTED || error == -EMLINK ||
	    (!error && dl->parents_found == 0)) {
		/*
		 * Further up the directory tree from @sc->ip, we found a
		 * corrupt parent pointer, multiple parent pointers while
		 * finding this directory's parent, or zero parents despite
		 * having a nonzero link count.  Keep looking for other paths.
		 */
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_CORRUPT);
		error = 0;
		goto out_scanlock;
	}
	if (error)
		goto out_scanlock;

	/* Recheck staleness now that we hold the scan lock again. */
	if (dl->stale) {
		error = -ESTALE;
		goto out_scanlock;
	}

	trace_xchk_dirpath_found_next_step(sc, dp, path->path_nr,
			path->nr_steps, &dl->xname, &dl->pptr_rec);

	/* Append to the path steps */
	error = xchk_dirpath_append(dl, dp, path, &dl->xname, &dl->pptr_rec);
	if (error)
		goto out_scanlock;

	/* Remember where step 1 lives so stale checks can iterate from it. */
	if (path->second_step == XFARRAY_NULLIDX)
		path->second_step = xfarray_length(dl->path_steps) - 1;

out_scanlock:
	mutex_unlock(&dl->lock);
	xfs_iunlock(dp, lock_mode);
	xchk_irele(sc, dp);
	return error;
}
|
||||
|
||||
/*
 * Walk the directory tree upwards towards what is hopefully the root
 * directory, recording path steps as we go.  The current path components are
 * stored in dl->pptr_rec and dl->xname.
 *
 * Returns -ESTALE if the scan data are out of date.  Returns -EFSCORRUPTED
 * only if the direct parent pointer of @sc->ip associated with this path is
 * corrupt.
 *
 * Called with @sc->ip's ILOCK_EXCL and @dl->lock held; both are dropped
 * during the walk and retaken before returning.
 */
STATIC int
xchk_dirpath_walk_upwards(
	struct xchk_dirtree	*dl,
	struct xchk_dirpath	*path)
{
	struct xfs_scrub	*sc = dl->sc;
	int			error;

	ASSERT(sc->ilock_flags & XFS_ILOCK_EXCL);

	/* Reload the start of this path and make sure it's still there. */
	error = xchk_dirpath_revalidate(dl, path);
	if (error)
		return error;

	trace_xchk_dirpath_walk_upwards(sc, sc->ip, path->path_nr, &dl->xname,
			&dl->pptr_rec);

	/*
	 * The inode being scanned is its own direct ancestor!
	 * Get rid of this path.
	 */
	if (be64_to_cpu(dl->pptr_rec.p_ino) == sc->ip->i_ino) {
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
		return 0;
	}

	/*
	 * Drop ILOCK_EXCL on the inode being scanned.  We still hold
	 * IOLOCK_EXCL on it, so it cannot move around or be renamed.
	 *
	 * Beyond this point we're walking up the directory tree, which means
	 * that we can acquire and drop the ILOCK on an alias of sc->ip.  The
	 * ILOCK state is no longer tracked in the scrub context.  Hence we
	 * must drop @sc->ip's ILOCK during the walk.
	 */
	mutex_unlock(&dl->lock);
	xchk_iunlock(sc, XFS_ILOCK_EXCL);

	/*
	 * Take the first step in the walk towards the root by checking the
	 * start of this path, which is a direct parent pointer of @sc->ip.
	 * If we see any kind of error here (including corruptions), the parent
	 * pointer of @sc->ip is corrupt.  Stop the whole scan.
	 */
	error = xchk_dirpath_step_up(dl, path);
	if (error) {
		/* Restore the lock state expected by the caller. */
		xchk_ilock(sc, XFS_ILOCK_EXCL);
		mutex_lock(&dl->lock);
		return error;
	}

	/*
	 * Take steps upward from the second step in this path towards the
	 * root.  If we hit corruption errors here, there's a problem
	 * *somewhere* in the path, but we don't need to stop scanning.
	 */
	while (!error && path->outcome == XCHK_DIRPATH_SCANNING)
		error = xchk_dirpath_step_up(dl, path);

	/* Retake the locks we had, mark paths, etc. */
	xchk_ilock(sc, XFS_ILOCK_EXCL);
	mutex_lock(&dl->lock);
	if (error == -EFSCORRUPTED) {
		/* Corruption above the first step taints only this path. */
		xchk_dirpath_set_outcome(dl, path, XCHK_DIRPATH_CORRUPT);
		error = 0;
	}
	if (!error && dl->stale)
		return -ESTALE;
	return error;
}
|
||||
|
||||
/*
 * Decide if this path step has been touched by this live update.  Returns
 * 1 for yes, 0 for no, or a negative errno.
 *
 * @cursor carries the child inode number along the walk: it enters holding
 * the inode at the bottom of this step and leaves holding the step's parent
 * inode, ready for the next (higher) step.
 */
STATIC int
xchk_dirpath_step_is_stale(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path,
	unsigned int			step_nr,
	xfarray_idx_t			step_idx,
	struct xfs_dir_update_params	*p,
	xfs_ino_t			*cursor)
{
	struct xchk_dirpath_step	step;
	xfs_ino_t			child_ino = *cursor;
	int				error;

	error = xfarray_load(dl->path_steps, step_idx, &step);
	if (error)
		return error;
	/* Advance the cursor to this step's parent for the next iteration. */
	*cursor = be64_to_cpu(step.pptr_rec.p_ino);

	/*
	 * If the parent and child being updated are not the ones mentioned in
	 * this path step, the scan data is still ok.
	 */
	if (p->ip->i_ino != child_ino || p->dp->i_ino != *cursor)
		return 0;

	/*
	 * If the dirent name lengths or byte sequences are different, the scan
	 * data is still ok.
	 */
	if (p->name->len != step.name_len)
		return 0;

	error = xfblob_loadname(dl->path_names, step.name_cookie,
			&dl->hook_xname, step.name_len);
	if (error)
		return error;

	if (memcmp(dl->hook_xname.name, p->name->name, p->name->len) != 0)
		return 0;

	/*
	 * If the update comes from the repair code itself, walk the state
	 * machine forward.
	 */
	if (p->ip->i_ino == dl->scan_ino &&
	    path->outcome == XREP_DIRPATH_ADOPTING) {
		xchk_dirpath_set_outcome(dl, path, XREP_DIRPATH_ADOPTED);
		return 0;
	}

	if (p->ip->i_ino == dl->scan_ino &&
	    path->outcome == XREP_DIRPATH_DELETING) {
		xchk_dirpath_set_outcome(dl, path, XREP_DIRPATH_DELETED);
		return 0;
	}

	/* Exact match, scan data is out of date. */
	trace_xchk_dirpath_changed(dl->sc, path->path_nr, step_nr, p->dp,
			p->ip, p->name);
	return 1;
}
|
||||
|
||||
/*
 * Decide if this path has been touched by this live update.  Returns 1 for
 * yes, 0 for no, or a negative errno.
 */
STATIC int
xchk_dirpath_is_stale(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path,
	struct xfs_dir_update_params	*p)
{
	xfs_ino_t			cursor = dl->scan_ino;
	xfarray_idx_t			idx = path->first_step;
	unsigned int			i;
	int				ret;

	/*
	 * The child being updated has not been seen by this path at all; this
	 * path cannot be stale.
	 */
	if (!xino_bitmap_test(&path->seen_inodes, p->ip->i_ino))
		return 0;

	/* Check step zero, which lives at first_step in the step array. */
	ret = xchk_dirpath_step_is_stale(dl, path, 0, idx, p, &cursor);
	if (ret != 0)
		return ret;

	/*
	 * Steps 1..nr_steps-1 were appended contiguously starting at
	 * second_step, so a simple increment walks them in order.
	 */
	for (i = 1, idx = path->second_step; i < path->nr_steps; i++, idx++) {
		ret = xchk_dirpath_step_is_stale(dl, path, i, idx, p, &cursor);
		if (ret != 0)
			return ret;
	}

	return 0;
}
|
||||
|
||||
/*
 * Decide if a directory update from the regular filesystem touches any of the
 * paths we've scanned, and invalidate the scan data if true.
 *
 * This is the dirent hook notifier callback, so it runs in the context of
 * whichever thread is performing the directory update, concurrently with the
 * scanner.  It always returns NOTIFY_DONE; failures are recorded by setting
 * dl->aborted instead.
 */
STATIC int
xchk_dirtree_live_update(
	struct notifier_block		*nb,
	unsigned long			action,
	void				*data)
{
	struct xfs_dir_update_params	*p = data;
	struct xchk_dirtree		*dl;
	struct xchk_dirpath		*path;
	int				ret;

	dl = container_of(nb, struct xchk_dirtree, dhook.dirent_hook.nb);

	trace_xchk_dirtree_live_update(dl->sc, p->dp, action, p->ip, p->delta,
			p->name);

	mutex_lock(&dl->lock);

	/* Already invalidated or aborted; nothing more to check. */
	if (dl->stale || dl->aborted)
		goto out_unlock;

	xchk_dirtree_for_each_path(dl, path) {
		ret = xchk_dirpath_is_stale(dl, path, p);
		if (ret < 0) {
			/* Couldn't check; abort the scan rather than guess. */
			dl->aborted = true;
			break;
		}
		if (ret == 1) {
			/* Update hit a scanned path; force a rescan. */
			dl->stale = true;
			break;
		}
	}

out_unlock:
	mutex_unlock(&dl->lock);
	return NOTIFY_DONE;
}
|
||||
|
||||
/*
 * Delete all the collected path information so that the scan can start over.
 * The caller must hold @sc->ip's ILOCK_EXCL so that no dirent updates can
 * race with the reset.
 */
STATIC void
xchk_dirtree_reset(
	void			*buf)
{
	struct xchk_dirtree	*dl = buf;
	struct xchk_dirpath	*path, *n;

	ASSERT(dl->sc->ilock_flags & XFS_ILOCK_EXCL);

	/* Free the in-core path records and their seen-inode bitmaps. */
	xchk_dirtree_for_each_path_safe(dl, path, n) {
		list_del_init(&path->list);
		xino_bitmap_destroy(&path->seen_inodes);
		kfree(path);
	}
	dl->nr_paths = 0;

	/* Empty the staging structures but keep them allocated for reuse. */
	xfarray_truncate(dl->path_steps);
	xfblob_truncate(dl->path_names);

	dl->stale = false;
}
|
||||
|
||||
/*
|
||||
* Load the name/pptr from the first step in this path into @dl->pptr_rec and
|
||||
* @dl->xname.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_dirtree_load_path(
|
||||
struct xchk_dirtree *dl,
|
||||
struct xchk_dirpath *path)
|
||||
{
|
||||
struct xchk_dirpath_step step;
|
||||
int error;
|
||||
|
||||
error = xfarray_load(dl->path_steps, path->first_step, &step);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfblob_loadname(dl->path_names, step.name_cookie, &dl->xname,
|
||||
step.name_len);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
dl->pptr_rec = step.pptr_rec; /* struct copy */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * For each parent pointer of this subdir, trace a path upwards towards the
 * root directory and record what we find.  Returns 0 for success;
 * -EFSCORRUPTED if walking the parent pointers of @sc->ip failed, -ELNRNG if
 * a path was too deep; -ENOSR if there were too many parent pointers; or
 * a negative errno.
 *
 * The whole scan restarts from scratch (outer do/while) whenever a live
 * directory update sets dl->stale.
 */
int
xchk_dirtree_find_paths_to_root(
	struct xchk_dirtree	*dl)
{
	struct xfs_scrub	*sc = dl->sc;
	struct xchk_dirpath	*path;
	int			error = 0;

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		/* Throw away results from the previous (stale) attempt. */
		xchk_dirtree_reset(dl);

		/*
		 * If the extended attributes look as though they have been
		 * zapped by the inode record repair code, we cannot scan for
		 * parent pointers.
		 */
		if (xchk_pptr_looks_zapped(sc->ip)) {
			xchk_set_incomplete(sc);
			return -EBUSY;
		}

		/*
		 * Create path walk contexts for each parent of the directory
		 * that is being scanned.  Directories are supposed to have
		 * only one parent, but this is how we detect multiple parents.
		 */
		error = xchk_xattr_walk(sc, sc->ip, xchk_dirtree_create_path,
				NULL, dl);
		if (error)
			return error;

		xchk_dirtree_for_each_path(dl, path) {
			/* Load path components into dl->pptr/xname */
			error = xchk_dirtree_load_path(dl, path);
			if (error)
				return error;

			/*
			 * Try to walk up each path to the root.  This enables
			 * us to find directory loops in ancestors, and the
			 * like.
			 */
			error = xchk_dirpath_walk_upwards(dl, path);
			if (error == -EFSCORRUPTED) {
				/*
				 * A parent pointer of @sc->ip is bad, don't
				 * bother continuing.
				 */
				break;
			}
			if (error == -ESTALE) {
				/* This had better be an invalidation. */
				ASSERT(dl->stale);
				break;
			}
			if (error)
				return error;
			if (dl->aborted)
				return 0;
		}
	} while (dl->stale);

	return error;
}
|
||||
|
||||
/*
 * Figure out what to do with the paths we tried to find.  Tally each path's
 * outcome into @oc so the caller can judge the directory's connectivity.
 * Do not call this if the scan results are stale.
 */
void
xchk_dirtree_evaluate(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;

	ASSERT(!dl->stale);

	/* Scan the paths we have to decide what to do. */
	memset(oc, 0, sizeof(struct xchk_dirtree_outcomes));
	xchk_dirtree_for_each_path(dl, path) {
		trace_xchk_dirpath_evaluate_path(dl->sc, path->path_nr,
				path->nr_steps, path->outcome);

		switch (path->outcome) {
		case XCHK_DIRPATH_SCANNING:
			/* shouldn't get here */
			ASSERT(0);
			break;
		case XCHK_DIRPATH_DELETE:
			/* This one is already going away. */
			oc->bad++;
			break;
		case XCHK_DIRPATH_CORRUPT:
		case XCHK_DIRPATH_LOOP:
			/* Couldn't find the end of this path. */
			oc->suspect++;
			break;
		case XCHK_DIRPATH_STALE:
			/* shouldn't get here either */
			ASSERT(0);
			break;
		case XCHK_DIRPATH_OK:
			/* This path got all the way to the root. */
			oc->good++;
			break;
		case XREP_DIRPATH_DELETING:
		case XREP_DIRPATH_DELETED:
		case XREP_DIRPATH_ADOPTING:
		case XREP_DIRPATH_ADOPTED:
			/* These should not be in progress! */
			ASSERT(0);
			break;
		}
	}

	trace_xchk_dirtree_evaluate(dl, oc);
}
|
||||
|
||||
/*
 * Look for directory loops.
 *
 * Main entry point for the dirtree scrubber: walks every parent pointer path
 * of @sc->ip to the root and flags the inode as corrupt if it is unreachable,
 * reachable more than once, or part of a cycle.
 */
int
xchk_dirtree(
	struct xfs_scrub	*sc)
{
	struct xchk_dirtree_outcomes oc;
	struct xchk_dirtree	*dl = sc->buf;
	int			error;

	/*
	 * Nondirectories do not point downwards to other files, so they cannot
	 * cause a cycle in the directory tree.
	 */
	if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
		return -ENOENT;

	ASSERT(xfs_has_parent(sc->mp));

	/*
	 * Find the root of the directory tree.  Remember which directory to
	 * scan, because the hook doesn't detach until after sc->ip gets
	 * released during teardown.
	 */
	dl->root_ino = sc->mp->m_rootip->i_ino;
	dl->scan_ino = sc->ip->i_ino;

	trace_xchk_dirtree_start(sc->ip, sc->sm, 0);

	/*
	 * Hook into the directory entry code so that we can capture updates to
	 * paths that we have already scanned.  The scanner thread takes each
	 * directory's ILOCK, which means that any in-progress directory update
	 * will finish before we can scan the directory.
	 */
	ASSERT(sc->flags & XCHK_FSGATES_DIRENTS);
	xfs_dir_hook_setup(&dl->dhook, xchk_dirtree_live_update);
	error = xfs_dir_hook_add(sc->mp, &dl->dhook);
	if (error)
		goto out;

	mutex_lock(&dl->lock);

	/* Trace each parent pointer's path to the root. */
	error = xchk_dirtree_find_paths_to_root(dl);
	if (error == -EFSCORRUPTED || error == -ELNRNG || error == -ENOSR) {
		/*
		 * Don't bother walking the paths if the xattr structure or the
		 * parent pointers are corrupt; this scan cannot be completed
		 * without full information.
		 */
		xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
		error = 0;
		goto out_scanlock;
	}
	if (error == -EBUSY) {
		/*
		 * We couldn't scan some directory's parent pointers because
		 * the attr fork looked like it had been zapped.  The
		 * scan was marked incomplete, so no further error code
		 * is necessary.
		 */
		error = 0;
		goto out_scanlock;
	}
	if (error)
		goto out_scanlock;
	if (dl->aborted) {
		xchk_set_incomplete(sc);
		goto out_scanlock;
	}

	/* Assess what we found in our path evaluation. */
	xchk_dirtree_evaluate(dl, &oc);
	if (xchk_dirtree_parentless(dl)) {
		/* A parentless directory (e.g. the root) must have no paths. */
		if (oc.good || oc.bad || oc.suspect)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	} else {
		/* Everything else must have exactly one good path to root. */
		if (oc.bad || oc.good + oc.suspect != 1)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		if (oc.suspect)
			xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
	}

out_scanlock:
	mutex_unlock(&dl->lock);
out:
	trace_xchk_dirtree_done(sc->ip, sc->sm, error);
	return error;
}
|
||||
178
fs/xfs/scrub/dirtree.h
Normal file
178
fs/xfs/scrub/dirtree.h
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_DIRTREE_H__
|
||||
#define __XFS_SCRUB_DIRTREE_H__
|
||||
|
||||
/*
 * Each of these represents one parent pointer path step in a chain going
 * up towards the directory tree root.  These are stored inside an xfarray.
 */
struct xchk_dirpath_step {
	/*
	 * Directory entry name associated with this parent link; the cookie
	 * references the name bytes stored in the dirtree scan's xfblob.
	 */
	xfblob_cookie		name_cookie;
	unsigned int		name_len;

	/* Handle (ino/gen) of the parent directory. */
	struct xfs_parent_rec	pptr_rec;
};
|
||||
|
||||
enum xchk_dirpath_outcome {
|
||||
XCHK_DIRPATH_SCANNING = 0, /* still being put together */
|
||||
XCHK_DIRPATH_DELETE, /* delete this path */
|
||||
XCHK_DIRPATH_CORRUPT, /* corruption detected in path */
|
||||
XCHK_DIRPATH_LOOP, /* cycle detected further up */
|
||||
XCHK_DIRPATH_STALE, /* path is stale */
|
||||
XCHK_DIRPATH_OK, /* path reaches the root */
|
||||
|
||||
XREP_DIRPATH_DELETING, /* path is being deleted */
|
||||
XREP_DIRPATH_DELETED, /* path has been deleted */
|
||||
XREP_DIRPATH_ADOPTING, /* path is being adopted */
|
||||
XREP_DIRPATH_ADOPTED, /* path has been adopted */
|
||||
};
|
||||
|
||||
/*
|
||||
* Each of these represents one parent pointer path out of the directory being
|
||||
* scanned. These exist in-core, and hopefully there aren't more than a
|
||||
* handful of them.
|
||||
*/
|
||||
struct xchk_dirpath {
|
||||
struct list_head list;
|
||||
|
||||
/* Index of the first step in this path. */
|
||||
xfarray_idx_t first_step;
|
||||
|
||||
/* Index of the second step in this path. */
|
||||
xfarray_idx_t second_step;
|
||||
|
||||
/* Inodes seen while walking this path. */
|
||||
struct xino_bitmap seen_inodes;
|
||||
|
||||
/* Number of steps in this path. */
|
||||
unsigned int nr_steps;
|
||||
|
||||
/* Which path is this? */
|
||||
unsigned int path_nr;
|
||||
|
||||
/* What did we conclude from following this path? */
|
||||
enum xchk_dirpath_outcome outcome;
|
||||
};
|
||||
|
||||
struct xchk_dirtree_outcomes {
|
||||
/* Number of XCHK_DIRPATH_DELETE */
|
||||
unsigned int bad;
|
||||
|
||||
/* Number of XCHK_DIRPATH_CORRUPT or XCHK_DIRPATH_LOOP */
|
||||
unsigned int suspect;
|
||||
|
||||
/* Number of XCHK_DIRPATH_OK */
|
||||
unsigned int good;
|
||||
|
||||
/* Directory needs to be added to lost+found */
|
||||
bool needs_adoption;
|
||||
};
|
||||
|
||||
struct xchk_dirtree {
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
/* Root inode that we're looking for. */
|
||||
xfs_ino_t root_ino;
|
||||
|
||||
/*
|
||||
* This is the inode that we're scanning. The live update hook can
|
||||
* continue to be called after xchk_teardown drops sc->ip but before
|
||||
* it calls buf_cleanup, so we keep a copy.
|
||||
*/
|
||||
xfs_ino_t scan_ino;
|
||||
|
||||
/*
|
||||
* If we start deleting redundant paths to this subdirectory, this is
|
||||
* the inode number of the surviving parent and the dotdot entry will
|
||||
* be set to this value. If the value is NULLFSINO, then use @root_ino
|
||||
* as a stand-in until the orphanage can adopt the subdirectory.
|
||||
*/
|
||||
xfs_ino_t parent_ino;
|
||||
|
||||
/* Scratch buffer for scanning pptr xattrs */
|
||||
struct xfs_parent_rec pptr_rec;
|
||||
struct xfs_da_args pptr_args;
|
||||
|
||||
/* Name buffer */
|
||||
struct xfs_name xname;
|
||||
char namebuf[MAXNAMELEN];
|
||||
|
||||
/* Information for reparenting this directory. */
|
||||
struct xrep_adoption adoption;
|
||||
|
||||
/*
|
||||
* Hook into directory updates so that we can receive live updates
|
||||
* from other writer threads.
|
||||
*/
|
||||
struct xfs_dir_hook dhook;
|
||||
|
||||
/* Parent pointer update arguments. */
|
||||
struct xfs_parent_args ppargs;
|
||||
|
||||
/* lock for everything below here */
|
||||
struct mutex lock;
|
||||
|
||||
/* buffer for the live update functions to use for dirent names */
|
||||
struct xfs_name hook_xname;
|
||||
unsigned char hook_namebuf[MAXNAMELEN];
|
||||
|
||||
/*
|
||||
* All path steps observed during this scan. Each of the path
|
||||
* steps for a particular pathwalk are recorded in sequential
|
||||
* order in the xfarray. A pathwalk ends either with a step
|
||||
* pointing to the root directory (success) or pointing to NULLFSINO
|
||||
* (loop detected, empty dir detected, etc).
|
||||
*/
|
||||
struct xfarray *path_steps;
|
||||
|
||||
/* All names observed during this scan. */
|
||||
struct xfblob *path_names;
|
||||
|
||||
/* All paths being tracked by this scanner. */
|
||||
struct list_head path_list;
|
||||
|
||||
/* Number of paths in path_list. */
|
||||
unsigned int nr_paths;
|
||||
|
||||
/* Number of parents found by a pptr scan. */
|
||||
unsigned int parents_found;
|
||||
|
||||
/* Have the path data been invalidated by a concurrent update? */
|
||||
bool stale:1;
|
||||
|
||||
/* Has the scan been aborted? */
|
||||
bool aborted:1;
|
||||
};
|
||||
|
||||
#define xchk_dirtree_for_each_path_safe(dl, path, n) \
|
||||
list_for_each_entry_safe((path), (n), &(dl)->path_list, list)
|
||||
|
||||
#define xchk_dirtree_for_each_path(dl, path) \
|
||||
list_for_each_entry((path), &(dl)->path_list, list)
|
||||
|
||||
static inline bool
|
||||
xchk_dirtree_parentless(const struct xchk_dirtree *dl)
|
||||
{
|
||||
struct xfs_scrub *sc = dl->sc;
|
||||
|
||||
if (sc->ip == sc->mp->m_rootip)
|
||||
return true;
|
||||
if (VFS_I(sc->ip)->i_nlink == 0)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
int xchk_dirtree_find_paths_to_root(struct xchk_dirtree *dl);
|
||||
int xchk_dirpath_append(struct xchk_dirtree *dl, struct xfs_inode *ip,
|
||||
struct xchk_dirpath *path, const struct xfs_name *name,
|
||||
const struct xfs_parent_rec *pptr);
|
||||
void xchk_dirtree_evaluate(struct xchk_dirtree *dl,
|
||||
struct xchk_dirtree_outcomes *oc);
|
||||
|
||||
#endif /* __XFS_SCRUB_DIRTREE_H__ */
|
||||
821
fs/xfs/scrub/dirtree_repair.c
Normal file
821
fs/xfs/scrub/dirtree_repair.c
Normal file
|
|
@ -0,0 +1,821 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/bitmap.h"
|
||||
#include "scrub/ino_bitmap.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/xfblob.h"
|
||||
#include "scrub/listxattr.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/orphanage.h"
|
||||
#include "scrub/dirtree.h"
|
||||
#include "scrub/readdir.h"
|
||||
|
||||
/*
|
||||
* Directory Tree Structure Repairs
|
||||
* ================================
|
||||
*
|
||||
* If we decide that the directory being scanned is participating in a
|
||||
* directory loop, the only change we can make is to remove directory entries
|
||||
* pointing down to @sc->ip. If that leaves it with no parents, the directory
|
||||
* should be adopted by the orphanage.
|
||||
*/
|
||||
|
||||
/*
 * Set up to repair directory loops.  The only preparation needed is making
 * sure the orphanage directory exists, in case the repair leaves the scrub
 * target with no parents at all.
 */
int
xrep_setup_dirtree(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xrep_orphanage_try_create(sc);
	return error;
}
|
||||
|
||||
/*
 * Change the outcome of this path.  The tracepoint fires before the state
 * change so that the trace log records the transition.
 */
static inline void
xrep_dirpath_set_outcome(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path,
	enum xchk_dirpath_outcome	outcome)
{
	trace_xrep_dirpath_set_outcome(dl->sc, path->path_nr, path->nr_steps,
			outcome);

	path->outcome = outcome;
}
|
||||
|
||||
/*
 * Delete all paths.  Used for parentless directories (the root, or an
 * unlinked directory), which should not have any parent paths at all.
 * Moves every good/suspect tally in @oc over to the bad count.
 */
STATIC void
xrep_dirtree_delete_all_paths(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;

	xchk_dirtree_for_each_path(dl, path) {
		switch (path->outcome) {
		case XCHK_DIRPATH_CORRUPT:
		case XCHK_DIRPATH_LOOP:
			/* Suspect paths become deletions. */
			oc->suspect--;
			oc->bad++;
			xrep_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
			break;
		case XCHK_DIRPATH_OK:
			/* Even good paths must go; no parents are allowed. */
			oc->good--;
			oc->bad++;
			xrep_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
			break;
		default:
			break;
		}
	}

	/* Every countable path should now be marked for deletion. */
	ASSERT(oc->suspect == 0);
	ASSERT(oc->good == 0);
}
|
||||
|
||||
/*
 * Since this is the surviving path, set the dotdot entry to this value.
 * Best-effort: if loading the first step fails, @dl->parent_ino is left
 * unchanged and the caller falls back to its existing value.
 */
STATIC void
xrep_dirpath_retain_parent(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path)
{
	struct xchk_dirpath_step	step;
	int				error;

	error = xfarray_load(dl->path_steps, path->first_step, &step);
	if (error)
		return;

	/* The first step's parent pointer names the surviving parent. */
	dl->parent_ino = be64_to_cpu(step.pptr_rec.p_ino);
}
|
||||
|
||||
/*
 * Find the one surviving path so we know how to set dotdot.  Called only
 * when the evaluation counted exactly one good or suspect path; the ASSERT
 * on the second match documents that expectation.
 */
STATIC void
xrep_dirtree_find_surviving_path(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;
	bool				foundit = false;

	xchk_dirtree_for_each_path(dl, path) {
		switch (path->outcome) {
		case XCHK_DIRPATH_CORRUPT:
		case XCHK_DIRPATH_LOOP:
		case XCHK_DIRPATH_OK:
			if (!foundit) {
				/* First countable path: remember its parent. */
				xrep_dirpath_retain_parent(dl, path);
				foundit = true;
				continue;
			}
			/* A second countable path violates the caller's contract. */
			ASSERT(foundit == false);
			break;
		default:
			break;
		}
	}

	ASSERT(oc->suspect + oc->good == 1);
}
|
||||
|
||||
/*
 * Delete all paths except for the one good one.  The first XCHK_DIRPATH_OK
 * path encountered becomes the surviving parent; every suspect path and any
 * additional good paths are marked for deletion, with the @oc tallies moved
 * to the bad count accordingly.
 */
STATIC void
xrep_dirtree_keep_one_good_path(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;
	bool				foundit = false;

	xchk_dirtree_for_each_path(dl, path) {
		switch (path->outcome) {
		case XCHK_DIRPATH_CORRUPT:
		case XCHK_DIRPATH_LOOP:
			/* Suspect paths are always deleted here. */
			oc->suspect--;
			oc->bad++;
			xrep_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
			break;
		case XCHK_DIRPATH_OK:
			if (!foundit) {
				/* Keep the first good path as the parent. */
				xrep_dirpath_retain_parent(dl, path);
				foundit = true;
				continue;
			}
			/* Extra good paths are redundant links; delete them. */
			oc->good--;
			oc->bad++;
			xrep_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
			break;
		default:
			break;
		}
	}

	ASSERT(oc->suspect == 0);
	ASSERT(oc->good < 2);
}
|
||||
|
||||
/*
 * Delete all paths except for one suspect one.  Called only when there are
 * no good paths at all, so finding an XCHK_DIRPATH_OK path here trips an
 * assertion.  The first suspect path survives as the parent.
 */
STATIC void
xrep_dirtree_keep_one_suspect_path(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;
	bool				foundit = false;

	xchk_dirtree_for_each_path(dl, path) {
		switch (path->outcome) {
		case XCHK_DIRPATH_CORRUPT:
		case XCHK_DIRPATH_LOOP:
			if (!foundit) {
				/* Keep the first suspect path as the parent. */
				xrep_dirpath_retain_parent(dl, path);
				foundit = true;
				continue;
			}
			/* Remaining suspect paths are deleted. */
			oc->suspect--;
			oc->bad++;
			xrep_dirpath_set_outcome(dl, path, XCHK_DIRPATH_DELETE);
			break;
		case XCHK_DIRPATH_OK:
			/* Caller guaranteed there are no good paths. */
			ASSERT(0);
			break;
		default:
			break;
		}
	}

	ASSERT(oc->suspect == 1);
	ASSERT(oc->good == 0);
}
|
||||
|
||||
/*
 * Figure out what to do with the paths we tried to find: evaluate the scan
 * results in @oc, then mark each path for retention or deletion, or request
 * adoption by the orphanage.  (NOTE(review): the original comment claimed a
 * -EDEADLOCK return, but this function is void; stale-scan detection is
 * handled by the caller via dl->stale.)
 */
STATIC void
xrep_dirtree_decide_fate(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	xchk_dirtree_evaluate(dl, oc);

	/* Parentless directories should not have any paths at all. */
	if (xchk_dirtree_parentless(dl)) {
		xrep_dirtree_delete_all_paths(dl, oc);
		return;
	}

	/* One path is exactly the number of paths we want. */
	if (oc->good + oc->suspect == 1) {
		xrep_dirtree_find_surviving_path(dl, oc);
		return;
	}

	/* Zero paths means we should reattach the subdir to the orphanage. */
	if (oc->good + oc->suspect == 0) {
		if (dl->sc->orphanage)
			oc->needs_adoption = true;
		return;
	}

	/*
	 * Otherwise, this subdirectory has too many parents.  If there's at
	 * least one good path, keep it and delete the others.
	 */
	if (oc->good > 0) {
		xrep_dirtree_keep_one_good_path(dl, oc);
		return;
	}

	/*
	 * There are no good paths and there are too many suspect paths.
	 * Keep the first suspect path and delete the rest.
	 */
	xrep_dirtree_keep_one_suspect_path(dl, oc);
}
|
||||
|
||||
/*
 * Load the first step of this path into @step and @dl->xname/pptr
 * for later repair work.  Returns 0 on success or a negative errno if the
 * step or its name could not be loaded from the scan data.
 */
STATIC int
xrep_dirtree_prep_path(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path,
	struct xchk_dirpath_step	*step)
{
	int				error;

	error = xfarray_load(dl->path_steps, path->first_step, step);
	if (error)
		return error;

	/* Pull the dirent name out of the blob store into dl->xname. */
	error = xfblob_loadname(dl->path_names, step->name_cookie, &dl->xname,
			step->name_len);
	if (error)
		return error;

	dl->pptr_rec = step->pptr_rec; /* struct copy */
	return 0;
}
|
||||
|
||||
/*
 * Delete the VFS dentry for a removed child.  After the on-disk dirent is
 * gone, the dcache must not keep serving the old name, so replace the child
 * dentry with a negative one.  Returns 0 if there was nothing cached, or
 * -EFSCORRUPTED if the dcache disagrees about either inode being a directory.
 */
STATIC int
xrep_dirtree_purge_dentry(
	struct xchk_dirtree	*dl,
	struct xfs_inode	*dp,
	const struct xfs_name	*name)
{
	struct qstr		qname = QSTR_INIT(name->name, name->len);
	struct dentry		*parent_dentry, *child_dentry;
	int			error = 0;

	/*
	 * Find the dentry for the parent directory.  If there isn't one, we're
	 * done.  Caller already holds i_rwsem for parent and child.
	 */
	parent_dentry = d_find_alias(VFS_I(dp));
	if (!parent_dentry)
		return 0;

	/* The VFS thinks the parent is a directory, right? */
	if (!d_is_dir(parent_dentry)) {
		ASSERT(d_is_dir(parent_dentry));
		error = -EFSCORRUPTED;
		goto out_dput_parent;
	}

	/*
	 * Try to find the dirent pointing to the child.  If there isn't one,
	 * we're done.
	 */
	qname.hash = full_name_hash(parent_dentry, name->name, name->len);
	child_dentry = d_lookup(parent_dentry, &qname);
	if (!child_dentry) {
		error = 0;
		goto out_dput_parent;
	}

	trace_xrep_dirtree_delete_child(dp->i_mount, child_dentry);

	/* Child is not a directory?  We're screwed. */
	if (!d_is_dir(child_dentry)) {
		ASSERT(d_is_dir(child_dentry));
		error = -EFSCORRUPTED;
		goto out_dput_child;
	}

	/* Replace the child dentry with a negative one. */
	d_delete(child_dentry);

out_dput_child:
	dput(child_dentry);
out_dput_parent:
	dput(parent_dentry);
	return error;
}
|
||||
|
||||
/*
 * Prepare to delete a link by taking the IOLOCK of the parent and the child
 * (scrub target).  Caller must hold IOLOCK_EXCL on @sc->ip.  Returns 0 if we
 * took both locks, or a negative errno if we couldn't lock the parent in time.
 *
 * The retry loop avoids ABBA deadlocks: if a trylock on @dp fails, drop the
 * child's IOLOCK, take @dp's lock, then trylock the child; back off and
 * retry until both are held or a fatal signal arrives.
 */
static inline int
xrep_dirtree_unlink_iolock(
	struct xfs_scrub	*sc,
	struct xfs_inode	*dp)
{
	int			error;

	ASSERT(sc->ilock_flags & XFS_IOLOCK_EXCL);

	/* Fast path: the parent's lock is uncontended. */
	if (xfs_ilock_nowait(dp, XFS_IOLOCK_EXCL))
		return 0;

	xchk_iunlock(sc, XFS_IOLOCK_EXCL);
	do {
		xfs_ilock(dp, XFS_IOLOCK_EXCL);
		if (xchk_ilock_nowait(sc, XFS_IOLOCK_EXCL))
			break;
		xfs_iunlock(dp, XFS_IOLOCK_EXCL);

		if (xchk_should_terminate(sc, &error)) {
			/* Re-take the child lock to satisfy the contract. */
			xchk_ilock(sc, XFS_IOLOCK_EXCL);
			return error;
		}

		delay(1);
	} while (1);

	return 0;
}
|
||||
|
||||
/*
 * Remove a link from the directory tree and update the dcache.  Returns
 * -ESTALE if the scan data are now out of date.
 *
 * @dp is the parent directory holding the bad dirent; @step carries the
 * dirent name (already loaded into dl->xname/pptr_rec by the caller).
 */
STATIC int
xrep_dirtree_unlink(
	struct xchk_dirtree		*dl,
	struct xfs_inode		*dp,
	struct xchk_dirpath		*path,
	struct xchk_dirpath_step	*step)
{
	struct xfs_scrub		*sc = dl->sc;
	struct xfs_mount		*mp = sc->mp;
	xfs_ino_t			dotdot_ino;
	xfs_ino_t			parent_ino = dl->parent_ino;
	unsigned int			resblks;
	int				dontcare;
	int				error;

	/* Take IOLOCK_EXCL of the parent and child. */
	error = xrep_dirtree_unlink_iolock(sc, dp);
	if (error)
		return error;

	/*
	 * Create the transaction that we need to sever the path.  Ignore
	 * EDQUOT and ENOSPC being returned via nospace_error because the
	 * directory code can handle a reservationless update.
	 */
	resblks = xfs_remove_space_res(mp, step->name_len);
	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, sc->ip,
			&resblks, &sc->tp, &dontcare);
	if (error)
		goto out_iolock;

	/*
	 * Cancel if someone invalidated the paths while we were trying to get
	 * the ILOCK.
	 */
	mutex_lock(&dl->lock);
	if (dl->stale) {
		mutex_unlock(&dl->lock);
		error = -ESTALE;
		goto out_trans_cancel;
	}
	xrep_dirpath_set_outcome(dl, path, XREP_DIRPATH_DELETING);
	mutex_unlock(&dl->lock);

	trace_xrep_dirtree_delete_path(dl->sc, sc->ip, path->path_nr,
			&dl->xname, &dl->pptr_rec);

	/*
	 * Decide if we need to reset the dotdot entry.  Rules:
	 *
	 * - If there's a surviving parent, we want dotdot to point there.
	 * - If we don't have any surviving parents, then point dotdot at the
	 *   root dir.
	 * - If dotdot is already set to the value we want, pass in NULLFSINO
	 *   for no change necessary.
	 *
	 * Do this /before/ we dirty anything, in case the dotdot lookup
	 * fails.
	 */
	error = xchk_dir_lookup(sc, sc->ip, &xfs_name_dotdot, &dotdot_ino);
	if (error)
		goto out_trans_cancel;
	if (parent_ino == NULLFSINO)
		parent_ino = dl->root_ino;
	if (dotdot_ino == parent_ino)
		parent_ino = NULLFSINO;

	/* Drop the link from sc->ip's dotdot entry. */
	error = xfs_droplink(sc->tp, dp);
	if (error)
		goto out_trans_cancel;

	/* Reset the dotdot entry to a surviving parent. */
	if (parent_ino != NULLFSINO) {
		error = xfs_dir_replace(sc->tp, sc->ip, &xfs_name_dotdot,
				parent_ino, 0);
		if (error)
			goto out_trans_cancel;
	}

	/* Drop the link from dp to sc->ip. */
	error = xfs_droplink(sc->tp, sc->ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_removename(sc->tp, dp, &dl->xname, sc->ip->i_ino,
			resblks);
	if (error) {
		/* The dirent was found during the scan, so it must exist. */
		ASSERT(error != -ENOENT);
		goto out_trans_cancel;
	}

	if (xfs_has_parent(sc->mp)) {
		/* Remove the matching parent pointer from sc->ip. */
		error = xfs_parent_removename(sc->tp, &dl->ppargs, dp,
				&dl->xname, sc->ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Notify dirent hooks that we removed the bad link, invalidate the
	 * dcache, and commit the repair.
	 */
	xfs_dir_update_hook(dp, sc->ip, -1, &dl->xname);
	error = xrep_dirtree_purge_dentry(dl, dp, &dl->xname);
	if (error)
		goto out_trans_cancel;

	error = xrep_trans_commit(sc);
	goto out_ilock;

out_trans_cancel:
	xchk_trans_cancel(sc);
out_ilock:
	xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
out_iolock:
	xfs_iunlock(dp, XFS_IOLOCK_EXCL);
	return error;
}
|
||||
|
||||
/*
 * Delete a directory entry that points to this directory.  Returns -ESTALE
 * if the scan data are now out of date.
 *
 * Entered with the scan lock, the ILOCK, and an (empty) transaction held;
 * exits with the same resources held regardless of the outcome, so the
 * caller's loop state stays simple.
 */
STATIC int
xrep_dirtree_delete_path(
	struct xchk_dirtree		*dl,
	struct xchk_dirpath		*path)
{
	struct xchk_dirpath_step	step;
	struct xfs_scrub		*sc = dl->sc;
	struct xfs_inode		*dp;
	int				error;

	/*
	 * Load the parent pointer and directory inode for this path, then
	 * drop the scan lock, the ILOCK, and the transaction so that
	 * _delete_path can reserve the proper transaction.  This sets up
	 * @dl->xname for the deletion.
	 */
	error = xrep_dirtree_prep_path(dl, path, &step);
	if (error)
		return error;

	error = xchk_iget(sc, be64_to_cpu(step.pptr_rec.p_ino), &dp);
	if (error)
		return error;

	mutex_unlock(&dl->lock);
	xchk_trans_cancel(sc);
	xchk_iunlock(sc, XFS_ILOCK_EXCL);

	/* Delete the directory link and release the parent. */
	error = xrep_dirtree_unlink(dl, dp, path, &step);
	xchk_irele(sc, dp);

	/*
	 * Retake all the resources we had at the beginning even if the repair
	 * failed or the scan data are now stale.  This keeps things simple for
	 * the caller.
	 */
	xchk_trans_alloc_empty(sc);
	xchk_ilock(sc, XFS_ILOCK_EXCL);
	mutex_lock(&dl->lock);

	/* Surface staleness even if the unlink itself succeeded. */
	if (!error && dl->stale)
		error = -ESTALE;
	return error;
}
|
||||
|
||||
/*
 * Add a new path to represent our in-progress adoption, so that the live
 * update hook sees the orphanage link we are about to create.  Caller holds
 * dl->lock.  Returns 0 on success, -EFSCORRUPTED if the path count exceeds
 * XFS_MAXLINK, or a negative errno on allocation/recording failure.
 */
STATIC int
xrep_dirtree_create_adoption_path(
	struct xchk_dirtree		*dl)
{
	struct xfs_scrub		*sc = dl->sc;
	struct xchk_dirpath		*path;
	int				error;

	/*
	 * We should have capped the number of paths at XFS_MAXLINK-1 in the
	 * scanner.
	 */
	if (dl->nr_paths > XFS_MAXLINK) {
		ASSERT(dl->nr_paths <= XFS_MAXLINK);
		return -EFSCORRUPTED;
	}

	/*
	 * Create a new xchk_path structure to remember this parent pointer
	 * and record the first name step.
	 */
	path = kmalloc(sizeof(struct xchk_dirpath), XCHK_GFP_FLAGS);
	if (!path)
		return -ENOMEM;

	INIT_LIST_HEAD(&path->list);
	xino_bitmap_init(&path->seen_inodes);
	path->nr_steps = 0;
	path->outcome = XREP_DIRPATH_ADOPTING;

	/*
	 * Record the new link that we just created in the orphanage.  Because
	 * adoption is the last repair that we perform, we don't bother filling
	 * in the path all the way back to the root.
	 */
	xfs_inode_to_parent_rec(&dl->pptr_rec, sc->orphanage);

	error = xino_bitmap_set(&path->seen_inodes, sc->orphanage->i_ino);
	if (error)
		goto out_path;

	trace_xrep_dirtree_create_adoption(sc, sc->ip, dl->nr_paths,
			&dl->xname, &dl->pptr_rec);

	error = xchk_dirpath_append(dl, sc->ip, path, &dl->xname,
			&dl->pptr_rec);
	if (error)
		goto out_path;

	/* The step we just appended is the last element of the xfarray. */
	path->first_step = xfarray_length(dl->path_steps) - 1;
	path->second_step = XFARRAY_NULLIDX;
	path->path_nr = dl->nr_paths;

	list_add_tail(&path->list, &dl->path_list);
	dl->nr_paths++;
	return 0;

out_path:
	kfree(path);
	return error;
}
|
||||
|
||||
/*
 * Prepare to move a file to the orphanage by taking the IOLOCK of the
 * orphanage and the child (scrub target).  Caller must hold IOLOCK_EXCL on
 * @sc->ip.  Returns 0 if we took both locks, or a negative errno if we
 * couldn't lock the orphanage in time.
 *
 * Same ABBA-avoidance retry pattern as xrep_dirtree_unlink_iolock, with the
 * orphanage inode in place of the parent.
 */
static inline int
xrep_dirtree_adopt_iolock(
	struct xfs_scrub	*sc)
{
	int			error;

	ASSERT(sc->ilock_flags & XFS_IOLOCK_EXCL);

	/* Fast path: the orphanage lock is uncontended. */
	if (xrep_orphanage_ilock_nowait(sc, XFS_IOLOCK_EXCL))
		return 0;

	xchk_iunlock(sc, XFS_IOLOCK_EXCL);
	do {
		xrep_orphanage_ilock(sc, XFS_IOLOCK_EXCL);
		if (xchk_ilock_nowait(sc, XFS_IOLOCK_EXCL))
			break;
		xrep_orphanage_iunlock(sc, XFS_IOLOCK_EXCL);

		if (xchk_should_terminate(sc, &error)) {
			/* Re-take the child lock to satisfy the contract. */
			xchk_ilock(sc, XFS_IOLOCK_EXCL);
			return error;
		}

		delay(1);
	} while (1);

	return 0;
}
|
||||
|
||||
/*
 * Reattach this orphaned directory to the orphanage.  Do not call this with
 * any resources held.  Returns -ESTALE if the scan data have become out of
 * date.
 */
STATIC int
xrep_dirtree_adopt(
	struct xchk_dirtree		*dl)
{
	struct xfs_scrub		*sc = dl->sc;
	int				error;

	/* Take the IOLOCK of the orphanage and the scrub target. */
	error = xrep_dirtree_adopt_iolock(sc);
	if (error)
		return error;

	/*
	 * Set up for an adoption.  The directory tree fixer runs after the
	 * link counts have been corrected.  Therefore, we must bump the
	 * child's link count since there will be no further opportunity to fix
	 * errors.
	 */
	error = xrep_adoption_trans_alloc(sc, &dl->adoption);
	if (error)
		goto out_iolock;
	dl->adoption.bump_child_nlink = true;

	/* Figure out what name we're going to use here. */
	error = xrep_adoption_compute_name(&dl->adoption, &dl->xname);
	if (error)
		goto out_trans;

	/*
	 * Now that we have a proposed name for the orphanage entry, create
	 * a faux path so that the live update hook will see it.
	 */
	mutex_lock(&dl->lock);
	if (dl->stale) {
		mutex_unlock(&dl->lock);
		error = -ESTALE;
		goto out_trans;
	}
	error = xrep_dirtree_create_adoption_path(dl);
	mutex_unlock(&dl->lock);
	if (error)
		goto out_trans;

	/* Reparent the directory. */
	error = xrep_adoption_move(&dl->adoption);
	if (error)
		goto out_trans;

	/*
	 * Commit the name and release all inode locks except for the scrub
	 * target's IOLOCK.
	 */
	error = xrep_trans_commit(sc);
	goto out_ilock;

out_trans:
	xchk_trans_cancel(sc);
out_ilock:
	xchk_iunlock(sc, XFS_ILOCK_EXCL);
	xrep_orphanage_iunlock(sc, XFS_ILOCK_EXCL);
out_iolock:
	xrep_orphanage_iunlock(sc, XFS_IOLOCK_EXCL);
	return error;
}
|
||||
|
||||
/*
 * This newly orphaned directory needs to be adopted by the orphanage.
 * Make this happen.
 *
 * Entered with the scan lock, the ILOCK, and an (empty) transaction held;
 * drops them for the adoption, then re-acquires them before returning so
 * the caller's resource state is unchanged.  Returns -ESTALE if the scan
 * data went stale in the meantime.
 */
STATIC int
xrep_dirtree_move_to_orphanage(
	struct xchk_dirtree		*dl)
{
	struct xfs_scrub		*sc = dl->sc;
	int				error;

	/*
	 * Start by dropping all the resources that we hold so that we can grab
	 * all the resources that we need for the adoption.
	 */
	mutex_unlock(&dl->lock);
	xchk_trans_cancel(sc);
	xchk_iunlock(sc, XFS_ILOCK_EXCL);

	/* Perform the adoption. */
	error = xrep_dirtree_adopt(dl);

	/*
	 * Retake all the resources we had at the beginning even if the repair
	 * failed or the scan data are now stale.  This keeps things simple for
	 * the caller.
	 */
	xchk_trans_alloc_empty(sc);
	xchk_ilock(sc, XFS_ILOCK_EXCL);
	mutex_lock(&dl->lock);

	if (!error && dl->stale)
		error = -ESTALE;
	return error;
}
|
||||
|
||||
/*
 * Try to fix all the problems.  Returns -ESTALE if the scan data have become
 * out of date, 0 on success, or another negative errno on failure.
 */
STATIC int
xrep_dirtree_fix_problems(
	struct xchk_dirtree		*dl,
	struct xchk_dirtree_outcomes	*oc)
{
	struct xchk_dirpath		*path;
	int				error;

	/* Delete all the paths we don't want. */
	xchk_dirtree_for_each_path(dl, path) {
		if (path->outcome != XCHK_DIRPATH_DELETE)
			continue;

		error = xrep_dirtree_delete_path(dl, path);
		if (error)
			return error;
	}

	/* Reparent this directory to the orphanage. */
	if (oc->needs_adoption) {
		if (xrep_orphanage_can_adopt(dl->sc))
			return xrep_dirtree_move_to_orphanage(dl);
		/* No usable orphanage; the directory stays broken. */
		return -EFSCORRUPTED;
	}

	return 0;
}
|
||||
|
||||
/*
 * Fix directory loops involving this directory.  Repair entry point: loops
 * until either the fixes stick (no -ESTALE) or the rescan fails.  Rescan
 * errors -ELNRNG/-ENOSR (path too long / too many paths) are converted to
 * -EFSCORRUPTED since repair cannot proceed without full path information.
 */
int
xrep_dirtree(
	struct xfs_scrub		*sc)
{
	struct xchk_dirtree		*dl = sc->buf;
	struct xchk_dirtree_outcomes	oc;
	int				error;

	/*
	 * Prepare to fix the directory tree by retaking the scan lock.  The
	 * order of resource acquisition is still IOLOCK -> transaction ->
	 * ILOCK -> scan lock.
	 */
	mutex_lock(&dl->lock);
	do {
		/*
		 * Decide what we're going to do, then do it.  An -ESTALE
		 * return here means the scan results are invalid and we have
		 * to walk again.
		 */
		if (!dl->stale) {
			xrep_dirtree_decide_fate(dl, &oc);

			trace_xrep_dirtree_decided_fate(dl, &oc);

			error = xrep_dirtree_fix_problems(dl, &oc);
			/* Only -ESTALE loops back around for a rescan. */
			if (!error || error != -ESTALE)
				break;
		}
		error = xchk_dirtree_find_paths_to_root(dl);
		if (error == -ELNRNG || error == -ENOSR)
			error = -EFSCORRUPTED;
	} while (!error);
	mutex_unlock(&dl->lock);

	return error;
}
|
||||
454
fs/xfs/scrub/findparent.c
Normal file
454
fs/xfs/scrub/findparent.c
Normal file
|
|
@ -0,0 +1,454 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2020-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_defer.h"
|
||||
#include "xfs_bit.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_da_format.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_health.h"
|
||||
#include "xfs_exchmaps.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/xfs_scrub.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/iscan.h"
|
||||
#include "scrub/findparent.h"
|
||||
#include "scrub/readdir.h"
|
||||
#include "scrub/tempfile.h"
|
||||
#include "scrub/listxattr.h"
|
||||
|
||||
/*
|
||||
* Finding the Parent of a Directory
|
||||
* =================================
|
||||
*
|
||||
* Directories have parent pointers, in the sense that each directory contains
|
||||
* a dotdot entry that points to the single allowed parent. The brute force
|
||||
* way to find the parent of a given directory is to scan every directory in
|
||||
* the filesystem looking for a child dirent that references this directory.
|
||||
*
|
||||
* This module wraps the process of scanning the directory tree. It requires
|
||||
* that @sc->ip is the directory whose parent we want to find, and that the
|
||||
* caller hold only the IOLOCK on that directory. The scan itself needs to
|
||||
* take the ILOCK of each directory visited.
|
||||
*
|
||||
* Because we cannot hold @sc->ip's ILOCK during a scan of the whole fs, it is
|
||||
* necessary to use dirent hook to update the parent scan results. Callers
|
||||
* must not read the scan results without re-taking @sc->ip's ILOCK.
|
||||
*
|
||||
* There are a few shortcuts that we can take to avoid scanning the entire
|
||||
* filesystem, such as noticing directory tree roots and querying the dentry
|
||||
* cache for parent information.
|
||||
*/
|
||||
|
||||
struct xrep_findparent_info {
|
||||
/* The directory currently being scanned. */
|
||||
struct xfs_inode *dp;
|
||||
|
||||
/*
|
||||
* Scrub context. We're looking for a @dp containing a directory
|
||||
* entry pointing to sc->ip->i_ino.
|
||||
*/
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
/* Optional scan information for a xrep_findparent_scan call. */
|
||||
struct xrep_parent_scan_info *parent_scan;
|
||||
|
||||
/*
|
||||
* Parent that we've found for sc->ip. If we're scanning the entire
|
||||
* directory tree, we need this to ensure that we only find /one/
|
||||
* parent directory.
|
||||
*/
|
||||
xfs_ino_t found_parent;
|
||||
|
||||
/*
|
||||
* This is set to true if @found_parent was not observed directly from
|
||||
* the directory scan but by noticing a change in dotdot entries after
|
||||
* cycling the sc->ip IOLOCK.
|
||||
*/
|
||||
bool parent_tentative;
|
||||
};
|
||||
|
||||
/*
|
||||
* If this directory entry points to the scrub target inode, then the directory
|
||||
* we're scanning is the parent of the scrub target inode.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_findparent_dirent(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *dp,
|
||||
xfs_dir2_dataptr_t dapos,
|
||||
const struct xfs_name *name,
|
||||
xfs_ino_t ino,
|
||||
void *priv)
|
||||
{
|
||||
struct xrep_findparent_info *fpi = priv;
|
||||
int error = 0;
|
||||
|
||||
if (xchk_should_terminate(fpi->sc, &error))
|
||||
return error;
|
||||
|
||||
if (ino != fpi->sc->ip->i_ino)
|
||||
return 0;
|
||||
|
||||
/* Ignore garbage directory entry names. */
|
||||
if (name->len == 0 || !xfs_dir2_namecheck(name->name, name->len))
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
/*
|
||||
* Ignore dotdot and dot entries -- we're looking for parent -> child
|
||||
* links only.
|
||||
*/
|
||||
if (name->name[0] == '.' && (name->len == 1 ||
|
||||
(name->len == 2 && name->name[1] == '.')))
|
||||
return 0;
|
||||
|
||||
/* Uhoh, more than one parent for a dir? */
|
||||
if (fpi->found_parent != NULLFSINO &&
|
||||
!(fpi->parent_tentative && fpi->found_parent == fpi->dp->i_ino)) {
|
||||
trace_xrep_findparent_dirent(fpi->sc->ip, 0);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/* We found a potential parent; remember this. */
|
||||
trace_xrep_findparent_dirent(fpi->sc->ip, fpi->dp->i_ino);
|
||||
fpi->found_parent = fpi->dp->i_ino;
|
||||
fpi->parent_tentative = false;
|
||||
|
||||
if (fpi->parent_scan)
|
||||
xrep_findparent_scan_found(fpi->parent_scan, fpi->dp->i_ino);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a directory, walk the dirents looking for any that point to the
|
||||
* scrub target inode.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_findparent_walk_directory(
|
||||
struct xrep_findparent_info *fpi)
|
||||
{
|
||||
struct xfs_scrub *sc = fpi->sc;
|
||||
struct xfs_inode *dp = fpi->dp;
|
||||
unsigned int lock_mode;
|
||||
int error = 0;
|
||||
|
||||
/*
|
||||
* The inode being scanned cannot be its own parent, nor can any
|
||||
* temporary directory we created to stage this repair.
|
||||
*/
|
||||
if (dp == sc->ip || dp == sc->tempip)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Similarly, temporary files created to stage a repair cannot be the
|
||||
* parent of this inode.
|
||||
*/
|
||||
if (xrep_is_tempfile(dp))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Scan the directory to see if there it contains an entry pointing to
|
||||
* the directory that we are repairing.
|
||||
*/
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
|
||||
/*
|
||||
* If this directory is known to be sick, we cannot scan it reliably
|
||||
* and must abort.
|
||||
*/
|
||||
if (xfs_inode_has_sickness(dp, XFS_SICK_INO_CORE |
|
||||
XFS_SICK_INO_BMBTD |
|
||||
XFS_SICK_INO_DIR)) {
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot complete our parent pointer scan if a directory looks as
|
||||
* though it has been zapped by the inode record repair code.
|
||||
*/
|
||||
if (xchk_dir_looks_zapped(dp)) {
|
||||
error = -EBUSY;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
error = xchk_dir_walk(sc, dp, xrep_findparent_dirent, fpi);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update this directory's dotdot pointer based on ongoing dirent updates.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_findparent_live_update(
|
||||
struct notifier_block *nb,
|
||||
unsigned long action,
|
||||
void *data)
|
||||
{
|
||||
struct xfs_dir_update_params *p = data;
|
||||
struct xrep_parent_scan_info *pscan;
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
pscan = container_of(nb, struct xrep_parent_scan_info,
|
||||
dhook.dirent_hook.nb);
|
||||
sc = pscan->sc;
|
||||
|
||||
/*
|
||||
* If @p->ip is the subdirectory that we're interested in and we've
|
||||
* already scanned @p->dp, update the dotdot target inumber to the
|
||||
* parent inode.
|
||||
*/
|
||||
if (p->ip->i_ino == sc->ip->i_ino &&
|
||||
xchk_iscan_want_live_update(&pscan->iscan, p->dp->i_ino)) {
|
||||
if (p->delta > 0) {
|
||||
xrep_findparent_scan_found(pscan, p->dp->i_ino);
|
||||
} else {
|
||||
xrep_findparent_scan_found(pscan, NULLFSINO);
|
||||
}
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up a scan to find the parent of a directory. The provided dirent hook
|
||||
* will be called when there is a dotdot update for the inode being repaired.
|
||||
*/
|
||||
int
|
||||
__xrep_findparent_scan_start(
|
||||
struct xfs_scrub *sc,
|
||||
struct xrep_parent_scan_info *pscan,
|
||||
notifier_fn_t custom_fn)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (!(sc->flags & XCHK_FSGATES_DIRENTS)) {
|
||||
ASSERT(sc->flags & XCHK_FSGATES_DIRENTS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pscan->sc = sc;
|
||||
pscan->parent_ino = NULLFSINO;
|
||||
|
||||
mutex_init(&pscan->lock);
|
||||
|
||||
xchk_iscan_start(sc, 30000, 100, &pscan->iscan);
|
||||
|
||||
/*
|
||||
* Hook into the dirent update code. The hook only operates on inodes
|
||||
* that were already scanned, and the scanner thread takes each inode's
|
||||
* ILOCK, which means that any in-progress inode updates will finish
|
||||
* before we can scan the inode.
|
||||
*/
|
||||
if (custom_fn)
|
||||
xfs_dir_hook_setup(&pscan->dhook, custom_fn);
|
||||
else
|
||||
xfs_dir_hook_setup(&pscan->dhook, xrep_findparent_live_update);
|
||||
error = xfs_dir_hook_add(sc->mp, &pscan->dhook);
|
||||
if (error)
|
||||
goto out_iscan;
|
||||
|
||||
return 0;
|
||||
out_iscan:
|
||||
xchk_iscan_teardown(&pscan->iscan);
|
||||
mutex_destroy(&pscan->lock);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan the entire filesystem looking for a parent inode for the inode being
|
||||
* scrubbed. @sc->ip must not be the root of a directory tree. Callers must
|
||||
* not hold a dirty transaction or any lock that would interfere with taking
|
||||
* an ILOCK.
|
||||
*
|
||||
* Returns 0 with @pscan->parent_ino set to the parent that we found.
|
||||
* Returns 0 with @pscan->parent_ino set to NULLFSINO if we found no parents.
|
||||
* Returns the usual negative errno if something else happened.
|
||||
*/
|
||||
int
|
||||
xrep_findparent_scan(
|
||||
struct xrep_parent_scan_info *pscan)
|
||||
{
|
||||
struct xrep_findparent_info fpi = {
|
||||
.sc = pscan->sc,
|
||||
.found_parent = NULLFSINO,
|
||||
.parent_scan = pscan,
|
||||
};
|
||||
struct xfs_scrub *sc = pscan->sc;
|
||||
int ret;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_IC(sc->ip)->i_mode));
|
||||
|
||||
while ((ret = xchk_iscan_iter(&pscan->iscan, &fpi.dp)) == 1) {
|
||||
if (S_ISDIR(VFS_I(fpi.dp)->i_mode))
|
||||
ret = xrep_findparent_walk_directory(&fpi);
|
||||
else
|
||||
ret = 0;
|
||||
xchk_iscan_mark_visited(&pscan->iscan, fpi.dp);
|
||||
xchk_irele(sc, fpi.dp);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (xchk_should_terminate(sc, &ret))
|
||||
break;
|
||||
}
|
||||
xchk_iscan_iter_finish(&pscan->iscan);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Tear down a parent scan. */
|
||||
void
|
||||
xrep_findparent_scan_teardown(
|
||||
struct xrep_parent_scan_info *pscan)
|
||||
{
|
||||
xfs_dir_hook_del(pscan->sc->mp, &pscan->dhook);
|
||||
xchk_iscan_teardown(&pscan->iscan);
|
||||
mutex_destroy(&pscan->lock);
|
||||
}
|
||||
|
||||
/* Finish a parent scan early. */
|
||||
void
|
||||
xrep_findparent_scan_finish_early(
|
||||
struct xrep_parent_scan_info *pscan,
|
||||
xfs_ino_t ino)
|
||||
{
|
||||
xrep_findparent_scan_found(pscan, ino);
|
||||
xchk_iscan_finish_early(&pscan->iscan);
|
||||
}
|
||||
|
||||
/*
|
||||
* Confirm that the directory @parent_ino actually contains a directory entry
|
||||
* pointing to the child @sc->ip->ino. This function returns one of several
|
||||
* ways:
|
||||
*
|
||||
* Returns 0 with @parent_ino unchanged if the parent was confirmed.
|
||||
* Returns 0 with @parent_ino set to NULLFSINO if the parent was not valid.
|
||||
* Returns the usual negative errno if something else happened.
|
||||
*/
|
||||
int
|
||||
xrep_findparent_confirm(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_ino_t *parent_ino)
|
||||
{
|
||||
struct xrep_findparent_info fpi = {
|
||||
.sc = sc,
|
||||
.found_parent = NULLFSINO,
|
||||
};
|
||||
int error;
|
||||
|
||||
/*
|
||||
* The root directory always points to itself. Unlinked dirs can point
|
||||
* anywhere, so we point them at the root dir too.
|
||||
*/
|
||||
if (sc->ip == sc->mp->m_rootip || VFS_I(sc->ip)->i_nlink == 0) {
|
||||
*parent_ino = sc->mp->m_sb.sb_rootino;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reject garbage parent inode numbers and self-referential parents. */
|
||||
if (*parent_ino == NULLFSINO)
|
||||
return 0;
|
||||
if (!xfs_verify_dir_ino(sc->mp, *parent_ino) ||
|
||||
*parent_ino == sc->ip->i_ino) {
|
||||
*parent_ino = NULLFSINO;
|
||||
return 0;
|
||||
}
|
||||
|
||||
error = xchk_iget(sc, *parent_ino, &fpi.dp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!S_ISDIR(VFS_I(fpi.dp)->i_mode)) {
|
||||
*parent_ino = NULLFSINO;
|
||||
goto out_rele;
|
||||
}
|
||||
|
||||
error = xrep_findparent_walk_directory(&fpi);
|
||||
if (error)
|
||||
goto out_rele;
|
||||
|
||||
*parent_ino = fpi.found_parent;
|
||||
out_rele:
|
||||
xchk_irele(sc, fpi.dp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're the root of a directory tree, we are our own parent. If we're an
|
||||
* unlinked directory, the parent /won't/ have a link to us. Set the parent
|
||||
* directory to the root for both cases. Returns NULLFSINO if we don't know
|
||||
* what to do.
|
||||
*/
|
||||
xfs_ino_t
|
||||
xrep_findparent_self_reference(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
if (sc->ip->i_ino == sc->mp->m_sb.sb_rootino)
|
||||
return sc->mp->m_sb.sb_rootino;
|
||||
|
||||
if (VFS_I(sc->ip)->i_nlink == 0)
|
||||
return sc->mp->m_sb.sb_rootino;
|
||||
|
||||
return NULLFSINO;
|
||||
}
|
||||
|
||||
/* Check the dentry cache to see if knows of a parent for the scrub target. */
|
||||
xfs_ino_t
|
||||
xrep_findparent_from_dcache(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct inode *pip = NULL;
|
||||
struct dentry *dentry, *parent;
|
||||
xfs_ino_t ret = NULLFSINO;
|
||||
|
||||
dentry = d_find_alias(VFS_I(sc->ip));
|
||||
if (!dentry)
|
||||
goto out;
|
||||
|
||||
parent = dget_parent(dentry);
|
||||
if (!parent)
|
||||
goto out_dput;
|
||||
|
||||
ASSERT(parent->d_sb == sc->ip->i_mount->m_super);
|
||||
|
||||
pip = igrab(d_inode(parent));
|
||||
dput(parent);
|
||||
|
||||
if (S_ISDIR(pip->i_mode)) {
|
||||
trace_xrep_findparent_from_dcache(sc->ip, XFS_I(pip)->i_ino);
|
||||
ret = XFS_I(pip)->i_ino;
|
||||
}
|
||||
|
||||
xchk_irele(sc, XFS_I(pip));
|
||||
|
||||
out_dput:
|
||||
dput(dentry);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
56
fs/xfs/scrub/findparent.h
Normal file
56
fs/xfs/scrub/findparent.h
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (c) 2020-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_FINDPARENT_H__
|
||||
#define __XFS_SCRUB_FINDPARENT_H__
|
||||
|
||||
struct xrep_parent_scan_info {
|
||||
struct xfs_scrub *sc;
|
||||
|
||||
/* Inode scan cursor. */
|
||||
struct xchk_iscan iscan;
|
||||
|
||||
/* Hook to capture directory entry updates. */
|
||||
struct xfs_dir_hook dhook;
|
||||
|
||||
/* Lock protecting parent_ino. */
|
||||
struct mutex lock;
|
||||
|
||||
/* Parent inode that we've found. */
|
||||
xfs_ino_t parent_ino;
|
||||
|
||||
bool lookup_parent;
|
||||
};
|
||||
|
||||
int __xrep_findparent_scan_start(struct xfs_scrub *sc,
|
||||
struct xrep_parent_scan_info *pscan,
|
||||
notifier_fn_t custom_fn);
|
||||
static inline int xrep_findparent_scan_start(struct xfs_scrub *sc,
|
||||
struct xrep_parent_scan_info *pscan)
|
||||
{
|
||||
return __xrep_findparent_scan_start(sc, pscan, NULL);
|
||||
}
|
||||
int xrep_findparent_scan(struct xrep_parent_scan_info *pscan);
|
||||
void xrep_findparent_scan_teardown(struct xrep_parent_scan_info *pscan);
|
||||
|
||||
static inline void
|
||||
xrep_findparent_scan_found(
|
||||
struct xrep_parent_scan_info *pscan,
|
||||
xfs_ino_t ino)
|
||||
{
|
||||
mutex_lock(&pscan->lock);
|
||||
pscan->parent_ino = ino;
|
||||
mutex_unlock(&pscan->lock);
|
||||
}
|
||||
|
||||
void xrep_findparent_scan_finish_early(struct xrep_parent_scan_info *pscan,
|
||||
xfs_ino_t ino);
|
||||
|
||||
int xrep_findparent_confirm(struct xfs_scrub *sc, xfs_ino_t *parent_ino);
|
||||
|
||||
xfs_ino_t xrep_findparent_self_reference(struct xfs_scrub *sc);
|
||||
xfs_ino_t xrep_findparent_from_dcache(struct xfs_scrub *sc);
|
||||
|
||||
#endif /* __XFS_SCRUB_FINDPARENT_H__ */
|
||||
|
|
@ -85,7 +85,7 @@ xchk_fscount_warmup(
|
|||
continue;
|
||||
|
||||
/* Lock both AG headers. */
|
||||
error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
|
||||
error = xfs_ialloc_read_agi(pag, sc->tp, 0, &agi_bp);
|
||||
if (error)
|
||||
break;
|
||||
error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
|
||||
|
|
@ -412,10 +412,11 @@ xchk_fscount_count_frextents(
|
|||
int error;
|
||||
|
||||
fsc->frextents = 0;
|
||||
fsc->frextents_delayed = 0;
|
||||
if (!xfs_has_realtime(mp))
|
||||
return 0;
|
||||
|
||||
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
|
||||
xfs_rtbitmap_lock_shared(sc->mp, XFS_RBMLOCK_BITMAP);
|
||||
error = xfs_rtalloc_query_all(sc->mp, sc->tp,
|
||||
xchk_fscount_add_frextent, fsc);
|
||||
if (error) {
|
||||
|
|
@ -423,8 +424,10 @@ xchk_fscount_count_frextents(
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
fsc->frextents_delayed = percpu_counter_sum(&mp->m_delalloc_rtextents);
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
|
||||
xfs_rtbitmap_unlock_shared(sc->mp, XFS_RBMLOCK_BITMAP);
|
||||
return error;
|
||||
}
|
||||
#else
|
||||
|
|
@ -434,6 +437,7 @@ xchk_fscount_count_frextents(
|
|||
struct xchk_fscounters *fsc)
|
||||
{
|
||||
fsc->frextents = 0;
|
||||
fsc->frextents_delayed = 0;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_XFS_RT */
|
||||
|
|
@ -517,7 +521,7 @@ xchk_fscounters(
|
|||
|
||||
/*
|
||||
* If the filesystem is not frozen, the counter summation calls above
|
||||
* can race with xfs_mod_freecounter, which subtracts a requested space
|
||||
* can race with xfs_dec_freecounter, which subtracts a requested space
|
||||
* reservation from the counter and undoes the subtraction if that made
|
||||
* the counter go negative. Therefore, it's possible to see negative
|
||||
* values here, and we should only flag that as a corruption if we
|
||||
|
|
@ -593,7 +597,7 @@ xchk_fscounters(
|
|||
}
|
||||
|
||||
if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
|
||||
fsc->frextents)) {
|
||||
fsc->frextents - fsc->frextents_delayed)) {
|
||||
if (fsc->frozen)
|
||||
xchk_set_corrupt(sc);
|
||||
else
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ struct xchk_fscounters {
|
|||
uint64_t ifree;
|
||||
uint64_t fdblocks;
|
||||
uint64_t frextents;
|
||||
uint64_t frextents_delayed;
|
||||
unsigned long long icount_min;
|
||||
unsigned long long icount_max;
|
||||
bool frozen;
|
||||
|
|
|
|||
|
|
@ -65,7 +65,17 @@ xrep_fscounters(
|
|||
percpu_counter_set(&mp->m_icount, fsc->icount);
|
||||
percpu_counter_set(&mp->m_ifree, fsc->ifree);
|
||||
percpu_counter_set(&mp->m_fdblocks, fsc->fdblocks);
|
||||
percpu_counter_set(&mp->m_frextents, fsc->frextents);
|
||||
|
||||
/*
|
||||
* Online repair is only supported on v5 file systems, which require
|
||||
* lazy sb counters and thus no update of sb_fdblocks here. But as of
|
||||
* now we don't support lazy counting sb_frextents yet, and thus need
|
||||
* to also update it directly here. And for that we need to keep
|
||||
* track of the delalloc reservations separately, as they are are
|
||||
* subtracted from m_frextents, but not included in sb_frextents.
|
||||
*/
|
||||
percpu_counter_set(&mp->m_frextents,
|
||||
fsc->frextents - fsc->frextents_delayed);
|
||||
mp->m_sb.sb_frextents = fsc->frextents;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -108,6 +108,7 @@ static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = {
|
|||
[XFS_SCRUB_TYPE_FSCOUNTERS] = { XHG_FS, XFS_SICK_FS_COUNTERS },
|
||||
[XFS_SCRUB_TYPE_QUOTACHECK] = { XHG_FS, XFS_SICK_FS_QUOTACHECK },
|
||||
[XFS_SCRUB_TYPE_NLINKS] = { XHG_FS, XFS_SICK_FS_NLINKS },
|
||||
[XFS_SCRUB_TYPE_DIRTREE] = { XHG_INO, XFS_SICK_INO_DIRTREE },
|
||||
};
|
||||
|
||||
/* Return the health status mask for this scrub type. */
|
||||
|
|
|
|||
37
fs/xfs/scrub/ino_bitmap.h
Normal file
37
fs/xfs/scrub/ino_bitmap.h
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_INO_BITMAP_H__
|
||||
#define __XFS_SCRUB_INO_BITMAP_H__
|
||||
|
||||
/* Bitmaps, but for type-checked for xfs_ino_t */
|
||||
|
||||
struct xino_bitmap {
|
||||
struct xbitmap64 inobitmap;
|
||||
};
|
||||
|
||||
static inline void xino_bitmap_init(struct xino_bitmap *bitmap)
|
||||
{
|
||||
xbitmap64_init(&bitmap->inobitmap);
|
||||
}
|
||||
|
||||
static inline void xino_bitmap_destroy(struct xino_bitmap *bitmap)
|
||||
{
|
||||
xbitmap64_destroy(&bitmap->inobitmap);
|
||||
}
|
||||
|
||||
static inline int xino_bitmap_set(struct xino_bitmap *bitmap, xfs_ino_t ino)
|
||||
{
|
||||
return xbitmap64_set(&bitmap->inobitmap, ino, 1);
|
||||
}
|
||||
|
||||
static inline int xino_bitmap_test(struct xino_bitmap *bitmap, xfs_ino_t ino)
|
||||
{
|
||||
uint64_t len = 1;
|
||||
|
||||
return xbitmap64_test(&bitmap->inobitmap, ino, &len);
|
||||
}
|
||||
|
||||
#endif /* __XFS_SCRUB_INO_BITMAP_H__ */
|
||||
|
|
@ -739,6 +739,23 @@ xchk_inode_check_reflink_iflag(
|
|||
xchk_ino_set_corrupt(sc, ino);
|
||||
}
|
||||
|
||||
/*
|
||||
* If this inode has zero link count, it must be on the unlinked list. If
|
||||
* it has nonzero link count, it must not be on the unlinked list.
|
||||
*/
|
||||
STATIC void
|
||||
xchk_inode_check_unlinked(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
if (VFS_I(sc->ip)->i_nlink == 0) {
|
||||
if (!xfs_inode_on_unlinked_list(sc->ip))
|
||||
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
|
||||
} else {
|
||||
if (xfs_inode_on_unlinked_list(sc->ip))
|
||||
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
|
||||
}
|
||||
}
|
||||
|
||||
/* Scrub an inode. */
|
||||
int
|
||||
xchk_inode(
|
||||
|
|
@ -771,6 +788,8 @@ xchk_inode(
|
|||
if (S_ISREG(VFS_I(sc->ip)->i_mode))
|
||||
xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino);
|
||||
|
||||
xchk_inode_check_unlinked(sc);
|
||||
|
||||
xchk_inode_xref(sc, sc->ip->i_ino, &di);
|
||||
out:
|
||||
return error;
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@
|
|||
#include "scrub/repair.h"
|
||||
#include "scrub/iscan.h"
|
||||
#include "scrub/readdir.h"
|
||||
#include "scrub/tempfile.h"
|
||||
|
||||
/*
|
||||
* Inode Record Repair
|
||||
|
|
@ -282,6 +283,51 @@ xrep_dinode_findmode_dirent(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Try to lock a directory, or wait a jiffy. */
|
||||
static inline int
|
||||
xrep_dinode_ilock_nowait(
|
||||
struct xfs_inode *dp,
|
||||
unsigned int lock_mode)
|
||||
{
|
||||
if (xfs_ilock_nowait(dp, lock_mode))
|
||||
return true;
|
||||
|
||||
schedule_timeout_killable(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to lock a directory to look for ftype hints. Since we already hold the
|
||||
* AGI buffer, we cannot block waiting for the ILOCK because rename can take
|
||||
* the ILOCK and then try to lock AGIs.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_dinode_trylock_directory(
|
||||
struct xrep_inode *ri,
|
||||
struct xfs_inode *dp,
|
||||
unsigned int *lock_modep)
|
||||
{
|
||||
unsigned long deadline = jiffies + msecs_to_jiffies(30000);
|
||||
unsigned int lock_mode;
|
||||
int error = 0;
|
||||
|
||||
do {
|
||||
if (xchk_should_terminate(ri->sc, &error))
|
||||
return error;
|
||||
|
||||
if (xfs_need_iread_extents(&dp->i_df))
|
||||
lock_mode = XFS_ILOCK_EXCL;
|
||||
else
|
||||
lock_mode = XFS_ILOCK_SHARED;
|
||||
|
||||
if (xrep_dinode_ilock_nowait(dp, lock_mode)) {
|
||||
*lock_modep = lock_mode;
|
||||
return 0;
|
||||
}
|
||||
} while (!time_is_before_jiffies(deadline));
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a directory, walk the dirents looking for any that point to the
|
||||
* scrub target inode.
|
||||
|
|
@ -295,11 +341,17 @@ xrep_dinode_findmode_walk_directory(
|
|||
unsigned int lock_mode;
|
||||
int error = 0;
|
||||
|
||||
/* Ignore temporary repair directories. */
|
||||
if (xrep_is_tempfile(dp))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Scan the directory to see if there it contains an entry pointing to
|
||||
* the directory that we are repairing.
|
||||
*/
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
error = xrep_dinode_trylock_directory(ri, dp, &lock_mode);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If this directory is known to be sick, we cannot scan it reliably
|
||||
|
|
@ -356,6 +408,7 @@ xrep_dinode_find_mode(
|
|||
* so there's a real possibility that _iscan_iter can return EBUSY.
|
||||
*/
|
||||
xchk_iscan_start(sc, 5000, 100, &ri->ftype_iscan);
|
||||
xchk_iscan_set_agi_trylock(&ri->ftype_iscan);
|
||||
ri->ftype_iscan.skip_ino = sc->sm->sm_ino;
|
||||
ri->alleged_ftype = XFS_DIR3_FT_UNKNOWN;
|
||||
while ((error = xchk_iscan_iter(&ri->ftype_iscan, &dp)) == 1) {
|
||||
|
|
@ -463,6 +516,17 @@ xrep_dinode_mode(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Fix unused link count fields having nonzero values. */
|
||||
STATIC void
|
||||
xrep_dinode_nlinks(
|
||||
struct xfs_dinode *dip)
|
||||
{
|
||||
if (dip->di_version > 1)
|
||||
dip->di_onlink = 0;
|
||||
else
|
||||
dip->di_nlink = 0;
|
||||
}
|
||||
|
||||
/* Fix any conflicting flags that the verifiers complain about. */
|
||||
STATIC void
|
||||
xrep_dinode_flags(
|
||||
|
|
@ -1324,6 +1388,7 @@ xrep_dinode_core(
|
|||
iget_error = xrep_dinode_mode(ri, dip);
|
||||
if (iget_error)
|
||||
goto write;
|
||||
xrep_dinode_nlinks(dip);
|
||||
xrep_dinode_flags(sc, dip, ri->rt_extents > 0);
|
||||
xrep_dinode_size(ri, dip);
|
||||
xrep_dinode_extsize_hints(sc, dip);
|
||||
|
|
@ -1671,6 +1736,44 @@ xrep_inode_extsize(
|
|||
}
|
||||
}
|
||||
|
||||
/* Ensure this file has an attr fork if it needs to hold a parent pointer. */
|
||||
STATIC int
|
||||
xrep_inode_pptr(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_inode *ip = sc->ip;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
if (!xfs_has_parent(mp))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Unlinked inodes that cannot be added to the directory tree will not
|
||||
* have a parent pointer.
|
||||
*/
|
||||
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
|
||||
return 0;
|
||||
|
||||
/* The root directory doesn't have a parent pointer. */
|
||||
if (ip == mp->m_rootip)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Metadata inodes are rooted in the superblock and do not have any
|
||||
* parents.
|
||||
*/
|
||||
if (xfs_is_metadata_inode(ip))
|
||||
return 0;
|
||||
|
||||
/* Inode already has an attr fork; no further work possible here. */
|
||||
if (xfs_inode_has_attr_fork(ip))
|
||||
return 0;
|
||||
|
||||
return xfs_bmap_add_attrfork(sc->tp, ip,
|
||||
sizeof(struct xfs_attr_sf_hdr), true);
|
||||
}
|
||||
|
||||
/* Fix any irregularities in an inode that the verifiers don't catch. */
|
||||
STATIC int
|
||||
xrep_inode_problems(
|
||||
|
|
@ -1679,6 +1782,9 @@ xrep_inode_problems(
|
|||
int error;
|
||||
|
||||
error = xrep_inode_blockcounts(sc);
|
||||
if (error)
|
||||
return error;
|
||||
error = xrep_inode_pptr(sc);
|
||||
if (error)
|
||||
return error;
|
||||
xrep_inode_timestamps(sc->ip);
|
||||
|
|
@ -1697,6 +1803,46 @@ xrep_inode_problems(
|
|||
return xrep_roll_trans(sc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure this inode's unlinked list pointers are consistent with its
|
||||
* link count.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_inode_unlinked(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
unsigned int nlink = VFS_I(sc->ip)->i_nlink;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* If this inode is linked from the directory tree and on the unlinked
|
||||
* list, remove it from the unlinked list.
|
||||
*/
|
||||
if (nlink > 0 && xfs_inode_on_unlinked_list(sc->ip)) {
|
||||
struct xfs_perag *pag;
|
||||
int error;
|
||||
|
||||
pag = xfs_perag_get(sc->mp,
|
||||
XFS_INO_TO_AGNO(sc->mp, sc->ip->i_ino));
|
||||
error = xfs_iunlink_remove(sc->tp, pag, sc->ip);
|
||||
xfs_perag_put(pag);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this inode is not linked from the directory tree yet not on the
|
||||
* unlinked list, put it on the unlinked list.
|
||||
*/
|
||||
if (nlink == 0 && !xfs_inode_on_unlinked_list(sc->ip)) {
|
||||
error = xfs_iunlink(sc->tp, sc->ip);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Repair an inode's fields. */
|
||||
int
|
||||
xrep_inode(
|
||||
|
|
@ -1746,5 +1892,10 @@ xrep_inode(
|
|||
return error;
|
||||
}
|
||||
|
||||
/* Reconnect incore unlinked list */
|
||||
error = xrep_inode_unlinked(sc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return xrep_defer_finish(sc);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -243,6 +243,51 @@ xchk_iscan_finish(
|
|||
mutex_unlock(&iscan->lock);
|
||||
}
|
||||
|
||||
/* Mark an inode scan finished before we actually scan anything. */
|
||||
void
|
||||
xchk_iscan_finish_early(
|
||||
struct xchk_iscan *iscan)
|
||||
{
|
||||
ASSERT(iscan->cursor_ino == iscan->scan_start_ino);
|
||||
ASSERT(iscan->__visited_ino == iscan->scan_start_ino);
|
||||
|
||||
xchk_iscan_finish(iscan);
|
||||
}
|
||||
|
||||
/*
|
||||
* Grab the AGI to advance the inode scan. Returns 0 if *agi_bpp is now set,
|
||||
* -ECANCELED if the live scan aborted, -EBUSY if the AGI could not be grabbed,
|
||||
* or the usual negative errno.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_iscan_read_agi(
|
||||
struct xchk_iscan *iscan,
|
||||
struct xfs_perag *pag,
|
||||
struct xfs_buf **agi_bpp)
|
||||
{
|
||||
struct xfs_scrub *sc = iscan->sc;
|
||||
unsigned long relax;
|
||||
int ret;
|
||||
|
||||
if (!xchk_iscan_agi_needs_trylock(iscan))
|
||||
return xfs_ialloc_read_agi(pag, sc->tp, 0, agi_bpp);
|
||||
|
||||
relax = msecs_to_jiffies(iscan->iget_retry_delay);
|
||||
do {
|
||||
ret = xfs_ialloc_read_agi(pag, sc->tp, XFS_IALLOC_FLAG_TRYLOCK,
|
||||
agi_bpp);
|
||||
if (ret != -EAGAIN)
|
||||
return ret;
|
||||
if (!iscan->iget_timeout ||
|
||||
time_is_before_jiffies(iscan->__iget_deadline))
|
||||
return -EBUSY;
|
||||
|
||||
trace_xchk_iscan_agi_retry_wait(iscan);
|
||||
} while (!schedule_timeout_killable(relax) &&
|
||||
!xchk_iscan_aborted(iscan));
|
||||
return -ECANCELED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Advance ino to the next inode that the inobt thinks is allocated, being
|
||||
* careful to jump to the next AG if we've reached the right end of this AG's
|
||||
|
|
@ -281,7 +326,7 @@ xchk_iscan_advance(
|
|||
if (!pag)
|
||||
return -ECANCELED;
|
||||
|
||||
ret = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
|
||||
ret = xchk_iscan_read_agi(iscan, pag, &agi_bp);
|
||||
if (ret)
|
||||
goto out_pag;
|
||||
|
||||
|
|
@ -362,6 +407,15 @@ xchk_iscan_iget_retry(
|
|||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/*
|
||||
* For an inode scan, we hold the AGI and want to try to grab a batch of
|
||||
* inodes. Holding the AGI prevents inodegc from clearing freed inodes,
|
||||
* so we must use noretry here. For every inode after the first one in the
|
||||
* batch, we don't want to wait, so we use retry there too. Finally, use
|
||||
* dontcache to avoid polluting the cache.
|
||||
*/
|
||||
#define ISCAN_IGET_FLAGS (XFS_IGET_NORETRY | XFS_IGET_DONTCACHE)
|
||||
|
||||
/*
|
||||
* Grab an inode as part of an inode scan. While scanning this inode, the
|
||||
* caller must ensure that no other threads can modify the inode until a call
|
||||
|
|
@ -389,7 +443,7 @@ xchk_iscan_iget(
|
|||
ASSERT(iscan->__inodes[0] == NULL);
|
||||
|
||||
/* Fill the first slot in the inode array. */
|
||||
error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0,
|
||||
error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
|
||||
&iscan->__inodes[idx]);
|
||||
|
||||
trace_xchk_iscan_iget(iscan, error);
|
||||
|
|
@ -402,8 +456,13 @@ xchk_iscan_iget(
|
|||
* It's possible that this inode has lost all of its links but
|
||||
* hasn't yet been inactivated. If we don't have a transaction
|
||||
* or it's not writable, flush the inodegc workers and wait.
|
||||
* If we have a non-empty transaction, we must not block on
|
||||
* inodegc, which allocates its own transactions.
|
||||
*/
|
||||
xfs_inodegc_flush(mp);
|
||||
if (sc->tp && !(sc->tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
|
||||
xfs_inodegc_push(mp);
|
||||
else
|
||||
xfs_inodegc_flush(mp);
|
||||
return xchk_iscan_iget_retry(iscan, true);
|
||||
}
|
||||
|
||||
|
|
@ -457,7 +516,7 @@ xchk_iscan_iget(
|
|||
|
||||
ASSERT(iscan->__inodes[idx] == NULL);
|
||||
|
||||
error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0,
|
||||
error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
|
||||
&iscan->__inodes[idx]);
|
||||
if (error)
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -59,6 +59,9 @@ struct xchk_iscan {
|
|||
/* Set if the scan has been aborted due to some event in the fs. */
|
||||
#define XCHK_ISCAN_OPSTATE_ABORTED (1)
|
||||
|
||||
/* Use trylock to acquire the AGI */
|
||||
#define XCHK_ISCAN_OPSTATE_TRYLOCK_AGI (2)
|
||||
|
||||
static inline bool
|
||||
xchk_iscan_aborted(const struct xchk_iscan *iscan)
|
||||
{
|
||||
|
|
@ -71,8 +74,21 @@ xchk_iscan_abort(struct xchk_iscan *iscan)
|
|||
set_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
xchk_iscan_agi_needs_trylock(const struct xchk_iscan *iscan)
|
||||
{
|
||||
return test_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
|
||||
}
|
||||
|
||||
static inline void
|
||||
xchk_iscan_set_agi_trylock(struct xchk_iscan *iscan)
|
||||
{
|
||||
set_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
|
||||
}
|
||||
|
||||
void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout,
|
||||
unsigned int iget_retry_delay, struct xchk_iscan *iscan);
|
||||
void xchk_iscan_finish_early(struct xchk_iscan *iscan);
|
||||
void xchk_iscan_teardown(struct xchk_iscan *iscan);
|
||||
|
||||
int xchk_iscan_iter(struct xchk_iscan *iscan, struct xfs_inode **ipp);
|
||||
|
|
|
|||
320
fs/xfs/scrub/listxattr.c
Normal file
320
fs/xfs/scrub/listxattr.c
Normal file
|
|
@ -0,0 +1,320 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_da_format.h"
|
||||
#include "xfs_da_btree.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_attr_leaf.h"
|
||||
#include "xfs_attr_sf.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/bitmap.h"
|
||||
#include "scrub/dab_bitmap.h"
|
||||
#include "scrub/listxattr.h"
|
||||
|
||||
/* Call a function for every entry in a shortform xattr structure. */
|
||||
STATIC int
|
||||
xchk_xattr_walk_sf(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_attr_sf_hdr *hdr = ip->i_af.if_data;
|
||||
struct xfs_attr_sf_entry *sfe;
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
sfe = xfs_attr_sf_firstentry(hdr);
|
||||
for (i = 0; i < hdr->count; i++) {
|
||||
error = attr_fn(sc, ip, sfe->flags, sfe->nameval, sfe->namelen,
|
||||
&sfe->nameval[sfe->namelen], sfe->valuelen,
|
||||
priv);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
sfe = xfs_attr_sf_nextentry(sfe);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Call a function for every entry in this xattr leaf block. */
|
||||
STATIC int
|
||||
xchk_xattr_walk_leaf_entries(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn,
|
||||
struct xfs_buf *bp,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_attr3_icleaf_hdr ichdr;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_attr_leafblock *leaf = bp->b_addr;
|
||||
struct xfs_attr_leaf_entry *entry;
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
|
||||
entry = xfs_attr3_leaf_entryp(leaf);
|
||||
|
||||
for (i = 0; i < ichdr.count; entry++, i++) {
|
||||
void *value;
|
||||
unsigned char *name;
|
||||
unsigned int namelen, valuelen;
|
||||
|
||||
if (entry->flags & XFS_ATTR_LOCAL) {
|
||||
struct xfs_attr_leaf_name_local *name_loc;
|
||||
|
||||
name_loc = xfs_attr3_leaf_name_local(leaf, i);
|
||||
name = name_loc->nameval;
|
||||
namelen = name_loc->namelen;
|
||||
value = &name_loc->nameval[name_loc->namelen];
|
||||
valuelen = be16_to_cpu(name_loc->valuelen);
|
||||
} else {
|
||||
struct xfs_attr_leaf_name_remote *name_rmt;
|
||||
|
||||
name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
|
||||
name = name_rmt->name;
|
||||
namelen = name_rmt->namelen;
|
||||
value = NULL;
|
||||
valuelen = be32_to_cpu(name_rmt->valuelen);
|
||||
}
|
||||
|
||||
error = attr_fn(sc, ip, entry->flags, name, namelen, value,
|
||||
valuelen, priv);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call a function for every entry in a leaf-format xattr structure. Avoid
|
||||
* memory allocations for the loop detector since there's only one block.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_xattr_walk_leaf(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_buf *leaf_bp;
|
||||
int error;
|
||||
|
||||
error = xfs_attr3_leaf_read(sc->tp, ip, ip->i_ino, 0, &leaf_bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xchk_xattr_walk_leaf_entries(sc, ip, attr_fn, leaf_bp, priv);
|
||||
xfs_trans_brelse(sc->tp, leaf_bp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Find the leftmost leaf in the xattr dabtree. */
|
||||
STATIC int
|
||||
xchk_xattr_find_leftmost_leaf(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
struct xdab_bitmap *seen_dablks,
|
||||
struct xfs_buf **leaf_bpp)
|
||||
{
|
||||
struct xfs_da3_icnode_hdr nodehdr;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_trans *tp = sc->tp;
|
||||
struct xfs_da_intnode *node;
|
||||
struct xfs_da_node_entry *btree;
|
||||
struct xfs_buf *bp;
|
||||
xfs_failaddr_t fa;
|
||||
xfs_dablk_t blkno = 0;
|
||||
unsigned int expected_level = 0;
|
||||
int error;
|
||||
|
||||
for (;;) {
|
||||
xfs_extlen_t len = 1;
|
||||
uint16_t magic;
|
||||
|
||||
/* Make sure we haven't seen this new block already. */
|
||||
if (xdab_bitmap_test(seen_dablks, blkno, &len))
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
error = xfs_da3_node_read(tp, ip, blkno, &bp, XFS_ATTR_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
node = bp->b_addr;
|
||||
magic = be16_to_cpu(node->hdr.info.magic);
|
||||
if (magic == XFS_ATTR_LEAF_MAGIC ||
|
||||
magic == XFS_ATTR3_LEAF_MAGIC)
|
||||
break;
|
||||
|
||||
error = -EFSCORRUPTED;
|
||||
if (magic != XFS_DA_NODE_MAGIC &&
|
||||
magic != XFS_DA3_NODE_MAGIC)
|
||||
goto out_buf;
|
||||
|
||||
fa = xfs_da3_node_header_check(bp, ip->i_ino);
|
||||
if (fa)
|
||||
goto out_buf;
|
||||
|
||||
xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
|
||||
|
||||
if (nodehdr.count == 0 || nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
|
||||
goto out_buf;
|
||||
|
||||
/* Check the level from the root node. */
|
||||
if (blkno == 0)
|
||||
expected_level = nodehdr.level - 1;
|
||||
else if (expected_level != nodehdr.level)
|
||||
goto out_buf;
|
||||
else
|
||||
expected_level--;
|
||||
|
||||
/* Remember that we've seen this node. */
|
||||
error = xdab_bitmap_set(seen_dablks, blkno, 1);
|
||||
if (error)
|
||||
goto out_buf;
|
||||
|
||||
/* Find the next level towards the leaves of the dabtree. */
|
||||
btree = nodehdr.btree;
|
||||
blkno = be32_to_cpu(btree->before);
|
||||
xfs_trans_brelse(tp, bp);
|
||||
}
|
||||
|
||||
error = -EFSCORRUPTED;
|
||||
fa = xfs_attr3_leaf_header_check(bp, ip->i_ino);
|
||||
if (fa)
|
||||
goto out_buf;
|
||||
|
||||
if (expected_level != 0)
|
||||
goto out_buf;
|
||||
|
||||
/* Remember that we've seen this leaf. */
|
||||
error = xdab_bitmap_set(seen_dablks, blkno, 1);
|
||||
if (error)
|
||||
goto out_buf;
|
||||
|
||||
*leaf_bpp = bp;
|
||||
return 0;
|
||||
|
||||
out_buf:
|
||||
xfs_trans_brelse(tp, bp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Call a function for every entry in a node-format xattr structure. */
|
||||
STATIC int
|
||||
xchk_xattr_walk_node(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn,
|
||||
xchk_xattrleaf_fn leaf_fn,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_attr3_icleaf_hdr leafhdr;
|
||||
struct xdab_bitmap seen_dablks;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_attr_leafblock *leaf;
|
||||
struct xfs_buf *leaf_bp;
|
||||
int error;
|
||||
|
||||
xdab_bitmap_init(&seen_dablks);
|
||||
|
||||
error = xchk_xattr_find_leftmost_leaf(sc, ip, &seen_dablks, &leaf_bp);
|
||||
if (error)
|
||||
goto out_bitmap;
|
||||
|
||||
for (;;) {
|
||||
xfs_extlen_t len;
|
||||
|
||||
error = xchk_xattr_walk_leaf_entries(sc, ip, attr_fn, leaf_bp,
|
||||
priv);
|
||||
if (error)
|
||||
goto out_leaf;
|
||||
|
||||
/* Find the right sibling of this leaf block. */
|
||||
leaf = leaf_bp->b_addr;
|
||||
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
|
||||
if (leafhdr.forw == 0)
|
||||
goto out_leaf;
|
||||
|
||||
xfs_trans_brelse(sc->tp, leaf_bp);
|
||||
|
||||
if (leaf_fn) {
|
||||
error = leaf_fn(sc, priv);
|
||||
if (error)
|
||||
goto out_bitmap;
|
||||
}
|
||||
|
||||
/* Make sure we haven't seen this new leaf already. */
|
||||
len = 1;
|
||||
if (xdab_bitmap_test(&seen_dablks, leafhdr.forw, &len)) {
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_bitmap;
|
||||
}
|
||||
|
||||
error = xfs_attr3_leaf_read(sc->tp, ip, ip->i_ino,
|
||||
leafhdr.forw, &leaf_bp);
|
||||
if (error)
|
||||
goto out_bitmap;
|
||||
|
||||
/* Remember that we've seen this new leaf. */
|
||||
error = xdab_bitmap_set(&seen_dablks, leafhdr.forw, 1);
|
||||
if (error)
|
||||
goto out_leaf;
|
||||
}
|
||||
|
||||
out_leaf:
|
||||
xfs_trans_brelse(sc->tp, leaf_bp);
|
||||
out_bitmap:
|
||||
xdab_bitmap_destroy(&seen_dablks);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call a function for every extended attribute in a file.
|
||||
*
|
||||
* Callers must hold the ILOCK. No validation or cursor restarts allowed.
|
||||
* Returns -EFSCORRUPTED on any problem, including loops in the dabtree.
|
||||
*/
|
||||
int
|
||||
xchk_xattr_walk(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn,
|
||||
xchk_xattrleaf_fn leaf_fn,
|
||||
void *priv)
|
||||
{
|
||||
int error;
|
||||
|
||||
xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
|
||||
|
||||
if (!xfs_inode_hasattr(ip))
|
||||
return 0;
|
||||
|
||||
if (ip->i_af.if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return xchk_xattr_walk_sf(sc, ip, attr_fn, priv);
|
||||
|
||||
/* attr functions require that the attr fork is loaded */
|
||||
error = xfs_iread_extents(sc->tp, ip, XFS_ATTR_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (xfs_attr_is_leaf(ip))
|
||||
return xchk_xattr_walk_leaf(sc, ip, attr_fn, priv);
|
||||
|
||||
return xchk_xattr_walk_node(sc, ip, attr_fn, leaf_fn, priv);
|
||||
}
|
||||
19
fs/xfs/scrub/listxattr.h
Normal file
19
fs/xfs/scrub/listxattr.h
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#ifndef __XFS_SCRUB_LISTXATTR_H__
|
||||
#define __XFS_SCRUB_LISTXATTR_H__
|
||||
|
||||
typedef int (*xchk_xattr_fn)(struct xfs_scrub *sc, struct xfs_inode *ip,
|
||||
unsigned int attr_flags, const unsigned char *name,
|
||||
unsigned int namelen, const void *value, unsigned int valuelen,
|
||||
void *priv);
|
||||
|
||||
typedef int (*xchk_xattrleaf_fn)(struct xfs_scrub *sc, void *priv);
|
||||
|
||||
int xchk_xattr_walk(struct xfs_scrub *sc, struct xfs_inode *ip,
|
||||
xchk_xattr_fn attr_fn, xchk_xattrleaf_fn leaf_fn, void *priv);
|
||||
|
||||
#endif /* __XFS_SCRUB_LISTXATTR_H__ */
|
||||
|
|
@ -18,15 +18,19 @@
|
|||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/iscan.h"
|
||||
#include "scrub/orphanage.h"
|
||||
#include "scrub/nlinks.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/readdir.h"
|
||||
#include "scrub/tempfile.h"
|
||||
#include "scrub/listxattr.h"
|
||||
|
||||
/*
|
||||
* Live Inode Link Count Checking
|
||||
|
|
@ -43,11 +47,23 @@ int
|
|||
xchk_setup_nlinks(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xchk_nlink_ctrs *xnc;
|
||||
int error;
|
||||
|
||||
xchk_fsgates_enable(sc, XCHK_FSGATES_DIRENTS);
|
||||
|
||||
sc->buf = kzalloc(sizeof(struct xchk_nlink_ctrs), XCHK_GFP_FLAGS);
|
||||
if (!sc->buf)
|
||||
if (xchk_could_repair(sc)) {
|
||||
error = xrep_setup_nlinks(sc);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
xnc = kvzalloc(sizeof(struct xchk_nlink_ctrs), XCHK_GFP_FLAGS);
|
||||
if (!xnc)
|
||||
return -ENOMEM;
|
||||
xnc->xname.name = xnc->namebuf;
|
||||
xnc->sc = sc;
|
||||
sc->buf = xnc;
|
||||
|
||||
return xchk_setup_fs(sc);
|
||||
}
|
||||
|
|
@ -152,6 +168,13 @@ xchk_nlinks_live_update(
|
|||
|
||||
xnc = container_of(nb, struct xchk_nlink_ctrs, dhook.dirent_hook.nb);
|
||||
|
||||
/*
|
||||
* Ignore temporary directories being used to stage dir repairs, since
|
||||
* we don't bump the link counts of the children.
|
||||
*/
|
||||
if (xrep_is_tempfile(p->dp))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
trace_xchk_nlinks_live_update(xnc->sc->mp, p->dp, action, p->ip->i_ino,
|
||||
p->delta, p->name->name, p->name->len);
|
||||
|
||||
|
|
@ -251,12 +274,17 @@ xchk_nlinks_collect_dirent(
|
|||
* number of parents of the root directory.
|
||||
*
|
||||
* Otherwise, increment the number of backrefs pointing back to ino.
|
||||
*
|
||||
* If the filesystem has parent pointers, we walk the pptrs to
|
||||
* determine the backref count.
|
||||
*/
|
||||
if (dotdot) {
|
||||
if (dp == sc->mp->m_rootip)
|
||||
error = xchk_nlinks_update_incore(xnc, ino, 1, 0, 0);
|
||||
else
|
||||
else if (!xfs_has_parent(sc->mp))
|
||||
error = xchk_nlinks_update_incore(xnc, ino, 0, 1, 0);
|
||||
else
|
||||
error = 0;
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
|
@ -293,6 +321,61 @@ out_incomplete:
|
|||
return error;
|
||||
}
|
||||
|
||||
/* Bump the backref count for the inode referenced by this parent pointer. */
|
||||
STATIC int
|
||||
xchk_nlinks_collect_pptr(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int attr_flags,
|
||||
const unsigned char *name,
|
||||
unsigned int namelen,
|
||||
const void *value,
|
||||
unsigned int valuelen,
|
||||
void *priv)
|
||||
{
|
||||
struct xfs_name xname = {
|
||||
.name = name,
|
||||
.len = namelen,
|
||||
};
|
||||
struct xchk_nlink_ctrs *xnc = priv;
|
||||
const struct xfs_parent_rec *pptr_rec = value;
|
||||
xfs_ino_t parent_ino;
|
||||
int error;
|
||||
|
||||
/* Update the shadow link counts if we haven't already failed. */
|
||||
|
||||
if (xchk_iscan_aborted(&xnc->collect_iscan)) {
|
||||
error = -ECANCELED;
|
||||
goto out_incomplete;
|
||||
}
|
||||
|
||||
if (!(attr_flags & XFS_ATTR_PARENT))
|
||||
return 0;
|
||||
|
||||
error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
|
||||
valuelen, &parent_ino, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
trace_xchk_nlinks_collect_pptr(sc->mp, ip, &xname, pptr_rec);
|
||||
|
||||
mutex_lock(&xnc->lock);
|
||||
|
||||
error = xchk_nlinks_update_incore(xnc, parent_ino, 0, 1, 0);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
mutex_unlock(&xnc->lock);
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&xnc->lock);
|
||||
xchk_iscan_abort(&xnc->collect_iscan);
|
||||
out_incomplete:
|
||||
xchk_set_incomplete(sc);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Walk a directory to bump the observed link counts of the children. */
|
||||
STATIC int
|
||||
xchk_nlinks_collect_dir(
|
||||
|
|
@ -303,6 +386,13 @@ xchk_nlinks_collect_dir(
|
|||
unsigned int lock_mode;
|
||||
int error = 0;
|
||||
|
||||
/*
|
||||
* Ignore temporary directories being used to stage dir repairs, since
|
||||
* we don't bump the link counts of the children.
|
||||
*/
|
||||
if (xrep_is_tempfile(dp))
|
||||
return 0;
|
||||
|
||||
/* Prevent anyone from changing this directory while we walk it. */
|
||||
xfs_ilock(dp, XFS_IOLOCK_SHARED);
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
|
|
@ -332,6 +422,28 @@ xchk_nlinks_collect_dir(
|
|||
if (error)
|
||||
goto out_abort;
|
||||
|
||||
/* Walk the parent pointers to get real backref counts. */
|
||||
if (xfs_has_parent(sc->mp)) {
|
||||
/*
|
||||
* If the extended attributes look as though they has been
|
||||
* zapped by the inode record repair code, we cannot scan for
|
||||
* parent pointers.
|
||||
*/
|
||||
if (xchk_pptr_looks_zapped(dp)) {
|
||||
error = -EBUSY;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
error = xchk_xattr_walk(sc, dp, xchk_nlinks_collect_pptr, NULL,
|
||||
xnc);
|
||||
if (error == -ECANCELED) {
|
||||
error = 0;
|
||||
goto out_unlock;
|
||||
}
|
||||
if (error)
|
||||
goto out_abort;
|
||||
}
|
||||
|
||||
xchk_iscan_mark_visited(&xnc->collect_iscan, dp);
|
||||
goto out_unlock;
|
||||
|
||||
|
|
@ -537,6 +649,14 @@ xchk_nlinks_compare_inode(
|
|||
unsigned int actual_nlink;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Ignore temporary files being used to stage repairs, since we assume
|
||||
* they're correct for non-directories, and the directory repair code
|
||||
* doesn't bump the link counts for the children.
|
||||
*/
|
||||
if (xrep_is_tempfile(ip))
|
||||
return 0;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
mutex_lock(&xnc->lock);
|
||||
|
||||
|
|
@ -571,9 +691,11 @@ xchk_nlinks_compare_inode(
|
|||
* this as a corruption. The VFS won't let users increase the link
|
||||
* count, but it will let them decrease it.
|
||||
*/
|
||||
if (total_links > XFS_MAXLINK) {
|
||||
if (total_links > XFS_NLINK_PINNED) {
|
||||
xchk_ino_set_corrupt(sc, ip->i_ino);
|
||||
goto out_corrupt;
|
||||
} else if (total_links > XFS_MAXLINK) {
|
||||
xchk_ino_set_warning(sc, ip->i_ino);
|
||||
}
|
||||
|
||||
/* Link counts should match. */
|
||||
|
|
@ -850,9 +972,6 @@ xchk_nlinks_setup_scan(
|
|||
xfs_agino_t first_agino, last_agino;
|
||||
int error;
|
||||
|
||||
ASSERT(xnc->sc == NULL);
|
||||
xnc->sc = sc;
|
||||
|
||||
mutex_init(&xnc->lock);
|
||||
|
||||
/* Retry iget every tenth of a second for up to 30 seconds. */
|
||||
|
|
|
|||
|
|
@ -28,6 +28,13 @@ struct xchk_nlink_ctrs {
|
|||
* from other writer threads.
|
||||
*/
|
||||
struct xfs_dir_hook dhook;
|
||||
|
||||
/* Orphanage reparenting request. */
|
||||
struct xrep_adoption adoption;
|
||||
|
||||
/* Directory entry name, plus the trailing null. */
|
||||
struct xfs_name xname;
|
||||
char namebuf[MAXNAMELEN];
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -17,14 +17,19 @@
|
|||
#include "xfs_iwalk.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_ag.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/iscan.h"
|
||||
#include "scrub/orphanage.h"
|
||||
#include "scrub/nlinks.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/tempfile.h"
|
||||
|
||||
/*
|
||||
* Live Inode Link Count Repair
|
||||
|
|
@ -36,6 +41,48 @@
|
|||
* inode is locked.
|
||||
*/
|
||||
|
||||
/* Set up to repair inode link counts. */
|
||||
int
|
||||
xrep_setup_nlinks(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xrep_orphanage_try_create(sc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Inodes that aren't the root directory or the orphanage, have a nonzero link
|
||||
* count, and no observed parents should be moved to the orphanage.
|
||||
*/
|
||||
static inline bool
|
||||
xrep_nlinks_is_orphaned(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int actual_nlink,
|
||||
const struct xchk_nlink *obs)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
|
||||
if (obs->parents != 0)
|
||||
return false;
|
||||
if (ip == mp->m_rootip || ip == sc->orphanage)
|
||||
return false;
|
||||
return actual_nlink != 0;
|
||||
}
|
||||
|
||||
/* Remove an inode from the unlinked list. */
|
||||
STATIC int
|
||||
xrep_nlinks_iunlink_remove(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_perag *pag;
|
||||
int error;
|
||||
|
||||
pag = xfs_perag_get(sc->mp, XFS_INO_TO_AGNO(sc->mp, sc->ip->i_ino));
|
||||
error = xfs_iunlink_remove(sc->tp, pag, sc->ip);
|
||||
xfs_perag_put(pag);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Correct the link count of the given inode. Because we have to grab locks
|
||||
* and resources in a certain order, it's possible that this will be a no-op.
|
||||
|
|
@ -50,17 +97,55 @@ xrep_nlinks_repair_inode(
|
|||
struct xfs_inode *ip = sc->ip;
|
||||
uint64_t total_links;
|
||||
uint64_t actual_nlink;
|
||||
bool orphanage_available = false;
|
||||
bool dirty = false;
|
||||
int error;
|
||||
|
||||
xchk_ilock(sc, XFS_IOLOCK_EXCL);
|
||||
/*
|
||||
* Ignore temporary files being used to stage repairs, since we assume
|
||||
* they're correct for non-directories, and the directory repair code
|
||||
* doesn't bump the link counts for the children.
|
||||
*/
|
||||
if (xrep_is_tempfile(ip))
|
||||
return 0;
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &sc->tp);
|
||||
if (error)
|
||||
return error;
|
||||
/*
|
||||
* If the filesystem has an orphanage attached to the scrub context,
|
||||
* prepare for a link count repair that could involve @ip being adopted
|
||||
* by the lost+found.
|
||||
*/
|
||||
if (xrep_orphanage_can_adopt(sc)) {
|
||||
error = xrep_orphanage_iolock_two(sc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xchk_ilock(sc, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(sc->tp, ip, 0);
|
||||
error = xrep_adoption_trans_alloc(sc, &xnc->adoption);
|
||||
if (error) {
|
||||
xchk_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
xrep_orphanage_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
} else {
|
||||
orphanage_available = true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Either there is no orphanage or we couldn't allocate resources for
|
||||
* that kind of update. Let's try again with only the resources we
|
||||
* need for a simple link count update, since that's much more common.
|
||||
*/
|
||||
if (!orphanage_available) {
|
||||
xchk_ilock(sc, XFS_IOLOCK_EXCL);
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0,
|
||||
&sc->tp);
|
||||
if (error) {
|
||||
xchk_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
return error;
|
||||
}
|
||||
|
||||
xchk_ilock(sc, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(sc->tp, ip, 0);
|
||||
}
|
||||
|
||||
mutex_lock(&xnc->lock);
|
||||
|
||||
|
|
@ -99,28 +184,68 @@ xrep_nlinks_repair_inode(
|
|||
}
|
||||
|
||||
/*
|
||||
* We did not find any links to this inode. If the inode agrees, we
|
||||
* have nothing further to do. If not, the inode has a nonzero link
|
||||
* count and we don't have anywhere to graft the child onto. Dropping
|
||||
* a live inode's link count to zero can cause unexpected shutdowns in
|
||||
* inactivation, so leave it alone.
|
||||
* Decide if we're going to move this file to the orphanage, and fix
|
||||
* up the incore link counts if we are.
|
||||
*/
|
||||
if (total_links == 0) {
|
||||
if (actual_nlink != 0)
|
||||
trace_xrep_nlinks_unfixable_inode(mp, ip, &obs);
|
||||
goto out_trans;
|
||||
if (orphanage_available &&
|
||||
xrep_nlinks_is_orphaned(sc, ip, actual_nlink, &obs)) {
|
||||
/* Figure out what name we're going to use here. */
|
||||
error = xrep_adoption_compute_name(&xnc->adoption, &xnc->xname);
|
||||
if (error)
|
||||
goto out_trans;
|
||||
|
||||
/*
|
||||
* Reattach this file to the directory tree by moving it to
|
||||
* the orphanage per the adoption parameters that we already
|
||||
* computed.
|
||||
*/
|
||||
error = xrep_adoption_move(&xnc->adoption);
|
||||
if (error)
|
||||
goto out_trans;
|
||||
|
||||
/*
|
||||
* Re-read the link counts since the reparenting will have
|
||||
* updated our scan info.
|
||||
*/
|
||||
mutex_lock(&xnc->lock);
|
||||
error = xfarray_load_sparse(xnc->nlinks, ip->i_ino, &obs);
|
||||
mutex_unlock(&xnc->lock);
|
||||
if (error)
|
||||
goto out_trans;
|
||||
|
||||
total_links = xchk_nlink_total(ip, &obs);
|
||||
actual_nlink = VFS_I(ip)->i_nlink;
|
||||
dirty = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this inode is linked from the directory tree and on the unlinked
|
||||
* list, remove it from the unlinked list.
|
||||
*/
|
||||
if (total_links > 0 && xfs_inode_on_unlinked_list(ip)) {
|
||||
error = xrep_nlinks_iunlink_remove(sc);
|
||||
if (error)
|
||||
goto out_trans;
|
||||
dirty = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this inode is not linked from the directory tree yet not on the
|
||||
* unlinked list, put it on the unlinked list.
|
||||
*/
|
||||
if (total_links == 0 && !xfs_inode_on_unlinked_list(ip)) {
|
||||
error = xfs_iunlink(sc->tp, ip);
|
||||
if (error)
|
||||
goto out_trans;
|
||||
dirty = true;
|
||||
}
|
||||
|
||||
/* Commit the new link count if it changed. */
|
||||
if (total_links != actual_nlink) {
|
||||
if (total_links > XFS_MAXLINK) {
|
||||
trace_xrep_nlinks_unfixable_inode(mp, ip, &obs);
|
||||
goto out_trans;
|
||||
}
|
||||
|
||||
trace_xrep_nlinks_update_inode(mp, ip, &obs);
|
||||
|
||||
set_nlink(VFS_I(ip), total_links);
|
||||
set_nlink(VFS_I(ip), min_t(unsigned long long, total_links,
|
||||
XFS_NLINK_PINNED));
|
||||
dirty = true;
|
||||
}
|
||||
|
||||
|
|
@ -132,14 +257,19 @@ xrep_nlinks_repair_inode(
|
|||
xfs_trans_log_inode(sc->tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
error = xrep_trans_commit(sc);
|
||||
xchk_iunlock(sc, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
return error;
|
||||
goto out_unlock;
|
||||
|
||||
out_scanlock:
|
||||
mutex_unlock(&xnc->lock);
|
||||
out_trans:
|
||||
xchk_trans_cancel(sc);
|
||||
xchk_iunlock(sc, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
|
||||
out_unlock:
|
||||
xchk_iunlock(sc, XFS_ILOCK_EXCL);
|
||||
if (orphanage_available) {
|
||||
xrep_orphanage_iunlock(sc, XFS_ILOCK_EXCL);
|
||||
xrep_orphanage_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
}
|
||||
xchk_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
|
@ -172,10 +302,10 @@ xrep_nlinks(
|
|||
/*
|
||||
* We need ftype for an accurate count of the number of child
|
||||
* subdirectory links. Child subdirectories with a back link (dotdot
|
||||
* entry) but no forward link are unfixable, so we cannot repair the
|
||||
* link count of the parent directory based on the back link count
|
||||
* alone. Filesystems without ftype support are rare (old V4) so we
|
||||
* just skip out here.
|
||||
* entry) but no forward link are moved to the orphanage, so we cannot
|
||||
* repair the link count of the parent directory based on the back link
|
||||
* count alone. Filesystems without ftype support are rare (old V4) so
|
||||
* we just skip out here.
|
||||
*/
|
||||
if (!xfs_has_ftype(sc->mp))
|
||||
return -EOPNOTSUPP;
|
||||
|
|
|
|||
627
fs/xfs/scrub/orphanage.c
Normal file
627
fs/xfs/scrub/orphanage.c
Normal file
|
|
@ -0,0 +1,627 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Copyright (c) 2021-2024 Oracle. All Rights Reserved.
|
||||
* Author: Darrick J. Wong <djwong@kernel.org>
|
||||
*/
|
||||
#include "xfs.h"
|
||||
#include "xfs_fs.h"
|
||||
#include "xfs_shared.h"
|
||||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_ialloc.h"
|
||||
#include "xfs_quota.h"
|
||||
#include "xfs_trans_space.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_bmap_btree.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "xfs_attr_sf.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/trace.h"
|
||||
#include "scrub/orphanage.h"
|
||||
#include "scrub/readdir.h"
|
||||
|
||||
#include <linux/namei.h>
|
||||
|
||||
/*
|
||||
* The Orphanage
|
||||
* =============
|
||||
*
|
||||
* If the directory tree is damaged, children of that directory become
|
||||
* inaccessible via that file path. If a child has no other parents, the file
|
||||
* is said to be orphaned. xfs_repair fixes this situation by creating a
|
||||
* orphanage directory (specifically, /lost+found) and creating a directory
|
||||
* entry pointing to the orphaned file.
|
||||
*
|
||||
* Online repair follows this tactic by creating a root-owned /lost+found
|
||||
* directory if one does not exist. If an orphan is found, it will move that
|
||||
* files into orphanage.
|
||||
*/
|
||||
|
||||
/* Make the orphanage owned by root. */
|
||||
STATIC int
|
||||
xrep_chown_orphanage(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *dp)
|
||||
{
|
||||
struct xfs_trans *tp;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_dquot *udqp = NULL, *gdqp = NULL, *pdqp = NULL;
|
||||
struct xfs_dquot *oldu = NULL, *oldg = NULL, *oldp = NULL;
|
||||
struct inode *inode = VFS_I(dp);
|
||||
int error;
|
||||
|
||||
error = xfs_qm_vop_dqalloc(dp, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
|
||||
XFS_QMOPT_QUOTALL, &udqp, &gdqp, &pdqp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_trans_alloc_ichange(dp, udqp, gdqp, pdqp, true, &tp);
|
||||
if (error)
|
||||
goto out_dqrele;
|
||||
|
||||
/*
|
||||
* Always clear setuid/setgid/sticky on the orphanage since we don't
|
||||
* normally want that functionality on this directory and xfs_repair
|
||||
* doesn't create it this way either. Leave the other access bits
|
||||
* unchanged.
|
||||
*/
|
||||
inode->i_mode &= ~(S_ISUID | S_ISGID | S_ISVTX);
|
||||
|
||||
/*
|
||||
* Change the ownerships and register quota modifications
|
||||
* in the transaction.
|
||||
*/
|
||||
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID)) {
|
||||
if (XFS_IS_UQUOTA_ON(mp))
|
||||
oldu = xfs_qm_vop_chown(tp, dp, &dp->i_udquot, udqp);
|
||||
inode->i_uid = GLOBAL_ROOT_UID;
|
||||
}
|
||||
if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID)) {
|
||||
if (XFS_IS_GQUOTA_ON(mp))
|
||||
oldg = xfs_qm_vop_chown(tp, dp, &dp->i_gdquot, gdqp);
|
||||
inode->i_gid = GLOBAL_ROOT_GID;
|
||||
}
|
||||
if (dp->i_projid != 0) {
|
||||
if (XFS_IS_PQUOTA_ON(mp))
|
||||
oldp = xfs_qm_vop_chown(tp, dp, &dp->i_pdquot, pdqp);
|
||||
dp->i_projid = 0;
|
||||
}
|
||||
|
||||
dp->i_diflags &= ~(XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT);
|
||||
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
|
||||
|
||||
XFS_STATS_INC(mp, xs_ig_attrchg);
|
||||
|
||||
if (xfs_has_wsync(mp))
|
||||
xfs_trans_set_sync(tp);
|
||||
error = xfs_trans_commit(tp);
|
||||
|
||||
xfs_qm_dqrele(oldu);
|
||||
xfs_qm_dqrele(oldg);
|
||||
xfs_qm_dqrele(oldp);
|
||||
|
||||
out_dqrele:
|
||||
xfs_qm_dqrele(udqp);
|
||||
xfs_qm_dqrele(gdqp);
|
||||
xfs_qm_dqrele(pdqp);
|
||||
return error;
|
||||
}
|
||||
|
||||
#define ORPHANAGE "lost+found"
|
||||
|
||||
/* Create the orphanage directory, and set sc->orphanage to it. */
|
||||
int
|
||||
xrep_orphanage_create(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct dentry *root_dentry, *orphanage_dentry;
|
||||
struct inode *root_inode = VFS_I(sc->mp->m_rootip);
|
||||
struct inode *orphanage_inode;
|
||||
int error;
|
||||
|
||||
if (xfs_is_shutdown(mp))
|
||||
return -EIO;
|
||||
if (xfs_is_readonly(mp)) {
|
||||
sc->orphanage = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ASSERT(sc->tp == NULL);
|
||||
ASSERT(sc->orphanage == NULL);
|
||||
|
||||
/* Find the dentry for the root directory... */
|
||||
root_dentry = d_find_alias(root_inode);
|
||||
if (!root_dentry) {
|
||||
error = -EFSCORRUPTED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* ...which is a directory, right? */
|
||||
if (!d_is_dir(root_dentry)) {
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_dput_root;
|
||||
}
|
||||
|
||||
/* Try to find the orphanage directory. */
|
||||
inode_lock_nested(root_inode, I_MUTEX_PARENT);
|
||||
orphanage_dentry = lookup_one_len(ORPHANAGE, root_dentry,
|
||||
strlen(ORPHANAGE));
|
||||
if (IS_ERR(orphanage_dentry)) {
|
||||
error = PTR_ERR(orphanage_dentry);
|
||||
goto out_unlock_root;
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing found? Call mkdir to create the orphanage. Create the
|
||||
* directory without other-user access because we're live and someone
|
||||
* could have been relying partly on minimal access to a parent
|
||||
* directory to control access to a file we put in here.
|
||||
*/
|
||||
if (d_really_is_negative(orphanage_dentry)) {
|
||||
error = vfs_mkdir(&nop_mnt_idmap, root_inode, orphanage_dentry,
|
||||
0750);
|
||||
if (error)
|
||||
goto out_dput_orphanage;
|
||||
}
|
||||
|
||||
/* Not a directory? Bail out. */
|
||||
if (!d_is_dir(orphanage_dentry)) {
|
||||
error = -ENOTDIR;
|
||||
goto out_dput_orphanage;
|
||||
}
|
||||
|
||||
/*
|
||||
* Grab a reference to the orphanage. This /should/ succeed since
|
||||
* we hold the root directory locked and therefore nobody can delete
|
||||
* the orphanage.
|
||||
*/
|
||||
orphanage_inode = igrab(d_inode(orphanage_dentry));
|
||||
if (!orphanage_inode) {
|
||||
error = -ENOENT;
|
||||
goto out_dput_orphanage;
|
||||
}
|
||||
|
||||
/* Make sure the orphanage is owned by root. */
|
||||
error = xrep_chown_orphanage(sc, XFS_I(orphanage_inode));
|
||||
if (error)
|
||||
goto out_dput_orphanage;
|
||||
|
||||
/* Stash the reference for later and bail out. */
|
||||
sc->orphanage = XFS_I(orphanage_inode);
|
||||
sc->orphanage_ilock_flags = 0;
|
||||
|
||||
out_dput_orphanage:
|
||||
dput(orphanage_dentry);
|
||||
out_unlock_root:
|
||||
inode_unlock(VFS_I(sc->mp->m_rootip));
|
||||
out_dput_root:
|
||||
dput(root_dentry);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * Lock the orphanage inode with the given lock flags (blocking), and record
 * the flags in the scrub context so that xrep_orphanage_rele can release
 * whatever is still held at teardown.
 */
void
xrep_orphanage_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->orphanage_ilock_flags |= ilock_flags;
	xfs_ilock(sc->orphanage, ilock_flags);
}
|
||||
|
||||
bool
|
||||
xrep_orphanage_ilock_nowait(
|
||||
struct xfs_scrub *sc,
|
||||
unsigned int ilock_flags)
|
||||
{
|
||||
if (xfs_ilock_nowait(sc->orphanage, ilock_flags)) {
|
||||
sc->orphanage_ilock_flags |= ilock_flags;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * Unlock the orphanage inode and clear the released flags from the scrub
 * context's lock-state bookkeeping.
 */
void
xrep_orphanage_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_iunlock(sc->orphanage, ilock_flags);
	sc->orphanage_ilock_flags &= ~ilock_flags;
}
|
||||
|
||||
/*
 * Grab the IOLOCK of the orphanage and sc->ip.  Since we cannot block with a
 * transaction held, this spins on trylocks with a short delay between
 * attempts, bailing out only if the scrub is asked to terminate.
 */
int
xrep_orphanage_iolock_two(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	while (true) {
		/* Abort the loop if a fatal signal is pending. */
		if (xchk_should_terminate(sc, &error))
			return error;

		/*
		 * Normal XFS takes the IOLOCK before grabbing a transaction.
		 * Scrub holds a transaction, which means that we can't block
		 * on either IOLOCK.
		 */
		if (xrep_orphanage_ilock_nowait(sc, XFS_IOLOCK_EXCL)) {
			if (xchk_ilock_nowait(sc, XFS_IOLOCK_EXCL))
				break;
			/* Only got one of the two; drop it and retry both. */
			xrep_orphanage_iunlock(sc, XFS_IOLOCK_EXCL);
		}
		/* Brief backoff so the lock holder can make progress. */
		delay(1);
	}

	return 0;
}
|
||||
|
||||
/* Release the orphanage. */
|
||||
void
|
||||
xrep_orphanage_rele(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
if (!sc->orphanage)
|
||||
return;
|
||||
|
||||
if (sc->orphanage_ilock_flags)
|
||||
xfs_iunlock(sc->orphanage, sc->orphanage_ilock_flags);
|
||||
|
||||
xchk_irele(sc, sc->orphanage);
|
||||
sc->orphanage = NULL;
|
||||
}
|
||||
|
||||
/* Adoption moves a file into /lost+found */
|
||||
|
||||
/* Can the orphanage adopt @sc->ip? */
|
||||
bool
|
||||
xrep_orphanage_can_adopt(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
ASSERT(sc->ip != NULL);
|
||||
|
||||
if (!sc->orphanage)
|
||||
return false;
|
||||
if (sc->ip == sc->orphanage)
|
||||
return false;
|
||||
if (xfs_internal_inum(sc->mp, sc->ip->i_ino))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Create a new transaction to send a child to the orphanage.
 *
 * Allocate a new transaction with sufficient disk space to handle the
 * adoption, take ILOCK_EXCL of the orphanage and sc->ip, joins them to the
 * transaction, and reserve quota to reparent the latter.  Caller must hold the
 * IOLOCK of the orphanage and sc->ip.
 */
int
xrep_adoption_trans_alloc(
	struct xfs_scrub	*sc,
	struct xrep_adoption	*adopt)
{
	struct xfs_mount	*mp = sc->mp;
	unsigned int		child_blkres = 0;
	int			error;

	/* Caller must hold both IOLOCKs but neither ILOCK. */
	ASSERT(sc->tp == NULL);
	ASSERT(sc->ip != NULL);
	ASSERT(sc->orphanage != NULL);
	ASSERT(sc->ilock_flags & XFS_IOLOCK_EXCL);
	ASSERT(sc->orphanage_ilock_flags & XFS_IOLOCK_EXCL);
	ASSERT(!(sc->ilock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)));
	ASSERT(!(sc->orphanage_ilock_flags &
				(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)));

	/*
	 * Compute the worst case space reservation that we need: a link into
	 * the orphanage, plus (for a child directory) a dotdot replacement,
	 * plus (on pptr filesystems) possibly adding an attr fork.
	 */
	adopt->sc = sc;
	adopt->orphanage_blkres = xfs_link_space_res(mp, MAXNAMELEN);
	if (S_ISDIR(VFS_I(sc->ip)->i_mode))
		child_blkres = xfs_rename_space_res(mp, 0, false,
						    xfs_name_dotdot.len, false);
	if (xfs_has_parent(mp))
		child_blkres += XFS_ADDAFORK_SPACE_RES(mp);
	adopt->child_blkres = child_blkres;

	/*
	 * Allocate a transaction to link the child into the parent, along with
	 * enough disk space to handle expansion of both the orphanage and the
	 * dotdot entry of a child directory.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link,
			adopt->orphanage_blkres + adopt->child_blkres, 0, 0,
			&sc->tp);
	if (error)
		return error;

	/* Lock both inodes in order and record the flags we now hold. */
	xfs_lock_two_inodes(sc->orphanage, XFS_ILOCK_EXCL,
			    sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	sc->orphanage_ilock_flags |= XFS_ILOCK_EXCL;

	xfs_trans_ijoin(sc->tp, sc->orphanage, 0);
	xfs_trans_ijoin(sc->tp, sc->ip, 0);

	/*
	 * Reserve enough quota in the orphan directory to add the new name.
	 * Normally the orphanage should have user/group/project ids of zero
	 * and hence is not subject to quota enforcement, but we're allowed to
	 * exceed quota to reattach disconnected parts of the directory tree.
	 */
	error = xfs_trans_reserve_quota_nblks(sc->tp, sc->orphanage,
			adopt->orphanage_blkres, 0, true);
	if (error)
		goto out_cancel;

	/*
	 * Reserve enough quota in the child directory to change dotdot.
	 * Here we're also allowed to exceed file quota to repair inconsistent
	 * metadata.
	 */
	if (adopt->child_blkres) {
		error = xfs_trans_reserve_quota_nblks(sc->tp, sc->ip,
				adopt->child_blkres, 0, true);
		if (error)
			goto out_cancel;
	}

	return 0;
out_cancel:
	/* Undo everything: cancel the transaction, drop both ILOCKs. */
	xchk_trans_cancel(sc);
	xrep_orphanage_iunlock(sc, XFS_ILOCK_EXCL);
	xchk_iunlock(sc, XFS_ILOCK_EXCL);
	return error;
}
|
||||
|
||||
/*
 * Compute the xfs_name for the directory entry that we're adding to the
 * orphanage.  Caller must hold ILOCKs of sc->ip and the orphanage and must not
 * reuse namebuf until the adoption completes or is dissolved.
 *
 * The base name is the child's inode number; if that already exists in the
 * orphanage, "<ino>.<N>" suffixes are tried until an unused name is found.
 */
int
xrep_adoption_compute_name(
	struct xrep_adoption	*adopt,
	struct xfs_name		*xname)
{
	struct xfs_scrub	*sc = adopt->sc;
	char			*namebuf = (void *)xname->name;
	xfs_ino_t		ino;
	unsigned int		incr = 0;
	int			error = 0;

	adopt->xname = xname;
	xname->len = snprintf(namebuf, MAXNAMELEN, "%llu", sc->ip->i_ino);
	xname->type = xfs_mode_to_ftype(VFS_I(sc->ip)->i_mode);

	/*
	 * Make sure the filename is unique in the lost+found.  A successful
	 * lookup means the name is taken, so keep probing with an increasing
	 * numeric suffix until the lookup fails (-ENOENT == name is free).
	 */
	error = xchk_dir_lookup(sc, sc->orphanage, xname, &ino);
	while (error == 0 && incr < 10000) {
		xname->len = snprintf(namebuf, MAXNAMELEN, "%llu.%u",
				sc->ip->i_ino, ++incr);
		error = xchk_dir_lookup(sc, sc->orphanage, xname, &ino);
	}
	if (error == 0) {
		/* We already have 10,000 entries in the orphanage? */
		return -EFSCORRUPTED;
	}

	/* Any lookup error other than "name not found" is a real failure. */
	if (error != -ENOENT)
		return error;
	return 0;
}
|
||||
|
||||
/*
 * Make sure the dcache does not have a positive dentry for the name we've
 * chosen.  The caller should have checked with the ondisk directory, so any
 * discrepancy is a sign that something is seriously wrong.
 */
static int
xrep_adoption_check_dcache(
	struct xrep_adoption	*adopt)
{
	struct qstr		qname = QSTR_INIT(adopt->xname->name,
						  adopt->xname->len);
	struct xfs_scrub	*sc = adopt->sc;
	struct dentry		*d_orphanage, *d_child;
	int			error = 0;

	/* No dentry for the orphanage itself means nothing is cached. */
	d_orphanage = d_find_alias(VFS_I(sc->orphanage));
	if (!d_orphanage)
		return 0;

	d_child = d_hash_and_lookup(d_orphanage, &qname);
	if (d_child) {
		trace_xrep_adoption_check_child(sc->mp, d_child);

		if (d_is_positive(d_child)) {
			/*
			 * A positive dentry contradicts the ondisk lookup we
			 * just did; the ASSERT (of the opposite condition)
			 * trips debug builds while production kernels report
			 * corruption.
			 */
			ASSERT(d_is_negative(d_child));
			error = -EFSCORRUPTED;
		}

		dput(d_child);
	}

	dput(d_orphanage);
	return error;
}
|
||||
|
||||
/*
 * Invalidate all dentries for the name that was added to the orphanage
 * directory, and all dentries pointing to the child inode that was moved.
 *
 * There should not be any positive entries for the name, since we've
 * maintained our lock on the orphanage directory.
 */
static void
xrep_adoption_zap_dcache(
	struct xrep_adoption	*adopt)
{
	struct qstr		qname = QSTR_INIT(adopt->xname->name,
						  adopt->xname->len);
	struct xfs_scrub	*sc = adopt->sc;
	struct dentry		*d_orphanage, *d_child;

	/* Invalidate all dentries for the adoption name */
	d_orphanage = d_find_alias(VFS_I(sc->orphanage));
	if (!d_orphanage)
		return;

	/* Loop: invalidation can race with new lookups instantiating more. */
	d_child = d_hash_and_lookup(d_orphanage, &qname);
	while (d_child != NULL) {
		trace_xrep_adoption_invalidate_child(sc->mp, d_child);

		ASSERT(d_is_negative(d_child));
		d_invalidate(d_child);
		dput(d_child);
		d_child = d_lookup(d_orphanage, &qname);
	}

	dput(d_orphanage);

	/* Invalidate all the dentries pointing down to this file. */
	while ((d_child = d_find_alias(VFS_I(sc->ip))) != NULL) {
		trace_xrep_adoption_invalidate_child(sc->mp, d_child);

		d_invalidate(d_child);
		dput(d_child);
	}
}
|
||||
|
||||
/*
 * If we have to add an attr fork ahead of a parent pointer update, how much
 * space should we ask for?  This is the shortform attr header plus one
 * shortform entry sized for a parent pointer record under the chosen name.
 */
static inline int
xrep_adoption_attr_sizeof(
	const struct xrep_adoption	*adopt)
{
	return sizeof(struct xfs_attr_sf_hdr) +
		xfs_attr_sf_entsize_byname(sizeof(struct xfs_parent_rec),
					   adopt->xname->len);
}
|
||||
|
||||
/*
 * Move the current file to the orphanage under the computed name.
 *
 * Returns with a dirty transaction so that the caller can handle any other
 * work, such as fixing up unlinked lists or resetting link counts.
 */
int
xrep_adoption_move(
	struct xrep_adoption	*adopt)
{
	struct xfs_scrub	*sc = adopt->sc;
	bool			isdir = S_ISDIR(VFS_I(sc->ip)->i_mode);
	int			error;

	trace_xrep_adoption_reparent(sc->orphanage, adopt->xname,
			sc->ip->i_ino);

	/* Refuse to proceed if the dcache disagrees with the ondisk dir. */
	error = xrep_adoption_check_dcache(adopt);
	if (error)
		return error;

	/*
	 * If this filesystem has parent pointers, ensure that the file being
	 * moved to the orphanage has an attribute fork.  This is required
	 * because the parent pointer code does not itself add attr forks.
	 */
	if (!xfs_inode_has_attr_fork(sc->ip) && xfs_has_parent(sc->mp)) {
		int sf_size = xrep_adoption_attr_sizeof(adopt);

		error = xfs_bmap_add_attrfork(sc->tp, sc->ip, sf_size, true);
		if (error)
			return error;
	}

	/* Create the new name in the orphanage. */
	error = xfs_dir_createname(sc->tp, sc->orphanage, adopt->xname,
			sc->ip->i_ino, adopt->orphanage_blkres);
	if (error)
		return error;

	/*
	 * Bump the link count of the orphanage if we just added a
	 * subdirectory, and update its timestamps.
	 */
	xfs_trans_ichgtime(sc->tp, sc->orphanage,
			XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	if (isdir)
		xfs_bumplink(sc->tp, sc->orphanage);
	xfs_trans_log_inode(sc->tp, sc->orphanage, XFS_ILOG_CORE);

	/* Bump the link count of the child if the caller asked for it. */
	if (adopt->bump_child_nlink) {
		xfs_bumplink(sc->tp, sc->ip);
		xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
	}

	/* Replace the dotdot entry if the child is a subdirectory. */
	if (isdir) {
		error = xfs_dir_replace(sc->tp, sc->ip, &xfs_name_dotdot,
				sc->orphanage->i_ino, adopt->child_blkres);
		if (error)
			return error;
	}

	/* Add a parent pointer from the file back to the lost+found. */
	if (xfs_has_parent(sc->mp)) {
		error = xfs_parent_addname(sc->tp, &adopt->ppargs,
				sc->orphanage, adopt->xname, sc->ip);
		if (error)
			return error;
	}

	/*
	 * Notify dirent hooks that we moved the file to /lost+found, and
	 * finish all the deferred work so that we know the adoption is fully
	 * recorded in the log.
	 */
	xfs_dir_update_hook(sc->orphanage, sc->ip, 1, adopt->xname);

	/* Remove negative dentries from the lost+found's dcache */
	xrep_adoption_zap_dcache(adopt);
	return 0;
}
|
||||
|
||||
/*
 * Roll to a clean scrub transaction so that we can release the orphanage,
 * even if xrep_adoption_move was not called.
 *
 * Commits all the work and deferred ops attached to an adoption request and
 * rolls to a clean scrub transaction.  On success, returns 0 with the scrub
 * context holding a clean transaction with no inodes joined.  On failure,
 * returns negative errno with no scrub transaction.  All inode locks are
 * still held after this function returns.
 */
int
xrep_adoption_trans_roll(
	struct xrep_adoption	*adopt)
{
	struct xfs_scrub	*sc = adopt->sc;
	int			error;

	trace_xrep_adoption_trans_roll(sc->orphanage, sc->ip,
			!!(sc->tp->t_flags & XFS_TRANS_DIRTY));

	/* Finish all the deferred ops to commit all repairs. */
	error = xrep_defer_finish(sc);
	if (error)
		return error;

	/* Roll the transaction once more to detach the inodes. */
	return xfs_trans_roll(&sc->tp);
}
|
||||
86
fs/xfs/scrub/orphanage.h
Normal file
86
fs/xfs/scrub/orphanage.h
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_ORPHANAGE_H__
#define __XFS_SCRUB_ORPHANAGE_H__

#ifdef CONFIG_XFS_ONLINE_REPAIR
int xrep_orphanage_create(struct xfs_scrub *sc);

/*
 * If we're doing a repair, ensure that the orphanage exists and attach it to
 * the scrub context.
 */
static inline int
xrep_orphanage_try_create(
	struct xfs_scrub	*sc)
{
	int			error;

	ASSERT(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR);

	error = xrep_orphanage_create(sc);
	switch (error) {
	case 0:
	case -ENOENT:
	case -ENOTDIR:
	case -ENOSPC:
		/*
		 * If the orphanage can't be found or isn't a directory, we'll
		 * keep going, but we won't be able to attach the file to the
		 * orphanage if we can't find the parent.
		 */
		return 0;
	}

	return error;
}

int xrep_orphanage_iolock_two(struct xfs_scrub *sc);

/* Lock-state helpers for the orphanage inode attached to the scrub context. */
void xrep_orphanage_ilock(struct xfs_scrub *sc, unsigned int ilock_flags);
bool xrep_orphanage_ilock_nowait(struct xfs_scrub *sc,
		unsigned int ilock_flags);
void xrep_orphanage_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);

void xrep_orphanage_rele(struct xfs_scrub *sc);

/* Information about a request to add a file to the orphanage. */
struct xrep_adoption {
	struct xfs_scrub	*sc;

	/* Name used for the adoption. */
	struct xfs_name		*xname;

	/* Parent pointer context tracking */
	struct xfs_parent_args	ppargs;

	/* Block reservations for orphanage and child (if directory). */
	unsigned int		orphanage_blkres;
	unsigned int		child_blkres;

	/*
	 * Does the caller want us to bump the child link count?  This is not
	 * needed when reattaching files that have become disconnected but have
	 * nlink > 1.  It is necessary when changing the directory tree
	 * structure.
	 */
	bool			bump_child_nlink:1;
};

bool xrep_orphanage_can_adopt(struct xfs_scrub *sc);

int xrep_adoption_trans_alloc(struct xfs_scrub *sc,
		struct xrep_adoption *adopt);
int xrep_adoption_compute_name(struct xrep_adoption *adopt,
		struct xfs_name *xname);
int xrep_adoption_move(struct xrep_adoption *adopt);
int xrep_adoption_trans_roll(struct xrep_adoption *adopt);
#else
struct xrep_adoption { /* empty */ };
# define xrep_orphanage_rele(sc)	((void)0)
#endif /* CONFIG_XFS_ONLINE_REPAIR */

#endif /* __XFS_SCRUB_ORPHANAGE_H__ */
|
||||
|
|
@ -10,19 +10,37 @@
|
|||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_log_format.h"
|
||||
#include "xfs_trans.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_icache.h"
|
||||
#include "xfs_dir2.h"
|
||||
#include "xfs_dir2_priv.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_parent.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/readdir.h"
|
||||
#include "scrub/tempfile.h"
|
||||
#include "scrub/repair.h"
|
||||
#include "scrub/listxattr.h"
|
||||
#include "scrub/xfile.h"
|
||||
#include "scrub/xfarray.h"
|
||||
#include "scrub/xfblob.h"
|
||||
#include "scrub/trace.h"
|
||||
|
||||
/* Set us up to scrub parents. */
int
xchk_setup_parent(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	/* Repair setup first, if this scrub can attempt repairs. */
	if (xchk_could_repair(sc))
		error = xrep_setup_parent(sc);
	if (error)
		return error;

	return xchk_setup_inode_contents(sc, 0);
}
|
||||
|
||||
|
|
@ -143,7 +161,8 @@ xchk_parent_validate(
|
|||
}
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
|
||||
return error;
|
||||
if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
|
||||
if (dp == sc->ip || xrep_is_tempfile(dp) ||
|
||||
!S_ISDIR(VFS_I(dp)->i_mode)) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
|
||||
goto out_rele;
|
||||
}
|
||||
|
|
@ -185,6 +204,621 @@ out_rele:
|
|||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Checking of Parent Pointers
|
||||
* ===========================
|
||||
*
|
||||
* On filesystems with directory parent pointers, we check the referential
|
||||
* integrity by visiting each parent pointer of a child file and checking that
|
||||
* the directory referenced by the pointer actually has a dirent pointing
|
||||
* forward to the child file.
|
||||
*/
|
||||
|
||||
/* Deferred parent pointer entry that we saved for later. */
struct xchk_pptr {
	/* Cookie for retrieval of the pptr name. */
	xfblob_cookie		name_cookie;

	/* Parent pointer record. */
	struct xfs_parent_rec	pptr_rec;

	/* Length of the pptr name. */
	uint8_t			namelen;
};

/* In-memory state for a parent pointer scan of one file. */
struct xchk_pptrs {
	struct xfs_scrub	*sc;

	/* How many parent pointers did we find at the end? */
	unsigned long long	pptrs_found;

	/* Parent of this directory. */
	xfs_ino_t		parent_ino;

	/* Fixed-size array of xchk_pptr structures. */
	struct xfarray		*pptr_entries;

	/* Blobs containing parent pointer names. */
	struct xfblob		*pptr_names;

	/* Scratch buffer for scanning pptr xattrs */
	struct xfs_da_args	pptr_args;

	/* If we've cycled the ILOCK, we must revalidate all deferred pptrs. */
	bool			need_revalidate;

	/* Name buffer; xname.name points at namebuf. */
	struct xfs_name		xname;
	char			namebuf[MAXNAMELEN];
};
|
||||
|
||||
/*
 * Does this parent pointer match the dotdot entry?  xattr-walk callback:
 * returns -ECANCELED to stop the walk as soon as a match is found.
 */
STATIC int
xchk_parent_scan_dotdot(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		attr_flags,
	const unsigned char	*name,
	unsigned int		namelen,
	const void		*value,
	unsigned int		valuelen,
	void			*priv)
{
	struct xchk_pptrs	*pp = priv;
	xfs_ino_t		parent_ino;
	int			error;

	/* Skip xattrs that are not parent pointers. */
	if (!(attr_flags & XFS_ATTR_PARENT))
		return 0;

	error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
			valuelen, &parent_ino, NULL);
	if (error)
		return error;

	/* Match against the '..' inumber cached by the caller. */
	if (pp->parent_ino == parent_ino)
		return -ECANCELED;

	return 0;
}
|
||||
|
||||
/*
 * Look up the dotdot entry so that we can check it as we walk the pptrs.
 * Cross-checks '..' against the parent pointers: somewhere in the pptr set
 * there must be one that matches the dotdot inumber, except for the root
 * directory and unlinked directories.
 */
STATIC int
xchk_parent_pptr_and_dotdot(
	struct xchk_pptrs	*pp)
{
	struct xfs_scrub	*sc = pp->sc;
	int			error;

	/* Look up '..' */
	error = xchk_dir_lookup(sc, sc->ip, &xfs_name_dotdot, &pp->parent_ino);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
		return error;
	if (!xfs_verify_dir_ino(sc->mp, pp->parent_ino)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		return 0;
	}

	/* Is this the root dir?  Then '..' must point to itself. */
	if (sc->ip == sc->mp->m_rootip) {
		if (sc->ip->i_ino != pp->parent_ino)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		return 0;
	}

	/*
	 * If this is now an unlinked directory, the dotdot value is
	 * meaningless as long as it points to a valid inode.
	 */
	if (VFS_I(sc->ip)->i_nlink == 0)
		return 0;

	/* Already marked corrupt?  No point cross-referencing further. */
	if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Otherwise, walk the pptrs again, and check. */
	error = xchk_xattr_walk(sc, sc->ip, xchk_parent_scan_dotdot, NULL, pp);
	if (error == -ECANCELED) {
		/* Found a parent pointer that matches dotdot. */
		return 0;
	}
	if (!error || error == -EFSCORRUPTED) {
		/* Found a broken parent pointer or no match. */
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return 0;
	}
	return error;
}
|
||||
|
||||
/*
 * Try to lock a parent directory for checking dirents.  Returns the inode
 * flags for the locks we now hold, or zero if we failed.
 *
 * Takes IOLOCK_SHARED then ILOCK_SHARED; if the extent list still needs to
 * be read in, upgrades to ILOCK_EXCL (required to load extents).  Every
 * failure path drops whatever was acquired so zero means "nothing held".
 */
STATIC unsigned int
xchk_parent_lock_dir(
	struct xfs_scrub	*sc,
	struct xfs_inode	*dp)
{
	if (!xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED))
		return 0;

	if (!xfs_ilock_nowait(dp, XFS_ILOCK_SHARED)) {
		xfs_iunlock(dp, XFS_IOLOCK_SHARED);
		return 0;
	}

	/* Shared ILOCK suffices if the data fork extents are in memory. */
	if (!xfs_need_iread_extents(&dp->i_df))
		return XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED;

	/* Need to read extents: trade ILOCK_SHARED for ILOCK_EXCL. */
	xfs_iunlock(dp, XFS_ILOCK_SHARED);

	if (!xfs_ilock_nowait(dp, XFS_ILOCK_EXCL)) {
		xfs_iunlock(dp, XFS_IOLOCK_SHARED);
		return 0;
	}

	return XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL;
}
|
||||
|
||||
/* Check the forward link (dirent) associated with this parent pointer. */
|
||||
STATIC int
|
||||
xchk_parent_dirent(
|
||||
struct xchk_pptrs *pp,
|
||||
const struct xfs_name *xname,
|
||||
struct xfs_inode *dp)
|
||||
{
|
||||
struct xfs_scrub *sc = pp->sc;
|
||||
xfs_ino_t child_ino;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Use the name attached to this parent pointer to look up the
|
||||
* directory entry in the alleged parent.
|
||||
*/
|
||||
error = xchk_dir_lookup(sc, dp, xname, &child_ino);
|
||||
if (error == -ENOENT) {
|
||||
xchk_fblock_xref_set_corrupt(sc, XFS_ATTR_FORK, 0);
|
||||
return 0;
|
||||
}
|
||||
if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0, &error))
|
||||
return error;
|
||||
|
||||
/* Does the inode number match? */
|
||||
if (child_ino != sc->ip->i_ino) {
|
||||
xchk_fblock_xref_set_corrupt(sc, XFS_ATTR_FORK, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Try to grab a parent directory.  On success *dpp is either a referenced
 * parent inode or NULL; NULL (with a zero return) means the pptr was judged
 * bad and corruption has already been flagged, so the caller should move on.
 * -ECANCELED aborts the whole xattr walk on an invalid inumber.
 */
STATIC int
xchk_parent_iget(
	struct xchk_pptrs	*pp,
	const struct xfs_parent_rec	*pptr,
	struct xfs_inode	**dpp)
{
	struct xfs_scrub	*sc = pp->sc;
	struct xfs_inode	*ip;
	xfs_ino_t		parent_ino = be64_to_cpu(pptr->p_ino);
	int			error;

	/* Validate inode number. */
	error = xfs_dir_ino_validate(sc->mp, parent_ino);
	if (error) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return -ECANCELED;
	}

	error = xchk_iget(sc, parent_ino, &ip);
	if (error == -EINVAL || error == -ENOENT) {
		/* Plausible inumber but no such inode: pptr is corrupt. */
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return -ECANCELED;
	}
	if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0, &error))
		return error;

	/* The parent must be a directory. */
	if (!S_ISDIR(VFS_I(ip)->i_mode)) {
		xchk_fblock_xref_set_corrupt(sc, XFS_ATTR_FORK, 0);
		goto out_rele;
	}

	/* Validate generation number. */
	if (VFS_I(ip)->i_generation != be32_to_cpu(pptr->p_gen)) {
		xchk_fblock_xref_set_corrupt(sc, XFS_ATTR_FORK, 0);
		goto out_rele;
	}

	*dpp = ip;
	return 0;
out_rele:
	/* Corruption flagged; drop the inode and let the caller continue. */
	xchk_irele(sc, ip);
	return 0;
}
|
||||
|
||||
/*
 * Walk an xattr of a file.  If this xattr is a parent pointer, follow it up
 * to a parent directory and check that the parent has a dirent pointing back
 * to us.  If the parent cannot be locked without blocking, the pptr is
 * stashed in pp->pptr_entries/pptr_names for the slow path to retry later.
 */
STATIC int
xchk_parent_scan_attr(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		attr_flags,
	const unsigned char	*name,
	unsigned int		namelen,
	const void		*value,
	unsigned int		valuelen,
	void			*priv)
{
	struct xfs_name		xname = {
		.name		= name,
		.len		= namelen,
	};
	struct xchk_pptrs	*pp = priv;
	struct xfs_inode	*dp = NULL;
	const struct xfs_parent_rec *pptr_rec = value;
	xfs_ino_t		parent_ino;
	unsigned int		lockmode;
	int			error;

	/* Skip xattrs that are not parent pointers. */
	if (!(attr_flags & XFS_ATTR_PARENT))
		return 0;

	error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
			valuelen, &parent_ino, NULL);
	if (error) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return error;
	}

	/* No self-referential parent pointers. */
	if (parent_ino == sc->ip->i_ino) {
		xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
		return -ECANCELED;
	}

	pp->pptrs_found++;

	error = xchk_parent_iget(pp, pptr_rec, &dp);
	if (error)
		return error;
	if (!dp)
		return 0;

	/* Try to lock the inode. */
	lockmode = xchk_parent_lock_dir(sc, dp);
	if (!lockmode) {
		struct xchk_pptr	save_pp = {
			.pptr_rec	= *pptr_rec, /* struct copy */
			.namelen	= namelen,
		};

		/* Couldn't lock the inode, so save the pptr for later. */
		trace_xchk_parent_defer(sc->ip, &xname, dp->i_ino);

		/* Name goes in the blob store; cookie links it to the rec. */
		error = xfblob_storename(pp->pptr_names, &save_pp.name_cookie,
				&xname);
		if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0,
					&error))
			goto out_rele;

		error = xfarray_append(pp->pptr_entries, &save_pp);
		if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0,
					&error))
			goto out_rele;

		goto out_rele;
	}

	/* Locked the parent: check the forward dirent right now. */
	error = xchk_parent_dirent(pp, &xname, dp);
	if (error)
		goto out_unlock;

out_unlock:
	xfs_iunlock(dp, lockmode);
out_rele:
	xchk_irele(sc, dp);
	return error;
}
|
||||
|
||||
/*
|
||||
* Revalidate a parent pointer that we collected in the past but couldn't check
|
||||
* because of lock contention. Returns 0 if the parent pointer is still valid,
|
||||
* -ENOENT if it has gone away on us, or a negative errno.
|
||||
*/
|
||||
STATIC int
|
||||
xchk_parent_revalidate_pptr(
|
||||
struct xchk_pptrs *pp,
|
||||
const struct xfs_name *xname,
|
||||
struct xfs_parent_rec *pptr)
|
||||
{
|
||||
struct xfs_scrub *sc = pp->sc;
|
||||
int error;
|
||||
|
||||
error = xfs_parent_lookup(sc->tp, sc->ip, xname, pptr, &pp->pptr_args);
|
||||
if (error == -ENOATTR) {
|
||||
/* Parent pointer went away, nothing to revalidate. */
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * Check a parent pointer the slow way, which means we cycle locks a bunch
 * and put up with revalidation until we get it done.
 */
STATIC int
xchk_parent_slow_pptr(
	struct xchk_pptrs	*pp,
	const struct xfs_name	*xname,
	struct xfs_parent_rec	*pptr)
{
	struct xfs_scrub	*sc = pp->sc;
	struct xfs_inode	*dp = NULL;
	unsigned int		lockmode;
	int			error;

	/*
	 * Check that the deferred parent pointer still exists -- a previous
	 * iteration may have cycled the ILOCK and invalidated our snapshot.
	 */
	if (pp->need_revalidate) {
		error = xchk_parent_revalidate_pptr(pp, xname, pptr);
		if (error == -ENOENT)
			return 0;
		if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0,
					&error))
			return error;
	}

	error = xchk_parent_iget(pp, pptr, &dp);
	if (error)
		return error;
	if (!dp)
		return 0;

	/*
	 * If we can grab both IOLOCK and ILOCK of the alleged parent, we
	 * can proceed with the validation.
	 */
	lockmode = xchk_parent_lock_dir(sc, dp);
	if (lockmode) {
		trace_xchk_parent_slowpath(sc->ip, xname, dp->i_ino);
		goto check_dirent;
	}

	/*
	 * We couldn't lock the parent dir.  Drop all the locks and try to
	 * get them again, one at a time.  From here on, every deferred pptr
	 * must be revalidated because our ILOCK snapshot is stale.
	 */
	xchk_iunlock(sc, sc->ilock_flags);
	pp->need_revalidate = true;

	trace_xchk_parent_ultraslowpath(sc->ip, xname, dp->i_ino);

	error = xchk_dir_trylock_for_pptrs(sc, dp, &lockmode);
	if (error)
		goto out_rele;

	/* Revalidate the parent pointer now that we cycled locks. */
	error = xchk_parent_revalidate_pptr(pp, xname, pptr);
	if (error == -ENOENT) {
		/* Pointer disappeared while unlocked; not a corruption. */
		error = 0;
		goto out_unlock;
	}
	if (!xchk_fblock_xref_process_error(sc, XFS_ATTR_FORK, 0, &error))
		goto out_unlock;

check_dirent:
	error = xchk_parent_dirent(pp, xname, dp);
out_unlock:
	xfs_iunlock(dp, lockmode);
out_rele:
	xchk_irele(sc, dp);
	return error;
}
|
||||
|
||||
/* Check all the parent pointers that we deferred the first time around. */
STATIC int
xchk_parent_finish_slow_pptrs(
	struct xchk_pptrs	*pp)
{
	xfarray_idx_t		array_cur;
	int			error;

	foreach_xfarray_idx(pp->pptr_entries, array_cur) {
		struct xchk_pptr	pptr;

		/* Corruption already found; no point checking the rest. */
		if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return 0;

		error = xfarray_load(pp->pptr_entries, array_cur, &pptr);
		if (error)
			return error;

		/* Reconstitute the saved name from the blob store. */
		error = xfblob_loadname(pp->pptr_names, pptr.name_cookie,
				&pp->xname, pptr.namelen);
		if (error)
			return error;

		error = xchk_parent_slow_pptr(pp, &pp->xname, &pptr.pptr_rec);
		if (error)
			return error;
	}

	/* Empty out both xfiles now that we've checked everything. */
	xfarray_truncate(pp->pptr_entries);
	xfblob_truncate(pp->pptr_names);
	return 0;
}
|
||||
|
||||
/* Count the number of parent pointers. */
|
||||
STATIC int
|
||||
xchk_parent_count_pptr(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int attr_flags,
|
||||
const unsigned char *name,
|
||||
unsigned int namelen,
|
||||
const void *value,
|
||||
unsigned int valuelen,
|
||||
void *priv)
|
||||
{
|
||||
struct xchk_pptrs *pp = priv;
|
||||
int error;
|
||||
|
||||
if (!(attr_flags & XFS_ATTR_PARENT))
|
||||
return 0;
|
||||
|
||||
error = xfs_parent_from_attr(sc->mp, attr_flags, name, namelen, value,
|
||||
valuelen, NULL, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
pp->pptrs_found++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Compare the number of parent pointers to the link count.  For
 * non-directories these should be the same.  For unlinked directories the
 * count should be zero; for linked directories, it should be nonzero.
 */
STATIC int
xchk_parent_count_pptrs(
	struct xchk_pptrs	*pp)
{
	struct xfs_scrub	*sc = pp->sc;
	int			error;

	/*
	 * If we cycled the ILOCK while cross-checking parent pointers with
	 * dirents, then we need to recalculate the number of parent pointers.
	 */
	if (pp->need_revalidate) {
		pp->pptrs_found = 0;
		error = xchk_xattr_walk(sc, sc->ip, xchk_parent_count_pptr,
				NULL, pp);
		if (error == -EFSCORRUPTED) {
			/* Found a bad parent pointer */
			xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
			return 0;
		}
		if (error)
			return error;
	}

	if (S_ISDIR(VFS_I(sc->ip)->i_mode)) {
		/* Root has an implicit self-parent with no pptr on disk. */
		if (sc->ip == sc->mp->m_rootip)
			pp->pptrs_found++;

		if (VFS_I(sc->ip)->i_nlink == 0 && pp->pptrs_found > 0)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		else if (VFS_I(sc->ip)->i_nlink > 0 &&
			 pp->pptrs_found == 0)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	} else {
		/* Non-directories: one pptr per hard link. */
		if (VFS_I(sc->ip)->i_nlink != pp->pptrs_found)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	return 0;
}
|
||||
|
||||
/* Check parent pointers of a file. */
|
||||
STATIC int
|
||||
xchk_parent_pptr(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xchk_pptrs *pp;
|
||||
char *descr;
|
||||
int error;
|
||||
|
||||
pp = kvzalloc(sizeof(struct xchk_pptrs), XCHK_GFP_FLAGS);
|
||||
if (!pp)
|
||||
return -ENOMEM;
|
||||
pp->sc = sc;
|
||||
pp->xname.name = pp->namebuf;
|
||||
|
||||
/*
|
||||
* Set up some staging memory for parent pointers that we can't check
|
||||
* due to locking contention.
|
||||
*/
|
||||
descr = xchk_xfile_ino_descr(sc, "slow parent pointer entries");
|
||||
error = xfarray_create(descr, 0, sizeof(struct xchk_pptr),
|
||||
&pp->pptr_entries);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
goto out_pp;
|
||||
|
||||
descr = xchk_xfile_ino_descr(sc, "slow parent pointer names");
|
||||
error = xfblob_create(descr, &pp->pptr_names);
|
||||
kfree(descr);
|
||||
if (error)
|
||||
goto out_entries;
|
||||
|
||||
error = xchk_xattr_walk(sc, sc->ip, xchk_parent_scan_attr, NULL, pp);
|
||||
if (error == -ECANCELED) {
|
||||
error = 0;
|
||||
goto out_names;
|
||||
}
|
||||
if (error)
|
||||
goto out_names;
|
||||
|
||||
error = xchk_parent_finish_slow_pptrs(pp);
|
||||
if (error == -ETIMEDOUT) {
|
||||
/* Couldn't grab a lock, scrub was marked incomplete */
|
||||
error = 0;
|
||||
goto out_names;
|
||||
}
|
||||
if (error)
|
||||
goto out_names;
|
||||
|
||||
if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
goto out_names;
|
||||
|
||||
/*
|
||||
* For subdirectories, make sure the dotdot entry references the same
|
||||
* inode as the parent pointers.
|
||||
*
|
||||
* If we're scanning a /consistent/ directory, there should only be
|
||||
* one parent pointer, and it should point to the same directory as
|
||||
* the dotdot entry.
|
||||
*
|
||||
* However, a corrupt directory tree might feature a subdirectory with
|
||||
* multiple parents. The directory loop scanner is responsible for
|
||||
* correcting that kind of problem, so for now we only validate that
|
||||
* the dotdot entry matches /one/ of the parents.
|
||||
*/
|
||||
if (S_ISDIR(VFS_I(sc->ip)->i_mode)) {
|
||||
error = xchk_parent_pptr_and_dotdot(pp);
|
||||
if (error)
|
||||
goto out_names;
|
||||
}
|
||||
|
||||
if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
goto out_pp;
|
||||
|
||||
/*
|
||||
* Complain if the number of parent pointers doesn't match the link
|
||||
* count. This could be a sign of missing parent pointers (or an
|
||||
* incorrect link count).
|
||||
*/
|
||||
error = xchk_parent_count_pptrs(pp);
|
||||
if (error)
|
||||
goto out_names;
|
||||
|
||||
out_names:
|
||||
xfblob_destroy(pp->pptr_names);
|
||||
out_entries:
|
||||
xfarray_destroy(pp->pptr_entries);
|
||||
out_pp:
|
||||
kvfree(pp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Scrub a parent pointer. */
|
||||
int
|
||||
xchk_parent(
|
||||
|
|
@ -194,6 +828,9 @@ xchk_parent(
|
|||
xfs_ino_t parent_ino;
|
||||
int error = 0;
|
||||
|
||||
if (xfs_has_parent(mp))
|
||||
return xchk_parent_pptr(sc);
|
||||
|
||||
/*
|
||||
* If we're a directory, check that the '..' link points up to
|
||||
* a directory that has one entry pointing to us.
|
||||
|
|
@ -237,3 +874,64 @@ xchk_parent(
|
|||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decide if this file's extended attributes (and therefore its parent
|
||||
* pointers) have been zapped to satisfy the inode and ifork verifiers.
|
||||
* Checking and repairing should be postponed until the extended attribute
|
||||
* structure is fixed.
|
||||
*/
|
||||
bool
|
||||
xchk_pptr_looks_zapped(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
ASSERT(xfs_has_parent(mp));
|
||||
|
||||
/*
|
||||
* Temporary files that cannot be linked into the directory tree do not
|
||||
* have attr forks because they cannot ever have parents.
|
||||
*/
|
||||
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Directory tree roots do not have parents, so the expected outcome
|
||||
* of a parent pointer scan is always the empty set. It's safe to scan
|
||||
* them even if the attr fork was zapped.
|
||||
*/
|
||||
if (ip == mp->m_rootip)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Metadata inodes are all rooted in the superblock and do not have
|
||||
* any parents. Hence the attr fork will not be initialized, but
|
||||
* there are no parent pointers that might have been zapped.
|
||||
*/
|
||||
if (xfs_is_metadata_inode(ip))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Linked and linkable non-rootdir files should always have an
|
||||
* attribute fork because that is where parent pointers are
|
||||
* stored. If the fork is absent, something is amiss.
|
||||
*/
|
||||
if (!xfs_inode_has_attr_fork(ip))
|
||||
return true;
|
||||
|
||||
/* Repair zapped this file's attr fork a short time ago */
|
||||
if (xfs_ifork_zapped(ip, XFS_ATTR_FORK))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If the dinode repair found a bad attr fork, it will reset the fork
|
||||
* to extents format with zero records and wait for the bmapbta
|
||||
* scrubber to reconstruct the block mappings. The extended attribute
|
||||
* structure always contain some content when parent pointers are
|
||||
* enabled, so this is a clear sign of a zapped attr fork.
|
||||
*/
|
||||
return ip->i_af.if_format == XFS_DINODE_FMT_EXTENTS &&
|
||||
ip->i_af.if_nextents == 0;
|
||||
}
|
||||
|
|
|
|||
1612
fs/xfs/scrub/parent_repair.c
Normal file
1612
fs/xfs/scrub/parent_repair.c
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -77,8 +77,6 @@ xrep_quota_item_fill_bmap_hole(
|
|||
irec, &nmaps);
|
||||
if (error)
|
||||
return error;
|
||||
if (nmaps != 1)
|
||||
return -ENOSPC;
|
||||
|
||||
dq->q_blkno = XFS_FSB_TO_DADDR(mp, irec->br_startblock);
|
||||
|
||||
|
|
@ -444,10 +442,6 @@ xrep_quota_data_fork(
|
|||
XFS_BMAPI_CONVERT, 0, &nrec, &nmap);
|
||||
if (error)
|
||||
goto out;
|
||||
if (nmap != 1) {
|
||||
error = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
ASSERT(nrec.br_startoff == irec.br_startoff);
|
||||
ASSERT(nrec.br_blockcount == irec.br_blockcount);
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
#include "xfs_trans.h"
|
||||
#include "xfs_error.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
#include "scrub/readdir.h"
|
||||
|
||||
/* Call a function for every entry in a shortform directory. */
|
||||
|
|
@ -99,7 +100,7 @@ xchk_dir_walk_block(
|
|||
unsigned int off, next_off, end;
|
||||
int error;
|
||||
|
||||
error = xfs_dir3_block_read(sc->tp, dp, &bp);
|
||||
error = xfs_dir3_block_read(sc->tp, dp, dp->i_ino, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
@ -175,7 +176,7 @@ xchk_read_leaf_dir_buf(
|
|||
if (new_off > *curoff)
|
||||
*curoff = new_off;
|
||||
|
||||
return xfs_dir3_data_read(tp, dp, map.br_startoff, 0, bpp);
|
||||
return xfs_dir3_data_read(tp, dp, dp->i_ino, map.br_startoff, 0, bpp);
|
||||
}
|
||||
|
||||
/* Call a function for every entry in a leaf directory. */
|
||||
|
|
@ -273,8 +274,8 @@ xchk_dir_walk(
|
|||
.dp = dp,
|
||||
.geo = dp->i_mount->m_dir_geo,
|
||||
.trans = sc->tp,
|
||||
.owner = dp->i_ino,
|
||||
};
|
||||
bool isblock;
|
||||
int error;
|
||||
|
||||
if (xfs_is_shutdown(dp->i_mount))
|
||||
|
|
@ -283,22 +284,17 @@ xchk_dir_walk(
|
|||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
xfs_assert_ilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
|
||||
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL)
|
||||
switch (xfs_dir2_format(&args, &error)) {
|
||||
case XFS_DIR2_FMT_SF:
|
||||
return xchk_dir_walk_sf(sc, dp, dirent_fn, priv);
|
||||
|
||||
/* dir2 functions require that the data fork is loaded */
|
||||
error = xfs_iread_extents(sc->tp, dp, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_dir2_isblock(&args, &isblock);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (isblock)
|
||||
case XFS_DIR2_FMT_BLOCK:
|
||||
return xchk_dir_walk_block(sc, dp, dirent_fn, priv);
|
||||
|
||||
return xchk_dir_walk_leaf(sc, dp, dirent_fn, priv);
|
||||
case XFS_DIR2_FMT_LEAF:
|
||||
case XFS_DIR2_FMT_NODE:
|
||||
return xchk_dir_walk_leaf(sc, dp, dirent_fn, priv);
|
||||
default:
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -324,50 +320,102 @@ xchk_dir_lookup(
|
|||
.hashval = xfs_dir2_hashname(dp->i_mount, name),
|
||||
.whichfork = XFS_DATA_FORK,
|
||||
.op_flags = XFS_DA_OP_OKNOENT,
|
||||
.owner = dp->i_ino,
|
||||
};
|
||||
bool isblock, isleaf;
|
||||
int error;
|
||||
|
||||
if (xfs_is_shutdown(dp->i_mount))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* A temporary directory's block headers are written with the owner
|
||||
* set to sc->ip, so we must switch the owner here for the lookup.
|
||||
*/
|
||||
if (dp == sc->tempip)
|
||||
args.owner = sc->ip->i_ino;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
xfs_assert_ilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
|
||||
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
error = xfs_dir2_sf_lookup(&args);
|
||||
goto out_check_rval;
|
||||
}
|
||||
|
||||
/* dir2 functions require that the data fork is loaded */
|
||||
error = xfs_iread_extents(sc->tp, dp, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_dir2_isblock(&args, &isblock);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (isblock) {
|
||||
error = xfs_dir2_block_lookup(&args);
|
||||
goto out_check_rval;
|
||||
}
|
||||
|
||||
error = xfs_dir2_isleaf(&args, &isleaf);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (isleaf) {
|
||||
error = xfs_dir2_leaf_lookup(&args);
|
||||
goto out_check_rval;
|
||||
}
|
||||
|
||||
error = xfs_dir2_node_lookup(&args);
|
||||
|
||||
out_check_rval:
|
||||
if (error == -EEXIST)
|
||||
error = 0;
|
||||
error = xfs_dir_lookup_args(&args);
|
||||
if (!error)
|
||||
*ino = args.inumber;
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to grab the IOLOCK and ILOCK of sc->ip and ip, returning @ip's lock
|
||||
* state. The caller may have a transaction, so we must use trylock for both
|
||||
* IOLOCKs.
|
||||
*/
|
||||
static inline unsigned int
|
||||
xchk_dir_trylock_both(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
if (!xchk_ilock_nowait(sc, XFS_IOLOCK_EXCL))
|
||||
return 0;
|
||||
|
||||
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
|
||||
goto parent_iolock;
|
||||
|
||||
xchk_ilock(sc, XFS_ILOCK_EXCL);
|
||||
if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
|
||||
goto parent_ilock;
|
||||
|
||||
return XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL;
|
||||
|
||||
parent_ilock:
|
||||
xchk_iunlock(sc, XFS_ILOCK_EXCL);
|
||||
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
|
||||
parent_iolock:
|
||||
xchk_iunlock(sc, XFS_IOLOCK_EXCL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try for a limited time to grab the IOLOCK and ILOCK of both the scrub target
|
||||
* (@sc->ip) and the inode at the other end (@ip) of a directory or parent
|
||||
* pointer link so that we can check that link.
|
||||
*
|
||||
* We do not know ahead of time that the directory tree is /not/ corrupt, so we
|
||||
* cannot use the "lock two inode" functions because we do not know that there
|
||||
* is not a racing thread trying to take the locks in opposite order. First
|
||||
* take IOLOCK_EXCL of the scrub target, and then try to take IOLOCK_SHARED
|
||||
* of @ip to synchronize with the VFS. Next, take ILOCK_EXCL of the scrub
|
||||
* target and @ip to synchronize with XFS.
|
||||
*
|
||||
* If the trylocks succeed, *lockmode will be set to the locks held for @ip;
|
||||
* @sc->ilock_flags will be set for the locks held for @sc->ip; and zero will
|
||||
* be returned. If not, returns -EDEADLOCK to try again; or -ETIMEDOUT if
|
||||
* XCHK_TRY_HARDER was set. Returns -EINTR if the process has been killed.
|
||||
*/
|
||||
int
|
||||
xchk_dir_trylock_for_pptrs(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int *lockmode)
|
||||
{
|
||||
unsigned int nr;
|
||||
int error = 0;
|
||||
|
||||
ASSERT(sc->ilock_flags == 0);
|
||||
|
||||
for (nr = 0; nr < HZ; nr++) {
|
||||
*lockmode = xchk_dir_trylock_both(sc, ip);
|
||||
if (*lockmode)
|
||||
return 0;
|
||||
|
||||
if (xchk_should_terminate(sc, &error))
|
||||
return error;
|
||||
|
||||
delay(1);
|
||||
}
|
||||
|
||||
if (sc->flags & XCHK_TRY_HARDER) {
|
||||
xchk_set_incomplete(sc);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return -EDEADLOCK;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,4 +16,7 @@ int xchk_dir_walk(struct xfs_scrub *sc, struct xfs_inode *dp,
|
|||
int xchk_dir_lookup(struct xfs_scrub *sc, struct xfs_inode *dp,
|
||||
const struct xfs_name *name, xfs_ino_t *ino);
|
||||
|
||||
int xchk_dir_trylock_for_pptrs(struct xfs_scrub *sc, struct xfs_inode *ip,
|
||||
unsigned int *lockmode);
|
||||
|
||||
#endif /* __XFS_SCRUB_READDIR_H__ */
|
||||
|
|
|
|||
|
|
@ -211,6 +211,48 @@ static inline void xreap_defer_finish_reset(struct xreap_state *rs)
|
|||
rs->force_roll = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the maximum length of a buffer cache scan (in units of sectors),
|
||||
* given a quantity of fs blocks.
|
||||
*/
|
||||
xfs_daddr_t
|
||||
xrep_bufscan_max_sectors(
|
||||
struct xfs_mount *mp,
|
||||
xfs_extlen_t fsblocks)
|
||||
{
|
||||
int max_fsbs;
|
||||
|
||||
/* Remote xattr values are the largest buffers that we support. */
|
||||
max_fsbs = xfs_attr3_max_rmt_blocks(mp);
|
||||
|
||||
return XFS_FSB_TO_BB(mp, min_t(xfs_extlen_t, fsblocks, max_fsbs));
|
||||
}
|
||||
|
||||
/*
|
||||
* Return an incore buffer from a sector scan, or NULL if there are no buffers
|
||||
* left to return.
|
||||
*/
|
||||
struct xfs_buf *
|
||||
xrep_bufscan_advance(
|
||||
struct xfs_mount *mp,
|
||||
struct xrep_bufscan *scan)
|
||||
{
|
||||
scan->__sector_count += scan->daddr_step;
|
||||
while (scan->__sector_count <= scan->max_sectors) {
|
||||
struct xfs_buf *bp = NULL;
|
||||
int error;
|
||||
|
||||
error = xfs_buf_incore(mp->m_ddev_targp, scan->daddr,
|
||||
scan->__sector_count, XBF_LIVESCAN, &bp);
|
||||
if (!error)
|
||||
return bp;
|
||||
|
||||
scan->__sector_count += scan->daddr_step;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Try to invalidate the incore buffers for an extent that we're freeing. */
|
||||
STATIC void
|
||||
xreap_agextent_binval(
|
||||
|
|
@ -241,28 +283,15 @@ xreap_agextent_binval(
|
|||
* of any plausible size.
|
||||
*/
|
||||
while (bno < agbno_next) {
|
||||
xfs_agblock_t fsbcount;
|
||||
xfs_agblock_t max_fsbs;
|
||||
|
||||
/*
|
||||
* Max buffer size is the max remote xattr buffer size, which
|
||||
* is one fs block larger than 64k.
|
||||
*/
|
||||
max_fsbs = min_t(xfs_agblock_t, agbno_next - bno,
|
||||
xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX));
|
||||
|
||||
for (fsbcount = 1; fsbcount <= max_fsbs; fsbcount++) {
|
||||
struct xfs_buf *bp = NULL;
|
||||
xfs_daddr_t daddr;
|
||||
int error;
|
||||
|
||||
daddr = XFS_AGB_TO_DADDR(mp, agno, bno);
|
||||
error = xfs_buf_incore(mp->m_ddev_targp, daddr,
|
||||
XFS_FSB_TO_BB(mp, fsbcount),
|
||||
XBF_LIVESCAN, &bp);
|
||||
if (error)
|
||||
continue;
|
||||
struct xrep_bufscan scan = {
|
||||
.daddr = XFS_AGB_TO_DADDR(mp, agno, bno),
|
||||
.max_sectors = xrep_bufscan_max_sectors(mp,
|
||||
agbno_next - bno),
|
||||
.daddr_step = XFS_FSB_TO_BB(mp, 1),
|
||||
};
|
||||
struct xfs_buf *bp;
|
||||
|
||||
while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
|
||||
xfs_trans_bjoin(sc->tp, bp);
|
||||
xfs_trans_binval(sc->tp, bp);
|
||||
rs->invalidated++;
|
||||
|
|
@ -646,3 +675,375 @@ xrep_reap_fsblocks(
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Metadata files are not supposed to share blocks with anything else.
|
||||
* If blocks are shared, we remove the reverse mapping (thus reducing the
|
||||
* crosslink factor); if blocks are not shared, we also need to free them.
|
||||
*
|
||||
* This first step determines the longest subset of the passed-in imap
|
||||
* (starting at its beginning) that is either crosslinked or not crosslinked.
|
||||
* The blockcount will be adjust down as needed.
|
||||
*/
|
||||
STATIC int
|
||||
xreap_bmapi_select(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
struct xfs_bmbt_irec *imap,
|
||||
bool *crosslinked)
|
||||
{
|
||||
struct xfs_owner_info oinfo;
|
||||
struct xfs_btree_cur *cur;
|
||||
xfs_filblks_t len = 1;
|
||||
xfs_agblock_t bno;
|
||||
xfs_agblock_t agbno;
|
||||
xfs_agblock_t agbno_next;
|
||||
int error;
|
||||
|
||||
agbno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
|
||||
agbno_next = agbno + imap->br_blockcount;
|
||||
|
||||
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
|
||||
sc->sa.pag);
|
||||
|
||||
xfs_rmap_ino_owner(&oinfo, ip->i_ino, whichfork, imap->br_startoff);
|
||||
error = xfs_rmap_has_other_keys(cur, agbno, 1, &oinfo, crosslinked);
|
||||
if (error)
|
||||
goto out_cur;
|
||||
|
||||
bno = agbno + 1;
|
||||
while (bno < agbno_next) {
|
||||
bool also_crosslinked;
|
||||
|
||||
oinfo.oi_offset++;
|
||||
error = xfs_rmap_has_other_keys(cur, bno, 1, &oinfo,
|
||||
&also_crosslinked);
|
||||
if (error)
|
||||
goto out_cur;
|
||||
|
||||
if (also_crosslinked != *crosslinked)
|
||||
break;
|
||||
|
||||
len++;
|
||||
bno++;
|
||||
}
|
||||
|
||||
imap->br_blockcount = len;
|
||||
trace_xreap_bmapi_select(sc->sa.pag, agbno, len, *crosslinked);
|
||||
out_cur:
|
||||
xfs_btree_del_cursor(cur, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decide if this buffer can be joined to a transaction. This is true for most
|
||||
* buffers, but there are two cases that we want to catch: large remote xattr
|
||||
* value buffers are not logged and can overflow the buffer log item dirty
|
||||
* bitmap size; and oversized cached buffers if things have really gone
|
||||
* haywire.
|
||||
*/
|
||||
static inline bool
|
||||
xreap_buf_loggable(
|
||||
const struct xfs_buf *bp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bp->b_map_count; i++) {
|
||||
int chunks;
|
||||
int map_size;
|
||||
|
||||
chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
|
||||
XFS_BLF_CHUNK);
|
||||
map_size = DIV_ROUND_UP(chunks, NBWORD);
|
||||
if (map_size > XFS_BLF_DATAMAP_SIZE)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate any buffers for this file mapping. The @imap blockcount may be
|
||||
* adjusted downward if we need to roll the transaction.
|
||||
*/
|
||||
STATIC int
|
||||
xreap_bmapi_binval(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
struct xfs_bmbt_irec *imap)
|
||||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_perag *pag = sc->sa.pag;
|
||||
int bmap_flags = xfs_bmapi_aflag(whichfork);
|
||||
xfs_fileoff_t off;
|
||||
xfs_fileoff_t max_off;
|
||||
xfs_extlen_t scan_blocks;
|
||||
xfs_agnumber_t agno = sc->sa.pag->pag_agno;
|
||||
xfs_agblock_t bno;
|
||||
xfs_agblock_t agbno;
|
||||
xfs_agblock_t agbno_next;
|
||||
unsigned int invalidated = 0;
|
||||
int error;
|
||||
|
||||
/*
|
||||
* Avoid invalidating AG headers and post-EOFS blocks because we never
|
||||
* own those.
|
||||
*/
|
||||
agbno = bno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
|
||||
agbno_next = agbno + imap->br_blockcount;
|
||||
if (!xfs_verify_agbno(pag, agbno) ||
|
||||
!xfs_verify_agbno(pag, agbno_next - 1))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Buffers for file blocks can span multiple contiguous mappings. This
|
||||
* means that for each block in the mapping, there could exist an
|
||||
* xfs_buf indexed by that block with any length up to the maximum
|
||||
* buffer size (remote xattr values) or to the next hole in the fork.
|
||||
* To set up our binval scan, first we need to figure out the location
|
||||
* of the next hole.
|
||||
*/
|
||||
off = imap->br_startoff + imap->br_blockcount;
|
||||
max_off = off + xfs_attr3_max_rmt_blocks(mp);
|
||||
while (off < max_off) {
|
||||
struct xfs_bmbt_irec hmap;
|
||||
int nhmaps = 1;
|
||||
|
||||
error = xfs_bmapi_read(ip, off, max_off - off, &hmap,
|
||||
&nhmaps, bmap_flags);
|
||||
if (error)
|
||||
return error;
|
||||
if (nhmaps != 1 || hmap.br_startblock == DELAYSTARTBLOCK) {
|
||||
ASSERT(0);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (!xfs_bmap_is_real_extent(&hmap))
|
||||
break;
|
||||
|
||||
off = hmap.br_startoff + hmap.br_blockcount;
|
||||
}
|
||||
scan_blocks = off - imap->br_startoff;
|
||||
|
||||
trace_xreap_bmapi_binval_scan(sc, imap, scan_blocks);
|
||||
|
||||
/*
|
||||
* If there are incore buffers for these blocks, invalidate them. If
|
||||
* we can't (try)lock the buffer we assume it's owned by someone else
|
||||
* and leave it alone. The buffer cache cannot detect aliasing, so
|
||||
* employ nested loops to detect incore buffers of any plausible size.
|
||||
*/
|
||||
while (bno < agbno_next) {
|
||||
struct xrep_bufscan scan = {
|
||||
.daddr = XFS_AGB_TO_DADDR(mp, agno, bno),
|
||||
.max_sectors = xrep_bufscan_max_sectors(mp,
|
||||
scan_blocks),
|
||||
.daddr_step = XFS_FSB_TO_BB(mp, 1),
|
||||
};
|
||||
struct xfs_buf *bp;
|
||||
|
||||
while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
|
||||
if (xreap_buf_loggable(bp)) {
|
||||
xfs_trans_bjoin(sc->tp, bp);
|
||||
xfs_trans_binval(sc->tp, bp);
|
||||
} else {
|
||||
xfs_buf_stale(bp);
|
||||
xfs_buf_relse(bp);
|
||||
}
|
||||
invalidated++;
|
||||
|
||||
/*
|
||||
* Stop invalidating if we've hit the limit; we should
|
||||
* still have enough reservation left to free however
|
||||
* much of the mapping we've seen so far.
|
||||
*/
|
||||
if (invalidated > XREAP_MAX_BINVAL) {
|
||||
imap->br_blockcount = agbno_next - bno;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
bno++;
|
||||
scan_blocks--;
|
||||
}
|
||||
|
||||
out:
|
||||
trace_xreap_bmapi_binval(sc->sa.pag, agbno, imap->br_blockcount);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispose of as much of the beginning of this file fork mapping as possible.
|
||||
* The number of blocks disposed of is returned in @imap->br_blockcount.
|
||||
*/
|
||||
STATIC int
|
||||
xrep_reap_bmapi_iter(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
struct xfs_bmbt_irec *imap,
|
||||
bool crosslinked)
|
||||
{
|
||||
int error;
|
||||
|
||||
if (crosslinked) {
|
||||
/*
|
||||
* If there are other rmappings, this block is cross linked and
|
||||
* must not be freed. Remove the reverse mapping, leave the
|
||||
* buffer cache in its possibly confused state, and move on.
|
||||
* We don't want to risk discarding valid data buffers from
|
||||
* anybody else who thinks they own the block, even though that
|
||||
* runs the risk of stale buffer warnings in the future.
|
||||
*/
|
||||
trace_xreap_dispose_unmap_extent(sc->sa.pag,
|
||||
XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
|
||||
imap->br_blockcount);
|
||||
|
||||
/*
|
||||
* Schedule removal of the mapping from the fork. We use
|
||||
* deferred log intents in this function to control the exact
|
||||
* sequence of metadata updates.
|
||||
*/
|
||||
xfs_bmap_unmap_extent(sc->tp, ip, whichfork, imap);
|
||||
xfs_trans_mod_dquot_byino(sc->tp, ip, XFS_TRANS_DQ_BCOUNT,
|
||||
-(int64_t)imap->br_blockcount);
|
||||
xfs_rmap_unmap_extent(sc->tp, ip, whichfork, imap);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the block is not crosslinked, we can invalidate all the incore
|
||||
* buffers for the extent, and then free the extent. This is a bit of
|
||||
* a mess since we don't detect discontiguous buffers that are indexed
|
||||
* by a block starting before the first block of the extent but overlap
|
||||
* anyway.
|
||||
*/
|
||||
trace_xreap_dispose_free_extent(sc->sa.pag,
|
||||
XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
|
||||
imap->br_blockcount);
|
||||
|
||||
/*
|
||||
* Invalidate as many buffers as we can, starting at the beginning of
|
||||
* this mapping. If this function sets blockcount to zero, the
|
||||
* transaction is full of logged buffer invalidations, so we need to
|
||||
* return early so that we can roll and retry.
|
||||
*/
|
||||
error = xreap_bmapi_binval(sc, ip, whichfork, imap);
|
||||
if (error || imap->br_blockcount == 0)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* Schedule removal of the mapping from the fork. We use deferred log
|
||||
* intents in this function to control the exact sequence of metadata
|
||||
* updates.
|
||||
*/
|
||||
xfs_bmap_unmap_extent(sc->tp, ip, whichfork, imap);
|
||||
xfs_trans_mod_dquot_byino(sc->tp, ip, XFS_TRANS_DQ_BCOUNT,
|
||||
-(int64_t)imap->br_blockcount);
|
||||
return xfs_free_extent_later(sc->tp, imap->br_startblock,
|
||||
imap->br_blockcount, NULL, XFS_AG_RESV_NONE, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispose of as much of this file extent as we can. Upon successful return,
|
||||
* the imap will reflect the mapping that was removed from the fork.
|
||||
*/
|
||||
STATIC int
|
||||
xreap_ifork_extent(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
struct xfs_bmbt_irec *imap)
|
||||
{
|
||||
xfs_agnumber_t agno;
|
||||
bool crosslinked;
|
||||
int error;
|
||||
|
||||
ASSERT(sc->sa.pag == NULL);
|
||||
|
||||
trace_xreap_ifork_extent(sc, ip, whichfork, imap);
|
||||
|
||||
agno = XFS_FSB_TO_AGNO(sc->mp, imap->br_startblock);
|
||||
sc->sa.pag = xfs_perag_get(sc->mp, agno);
|
||||
if (!sc->sa.pag)
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
|
||||
if (error)
|
||||
goto out_pag;
|
||||
|
||||
/*
|
||||
* Decide the fate of the blocks at the beginning of the mapping, then
|
||||
* update the mapping to use it with the unmap calls.
|
||||
*/
|
||||
error = xreap_bmapi_select(sc, ip, whichfork, imap, &crosslinked);
|
||||
if (error)
|
||||
goto out_agf;
|
||||
|
||||
error = xrep_reap_bmapi_iter(sc, ip, whichfork, imap, crosslinked);
|
||||
if (error)
|
||||
goto out_agf;
|
||||
|
||||
out_agf:
|
||||
xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
|
||||
sc->sa.agf_bp = NULL;
|
||||
out_pag:
|
||||
xfs_perag_put(sc->sa.pag);
|
||||
sc->sa.pag = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispose of each block mapped to the given fork of the given file. Callers
|
||||
* must hold ILOCK_EXCL, and ip can only be sc->ip or sc->tempip. The fork
|
||||
* must not have any delalloc reservations.
|
||||
*/
|
||||
int
|
||||
xrep_reap_ifork(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork)
|
||||
{
|
||||
xfs_fileoff_t off = 0;
|
||||
int bmap_flags = xfs_bmapi_aflag(whichfork);
|
||||
int error;
|
||||
|
||||
ASSERT(xfs_has_rmapbt(sc->mp));
|
||||
ASSERT(ip == sc->ip || ip == sc->tempip);
|
||||
ASSERT(whichfork == XFS_ATTR_FORK || !XFS_IS_REALTIME_INODE(ip));
|
||||
|
||||
while (off < XFS_MAX_FILEOFF) {
|
||||
struct xfs_bmbt_irec imap;
|
||||
int nimaps = 1;
|
||||
|
||||
/* Read the next extent, skip past holes and delalloc. */
|
||||
error = xfs_bmapi_read(ip, off, XFS_MAX_FILEOFF - off, &imap,
|
||||
&nimaps, bmap_flags);
|
||||
if (error)
|
||||
return error;
|
||||
if (nimaps != 1 || imap.br_startblock == DELAYSTARTBLOCK) {
|
||||
ASSERT(0);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a real space mapping, reap as much of it as we
|
||||
* can in a single transaction.
|
||||
*/
|
||||
if (xfs_bmap_is_real_extent(&imap)) {
|
||||
error = xreap_ifork_extent(sc, ip, whichfork, &imap);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_defer_finish(&sc->tp);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
off = imap.br_startoff + imap.br_blockcount;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,5 +13,26 @@ int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap,
|
|||
const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
|
||||
int xrep_reap_fsblocks(struct xfs_scrub *sc, struct xfsb_bitmap *bitmap,
|
||||
const struct xfs_owner_info *oinfo);
|
||||
int xrep_reap_ifork(struct xfs_scrub *sc, struct xfs_inode *ip, int whichfork);
|
||||
|
||||
/* Buffer cache scan context. */
|
||||
struct xrep_bufscan {
|
||||
/* Disk address for the buffers we want to scan. */
|
||||
xfs_daddr_t daddr;
|
||||
|
||||
/* Maximum number of sectors to scan. */
|
||||
xfs_daddr_t max_sectors;
|
||||
|
||||
/* Each round, increment the search length by this number of sectors. */
|
||||
xfs_daddr_t daddr_step;
|
||||
|
||||
/* Internal scan state; initialize to zero. */
|
||||
xfs_daddr_t __sector_count;
|
||||
};
|
||||
|
||||
xfs_daddr_t xrep_bufscan_max_sectors(struct xfs_mount *mp,
|
||||
xfs_extlen_t fsblocks);
|
||||
struct xfs_buf *xrep_bufscan_advance(struct xfs_mount *mp,
|
||||
struct xrep_bufscan *scan);
|
||||
|
||||
#endif /* __XFS_SCRUB_REAP_H__ */
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue