Improve zfs_blkptr_verify()

 - Skip config lock enter/exit for embedded blocks.  They have no
   DVAs, so there is nothing to check under the lock.
 - Skip the CHECKSUM check and properly check PSIZE for embedded
   blocks.
 - Add static branch predictions for unlikely conditions (see the
   sketch below).
 - Do not verify DVAs for blocks already in the ARC.  An ARC hit has
   already "verified" the first (often the only) DVA, and it is not
   worth entering and exiting the config lock for nothing.

Some profiles show up to a 3% CPU saving from this change.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Allan Jude <allan@klarasystems.com>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored by:	iXsystems, Inc.
Closes #16387
commit aef452f108 (parent 24e6585e76)
Alexander Motin, 2024-08-08 18:25:10 -04:00, committed by GitHub
2 changed files with 51 additions and 33 deletions
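
As background for the branch-prediction bullet above, here is a minimal
standalone sketch of the unlikely() hint in its conventional GCC/Clang
form.  In the ZFS tree the macro comes from the SPL/kernel headers; the
check_size() helper below is a hypothetical stand-in for one of the
verifier's checks, not code from this commit.

/*
 * Minimal sketch of static branch prediction.  In ZFS the unlikely()
 * macro is provided by the SPL/kernel headers; this is the usual
 * GCC/Clang definition, shown for illustration only.
 */
#include <stdio.h>

#define	unlikely(x)	__builtin_expect(!!(x), 0)

static int
check_size(unsigned long long psize, unsigned long long maxsize)
{
	/*
	 * The error path is cold: the hint lets the compiler lay it
	 * out away from the hot fall-through path.
	 */
	if (unlikely(psize > maxsize)) {
		(void) fprintf(stderr, "invalid PSIZE %llu\n", psize);
		return (0);
	}
	return (1);
}

int
main(void)
{
	return (check_size(4096, 16ULL << 20) ? 0 : 1);
}

The hint changes code layout, not semantics: the compiler places the
error branch off the hot path, which is where small CPU savings in
frequently executed checks like these come from.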

module/zfs/arc.c

@@ -5576,19 +5576,6 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 	 */
 	fstrans_cookie_t cookie = spl_fstrans_mark();
 top:
-	/*
-	 * Verify the block pointer contents are reasonable.  This should
-	 * always be the case since the blkptr is protected by a checksum.
-	 * However, if there is damage it's desirable to detect this early
-	 * and treat it as a checksum error.  This allows an alternate blkptr
-	 * to be tried when one is available (e.g. ditto blocks).
-	 */
-	if (!zfs_blkptr_verify(spa, bp, (zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
-	    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
-		rc = SET_ERROR(ECKSUM);
-		goto done;
-	}
-
 	if (!embedded_bp) {
 		/*
 		 * Embedded BP's have no DVA and require no I/O to "read".
@@ -5608,6 +5595,18 @@ top:
 	    (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
 		boolean_t is_data = !HDR_ISTYPE_METADATA(hdr);
 
+		/*
+		 * Verify the block pointer contents are reasonable.  This
+		 * should always be the case since the blkptr is protected by
+		 * a checksum.
+		 */
+		if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_SKIP,
+		    BLK_VERIFY_LOG)) {
+			mutex_exit(hash_lock);
+			rc = SET_ERROR(ECKSUM);
+			goto done;
+		}
+
 		if (HDR_IO_IN_PROGRESS(hdr)) {
 			if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
 				mutex_exit(hash_lock);
@@ -5761,6 +5760,20 @@ top:
 			goto done;
 		}
 
+		/*
+		 * Verify the block pointer contents are reasonable.  This
+		 * should always be the case since the blkptr is protected by
+		 * a checksum.
+		 */
+		if (!zfs_blkptr_verify(spa, bp,
+		    (zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
+		    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) {
+			if (hash_lock != NULL)
+				mutex_exit(hash_lock);
+			rc = SET_ERROR(ECKSUM);
+			goto done;
+		}
+
 		if (hdr == NULL) {
 			/*
 			 * This block is not in the cache or it has
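
To see how the three arc.c hunks fit together, here is a hypothetical,
heavily condensed outline of the resulting control flow.  It is not the
real arc_read(); hdr_is_cached() stands in for the actual hash-lookup
and header-state checks, and everything else is elided.

/*
 * Hypothetical outline of arc_read() after this change: verification
 * moves from the top of the function to the two paths that need it.
 */
static int
arc_read_outline(spa_t *spa, const blkptr_t *bp, arc_buf_hdr_t *hdr,
    int zio_flags)
{
	if (hdr != NULL && hdr_is_cached(hdr)) {
		/*
		 * ARC hit: the first DVA was effectively verified when
		 * the block was first read, so skip the config lock.
		 */
		if (!zfs_blkptr_verify(spa, bp, BLK_CONFIG_SKIP,
		    BLK_VERIFY_LOG))
			return (SET_ERROR(ECKSUM));
		/* ... serve the buffer from the ARC ... */
	} else {
		/*
		 * ARC miss: verify the DVAs under the config lock (or
		 * note the caller holds it) before issuing the read.
		 */
		if (!zfs_blkptr_verify(spa, bp,
		    (zio_flags & ZIO_FLAG_CONFIG_WRITER) ?
		    BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_LOG))
			return (SET_ERROR(ECKSUM));
		/* ... issue the read ... */
	}
	return (0);
}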

module/zfs/zio.c

@@ -1105,45 +1105,50 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 {
 	int errors = 0;
 
-	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid TYPE %llu",
 		    bp, (longlong_t)BP_GET_TYPE(bp));
 	}
-	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
-		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
-		    "blkptr at %px has invalid CHECKSUM %llu",
-		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
-	}
-	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
+	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid COMPRESS %llu",
 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
 	}
-	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid LSIZE %llu",
 		    bp, (longlong_t)BP_GET_LSIZE(bp));
 	}
-	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
-		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
-		    "blkptr at %px has invalid PSIZE %llu",
-		    bp, (longlong_t)BP_GET_PSIZE(bp));
-	}
-
 	if (BP_IS_EMBEDDED(bp)) {
-		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
+		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px has invalid ETYPE %llu",
 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
 		}
+		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
+			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+			    "blkptr at %px has invalid PSIZE %llu",
+			    bp, (longlong_t)BPE_GET_PSIZE(bp));
+		}
+		return (errors == 0);
 	}
+	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
+		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+		    "blkptr at %px has invalid CHECKSUM %llu",
+		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
+	}
+	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
+		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+		    "blkptr at %px has invalid PSIZE %llu",
+		    bp, (longlong_t)BP_GET_PSIZE(bp));
+	}
 
 	/*
 	 * Do not verify individual DVAs if the config is not trusted. This
 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
 	 */
-	if (!spa->spa_trust_config)
+	if (unlikely(!spa->spa_trust_config))
 		return (errors == 0);
 
 	switch (blk_config) {
@@ -1172,20 +1177,20 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 		const dva_t *dva = &bp->blk_dva[i];
 		uint64_t vdevid = DVA_GET_VDEV(dva);
 
-		if (vdevid >= spa->spa_root_vdev->vdev_children) {
+		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
 			continue;
 		}
 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
-		if (vd == NULL) {
+		if (unlikely(vd == NULL)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
 			continue;
 		}
-		if (vd->vdev_ops == &vdev_hole_ops) {
+		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has hole VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
@@ -1203,7 +1208,7 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 		uint64_t asize = DVA_GET_ASIZE(dva);
 		if (DVA_GET_GANG(dva))
 			asize = vdev_gang_header_asize(vd);
-		if (offset + asize > vd->vdev_asize) {
+		if (unlikely(offset + asize > vd->vdev_asize)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid OFFSET %llu",
 			    bp, i, (longlong_t)offset);
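
Finally, a hedged sketch of why the embedded-block case above can
return early: an embedded BP carries its (compressed) payload inside
the 128-byte block pointer itself, so it has no DVAs to verify under
the config lock, and its PSIZE is bounded by the payload area
(BPE_PAYLOAD_SIZE) rather than SPA_MAXBLOCKSIZE.  The macros below are
real ZFS ones; verify_embedded_outline() itself is illustrative, not
code from this commit.

/*
 * Illustrative early exit mirroring the new embedded-block structure
 * in zfs_blkptr_verify(); the macros are real, the function is not.
 */
static boolean_t
verify_embedded_outline(const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_TRUE);	/* handled by the main path */
	if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)
		return (B_FALSE);	/* unknown embedded type */
	if (BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)
		return (B_FALSE);	/* payload cannot exceed the BP */
	/*
	 * No DVAs, and the payload is covered by the containing
	 * block's checksum: nothing else to verify here.
	 */
	return (B_TRUE);
}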