Print "(repairing)" in zpool status again

Historically, zpool status prints "(repairing)" for any drives that
have errors during a scrub:

        NAME            STATE     READ WRITE CKSUM
        mypool          ONLINE       0     0     0
          mirror-0      ONLINE       0     0     0
            /tmp/file1  ONLINE      13     0     0  (repairing)
            /tmp/file2  ONLINE       0     0     0
            /tmp/file3  ONLINE       0     0     0

This was accidentally broken in "OpenZFS 9166 - zfs storage pool
checkpoint" (d2734cc).  This patch adds it back in.

Reviewed-by: Serapheim Dimitropoulos <serapheim@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tony Hutter <hutter2@llnl.gov>
Closes #7779
Closes #7978
This commit is contained in:
Tony Hutter 2018-10-09 20:30:32 -07:00 committed by Brian Behlendorf
parent 0391690583
commit 2ef0f8c329
4 changed files with 84 additions and 4 deletions

View File

@ -1742,7 +1742,7 @@ static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare)
{
nvlist_t **child;
nvlist_t **child, *root;
uint_t c, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
@ -1868,7 +1868,10 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
}
}
(void) nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS,
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
if (ps != NULL && ps->pss_state == DSS_SCANNING &&
@ -1903,6 +1906,7 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare);
free(vname);

View File

@ -423,7 +423,8 @@ tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_offline_device']
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]

View File

@ -8,7 +8,8 @@ dist_pkgdata_SCRIPTS = \
zpool_scrub_004_pos.ksh \
zpool_scrub_005_pos.ksh \
zpool_scrub_encrypted_unloaded.ksh \
zpool_scrub_offline_device.ksh
zpool_scrub_offline_device.ksh \
zpool_scrub_print_repairing.ksh
dist_pkgdata_DATA = \
zpool_scrub.cfg

View File

@ -0,0 +1,74 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2018 Lawrence Livermore National Security, LLC.
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
#
# DESCRIPTION:
# zpool status should print "(repairing)" on drives with errors found
# while scrubbing.
#
# STRATEGY:
# 1. Create a file (already done in setup.ksh)
# 2. Inject read errors on one vdev
# 3. Run a scrub
# 4. Verify we see "(repairing)" on the bad vdev
#
# NOTE(review): verify_runnable gates where this test may execute —
# "global" presumably restricts it to the global zone; confirm against
# the helper's definition in libtest.shlib.
verify_runnable "global"
# State the test's claim up front; a matching log_pass/log_fail follows.
log_assert "Verify we see '(repairing)' while scrubbing a bad vdev."
# Restore the system to its pre-test state: clear every fault injection,
# put the scan-speed tunable back to its default, and stop any scrub
# still in flight.
function cleanup
{
log_must zinject -c all
log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
# The scrub may already have finished (or never started), so a failure
# to cancel it here is not an error.
zpool scrub -s $TESTPOOL || true
}
log_onexit cleanup

# setup.ksh already created the pool's backing file.  Make every read of
# the first disk fail so the scrub has errors to repair.
log_must zinject -d $DISK1 -e io -T read -f 100 $TESTPOOL

# Slow the scrub down (delayed I/O on the injected disk plus a reduced
# per-vdev scan limit) so it stays running long enough to observe.
log_must zinject -d $DISK1 -D10:1 $TESTPOOL
log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_SLOW
log_must zpool scrub $TESTPOOL

# Poll every 0.1s — roughly 10 seconds in total — for the '(repairing)'
# annotation on the bad vdev while the scrub is still in progress.
typeset -i iter=0
while ((iter <= 100)); do
	is_pool_scrubbing $TESTPOOL || break
	if zpool status | grep "$DISK1" | grep -q '(repairing)' ; then
		log_pass "Correctly saw '(repairing)' while scrubbing"
	fi
	sleep 0.1
	((iter += 1))
done
log_fail "Never saw '(repairing)' while scrubbing"