Fix "zpool add -n" for dedup, special and log devices

For dedup, special and log devices "zpool add -n" does not correctly
print their vdev type:

~# zpool add -n pool dedup /tmp/dedup special /tmp/special log /tmp/log
would update 'pool' to the following configuration:
	pool
	  /tmp/normal
	  /tmp/dedup
	  /tmp/special
	  /tmp/log

This could lead storage administrators to modify their ZFS pools to
unexpected and unintended vdev configurations.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: loli10K <ezomori.nozomu@gmail.com>
Closes #9783 
Closes #9390
This commit is contained in:
loli10K 2020-01-07 00:40:06 +01:00 committed by Brian Behlendorf
parent bc9cef11fd
commit c24fa4b19a
5 changed files with 75 additions and 37 deletions

View File

@@ -936,20 +936,35 @@ zpool_do_add(int argc, char **argv)
 	print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

 	/* print other classes: 'dedup', 'special', and 'log' */
-	print_vdev_tree(zhp, "dedup", poolnvroot, 0,
-	    VDEV_ALLOC_BIAS_DEDUP, name_flags);
-	print_vdev_tree(zhp, NULL, nvroot, 0, VDEV_ALLOC_BIAS_DEDUP,
-	    name_flags);
+	if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
+		print_vdev_tree(zhp, "dedup", poolnvroot, 0,
+		    VDEV_ALLOC_BIAS_DEDUP, name_flags);
+		print_vdev_tree(zhp, NULL, nvroot, 0,
+		    VDEV_ALLOC_BIAS_DEDUP, name_flags);
+	} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
+		print_vdev_tree(zhp, "dedup", nvroot, 0,
+		    VDEV_ALLOC_BIAS_DEDUP, name_flags);
+	}

-	print_vdev_tree(zhp, "special", poolnvroot, 0,
-	    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
-	print_vdev_tree(zhp, NULL, nvroot, 0, VDEV_ALLOC_BIAS_SPECIAL,
-	    name_flags);
+	if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
+		print_vdev_tree(zhp, "special", poolnvroot, 0,
+		    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
+		print_vdev_tree(zhp, NULL, nvroot, 0,
+		    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
+	} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
+		print_vdev_tree(zhp, "special", nvroot, 0,
+		    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
+	}

-	print_vdev_tree(zhp, "logs", poolnvroot, 0, VDEV_ALLOC_BIAS_LOG,
-	    name_flags);
-	print_vdev_tree(zhp, NULL, nvroot, 0, VDEV_ALLOC_BIAS_LOG,
-	    name_flags);
+	if (num_logs(poolnvroot) > 0) {
+		print_vdev_tree(zhp, "logs", poolnvroot, 0,
+		    VDEV_ALLOC_BIAS_LOG, name_flags);
+		print_vdev_tree(zhp, NULL, nvroot, 0,
+		    VDEV_ALLOC_BIAS_LOG, name_flags);
+	} else if (num_logs(nvroot) > 0) {
+		print_vdev_tree(zhp, "logs", nvroot, 0,
+		    VDEV_ALLOC_BIAS_LOG, name_flags);
+	}

 	/* Do the same for the caches */
 	if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,

View File

@@ -34,7 +34,7 @@ extern "C" {
 #endif

 extern boolean_t zfs_allocatable_devs(nvlist_t *);
-extern boolean_t zfs_special_devs(nvlist_t *);
+extern boolean_t zfs_special_devs(nvlist_t *, char *);
 extern void zpool_get_load_policy(nvlist_t *, zpool_load_policy_t *);
 extern int zfs_zpl_version_map(int spa_version);

View File

@@ -68,7 +68,7 @@ zfs_allocatable_devs(nvlist_t *nv)
  * Are there special vdevs?
  */
 boolean_t
-zfs_special_devs(nvlist_t *nv)
+zfs_special_devs(nvlist_t *nv, char *type)
 {
 	char *bias;
 	uint_t c;
@@ -84,7 +84,11 @@ zfs_special_devs(nvlist_t *nv)
 		    &bias) == 0) {
 			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0 ||
 			    strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) {
-				return (B_TRUE);
+				if (type != NULL && strcmp(bias, type) == 0) {
+					return (B_TRUE);
+				} else if (type == NULL) {
+					return (B_TRUE);
+				}
 			}
 		}
 	}

View File

@@ -5690,7 +5690,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
 			return (error);
 		}
 	}
-	if (!has_allocclass && zfs_special_devs(nvroot)) {
+	if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
 		spa_deactivate(spa);
 		spa_remove(spa);
 		mutex_exit(&spa_namespace_lock);

View File

@@ -34,26 +34,23 @@
 #
 # DESCRIPTION:
-# 'zpool add -n <pool> <vdev> ...' can display the configuration without
-# adding the specified devices to given pool
+# 'zpool add -n <pool> <vdev> ...' can display the configuration without adding
+# the specified devices to given pool
 #
 # STRATEGY:
 # 1. Create a storage pool
-# 2. Use -n to add a device to the pool
-# 3. Verify the device is not added actually
+# 2. Use -n to add devices to the pool
+# 3. Verify the devices are not added actually
+# 4. Add devices to the pool for real this time, verify the vdev tree is the
+#    same printed by the dryrun iteration
 #

 verify_runnable "global"

 function cleanup
 {
-	poolexists $TESTPOOL && \
-		destroy_pool $TESTPOOL
-
-	partition_cleanup
-
-	[[ -e $tmpfile ]] && \
-		log_must rm -f $tmpfile
+	destroy_pool $TESTPOOL
+	rm -f $TMPFILE_PREFIX* $VDEV_PREFIX*
 }

@@ -61,18 +58,40 @@ log_assert "'zpool add -n <pool> <vdev> ...' can display the configuration" \
 log_onexit cleanup

-tmpfile="$TEST_BASE_DIR/zpool_add_003.tmp$$"
+typeset TMPFILE_PREFIX="$TEST_BASE_DIR/zpool_add_003"
+typeset STR_DRYRUN="would update '$TESTPOOL' to the following configuration:"
+typeset VDEV_PREFIX="$TEST_BASE_DIR/filedev"
+typeset -a VDEV_TYPES=("" "dedup" "special" "log" "cache")

-create_pool "$TESTPOOL" "${disk}${SLICE_PREFIX}${SLICE0}"
+vdevs=""
+config=""
+
+# 1. Create a storage pool
+log_must truncate -s $SPA_MINDEVSIZE "$VDEV_PREFIX-root"
+log_must zpool create "$TESTPOOL" "$VDEV_PREFIX-root"
 log_must poolexists "$TESTPOOL"

-zpool add -n "$TESTPOOL" ${disk}${SLICE_PREFIX}${SLICE1} > $tmpfile
+for vdevtype in "${VDEV_TYPES[@]}"; do
+	log_must truncate -s $SPA_MINDEVSIZE "$VDEV_PREFIX-$vdevtype"
+	vdevs="$vdevs $VDEV_PREFIX-$vdevtype"
+	config="$config $vdevtype $VDEV_PREFIX-$vdevtype"
+done

-log_mustnot vdevs_in_pool "$TESTPOOL" "${disk}${SLICE_PREFIX}${SLICE1}"
+# 2. Use -n to add devices to the pool
+log_must eval "zpool add -f -n $TESTPOOL $config > $TMPFILE_PREFIX-dryrun"
+log_must grep -q "$STR_DRYRUN" "$TMPFILE_PREFIX-dryrun"

-str="would update '$TESTPOOL' to the following configuration:"
-cat $tmpfile | grep "$str" >/dev/null 2>&1
-(( $? != 0 )) && \
-	log_fail "'zpool add -n <pool> <vdev> ...' is executed as unexpected"
+# 3. Verify the devices are not added actually
+for vdev in $vdevs; do
+	log_mustnot vdevs_in_pool "$TESTPOOL" "$vdev"
+done

+# 4. Add devices to the pool for real this time, verify the vdev tree is the
+#    same printed by the dryrun iteration
+log_must zpool add -f $TESTPOOL $config
+zpool status $TESTPOOL | awk 'NR == 1, /NAME/ { next } /^$/ {exit}
+    {print $1}' > "$TMPFILE_PREFIX-vdevtree"
+cat "$TMPFILE_PREFIX-dryrun" | awk 'NR == 1, /would/ {next}
+    {print $1}' > "$TMPFILE_PREFIX-vdevtree-n"
+log_must eval "diff $TMPFILE_PREFIX-vdevtree-n $TMPFILE_PREFIX-vdevtree"

-log_pass "'zpool add -n <pool> <vdev> ...'executes successfully."
+log_pass "'zpool add -n <pool> <vdev> ...' executes successfully."