zpool import cachefile improvements

Importing a pool using the cachefile is ideal because it avoids the
device scanning that makes a normal import slow. However, if the
devices associated with a pool in the cachefile have changed, the
import fails. This can easily be corrected by performing a normal
import, which reads the pool configuration from the device labels.

The goal of this change is to make importing with a cachefile more
resilient and auto-correcting. This is accomplished by having the
cachefile import logic automatically fall back to reading the labels
of the devices, as a normal import does. The main difference between
the fallback logic and a normal import is that the cachefile import
logic only looks at the device directories that were originally used
when the cachefile was populated. Additionally, the fallback logic
always imports by guid, ensuring that only the pools in the cachefile
are imported.
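
In outline, the fallback is a single retry with a constrained scan. A
condensed sketch of the new zpool_do_import() flow, taken from the diff
below (declarations and unrelated error handling elided):

err = import_pools(pools, props, mntopts, flags, argv[0],
    argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
    do_all, &idata);
if (err != 0 && cachefile != NULL) {
    (void) printf(gettext("cachefile import failed, retrying\n"));
    /* Rescan, restricted to the directories recorded in the cachefile. */
    idata.scan = B_TRUE;
    nvlist_free(pools);
    pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
    err = import_pools(pools, props, mntopts, flags, argv[0],
        argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
        do_all, &idata);
}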

External-issue: DLPX-71980
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: George Wilson <gwilson@delphix.com>
Closes #11716
5 changed files with 441 additions and 154 deletions


@@ -2623,8 +2623,8 @@ print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
/*
* Display the status for the given pool.
*/
static void
show_import(nvlist_t *config)
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
@@ -2656,6 +2656,13 @@ show_import(nvlist_t *config)
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
@@ -2983,6 +2990,7 @@ show_import(nvlist_t *config)
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
static boolean_t
@@ -3121,6 +3129,121 @@ do_import(nvlist_t *config, const char *newname, const char *mntopts,
return (ret);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) printf("\n");
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from a cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* so the caller can invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
@@ -3248,51 +3371,54 @@ zpool_do_checkpoint(int argc, char **argv)
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
* fall back to searching for devices only in the directories
* that exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
* -D Scan for previously destroyed pools or import all or only
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
* vdevs in the FAULTED state. In other words, it does verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
* -T Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
* -s Scan using the default search path; the libblkid cache will
* not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
* The import command scans for pools to import, and import pools based on pool
* name and GUID. The pool can also be renamed as part of the import process.
@@ -3309,15 +3435,11 @@ zpool_do_import(int argc, char **argv)
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
@@ -3325,7 +3447,8 @@
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
@@ -3447,6 +3570,11 @@
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
@@ -3527,7 +3655,7 @@
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
@@ -3606,98 +3734,33 @@ zpool_do_import(int argc, char **argv)
return (1);
}
err = import_pools(pools, props, mntopts, flags, argv[0],
argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
* fall back to scanning the directories for pools that match
* those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
* in the cachefile. If we need to fall back to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
err = import_pools(pools, props, mntopts, flags, argv[0],
argc == 1 ? NULL : argv[1], do_destroyed, pool_specified,
do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);


@@ -1269,7 +1269,8 @@ error:
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg)
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
pthread_mutex_t *lock, avl_tree_t *cache)
{
nvlist_t *ret = NULL;
pool_list_t pools = { 0 };
@@ -1277,34 +1278,11 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg)
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
rdsk_node_t *slice;
void *cookie;
tpool_t *t;
verify(iarg->poolname == NULL || iarg->guid == 0);
/*
* Create a thread pool to parallelize the process of reading and
@@ -1384,7 +1362,6 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg)
}
avl_destroy(cache);
free(cache);
ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
@@ -1411,14 +1388,43 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg)
return (ret);
}
/*
* Given a config, discover the paths for the devices which
* exist in the config.
*/
static int
discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
avl_tree_t *cache, pthread_mutex_t *lock)
{
char *path = NULL;
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (int c = 0; c < children; c++) {
discover_cached_paths(hdl, child[c], cache, lock);
}
}
/*
* Once we have the path, we need to add the directory to
* our directory cache.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
return (zpool_find_import_scan_dir(hdl, lock, cache,
dirname(path), 0));
}
return (0);
}
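
The recursion in discover_cached_paths() is the usual pattern for
walking a vdev tree nvlist: descend into ZPOOL_CONFIG_CHILDREN first,
then act on a leaf's ZPOOL_CONFIG_PATH. A minimal standalone sketch of
the same walk, with a hypothetical visit_dir() callback standing in
for zpool_find_import_scan_dir():

#include <stdio.h>
#include <libgen.h>
#include <sys/param.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

/* Hypothetical stand-in for zpool_find_import_scan_dir(). */
static void
visit_dir(const char *dir)
{
	(void) printf("scan dir: %s\n", dir);
}

static void
walk_vdev_tree(nvlist_t *nv)
{
	nvlist_t **child;
	uint_t children;
	char *path;

	/* Interior vdevs (root, mirror, raidz, ...) carry children. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (uint_t c = 0; c < children; c++)
			walk_vdev_tree(child[c]);
	}

	/*
	 * Leaf vdevs carry the device path. dirname() may modify its
	 * argument, so work on a copy here.
	 */
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		char buf[MAXPATHLEN];
		(void) snprintf(buf, sizeof (buf), "%s", path);
		visit_dir(dirname(buf));
	}
}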
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
const char *poolname, uint64_t guid)
zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
{
char *buf;
int fd;
@@ -1430,9 +1436,9 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
uint64_t this_guid;
boolean_t active;
verify(poolname == NULL || guid == 0);
verify(iarg->poolname == NULL || iarg->guid == 0);
if ((fd = open(cachefile, O_RDONLY)) < 0) {
if ((fd = open(iarg->cachefile, O_RDONLY)) < 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to open cache file"));
@@ -1488,11 +1494,11 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
src = fnvpair_value_nvlist(elem);
name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
if (poolname != NULL && strcmp(poolname, name) != 0)
if (iarg->poolname != NULL && strcmp(iarg->poolname, name) != 0)
continue;
this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
if (guid != 0 && guid != this_guid)
if (iarg->guid != 0 && iarg->guid != this_guid)
continue;
if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
@@ -1504,8 +1510,68 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
if (active)
continue;
if (iarg->scan) {
uint64_t saved_guid = iarg->guid;
const char *saved_poolname = iarg->poolname;
pthread_mutex_t lock;
/*
* Create the device cache that will hold the
* devices we will scan based on the cachefile.
* This will get destroyed and freed by
* zpool_find_import_impl.
*/
avl_tree_t *cache = zutil_alloc(hdl,
sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare,
sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
nvlist_t *nvroot = fnvlist_lookup_nvlist(src,
ZPOOL_CONFIG_VDEV_TREE);
/*
* We only want to find the pool with this_guid.
* We will reset these values back later.
*/
iarg->guid = this_guid;
iarg->poolname = NULL;
/*
* We need to build up a cache of devices that exist
* in the paths pointed to by the cachefile. This allows
* us to preserve the device namespace that was
* originally specified by the user but also lets us
* scan devices in those directories in case they have
* been renamed.
*/
pthread_mutex_init(&lock, NULL);
discover_cached_paths(hdl, nvroot, cache, &lock);
nvlist_t *nv = zpool_find_import_impl(hdl, iarg,
&lock, cache);
pthread_mutex_destroy(&lock);
/*
* zpool_find_import_impl will return a list
* of pools that it found based on the device
* cache. There should only be one pool since
* we're looking for a specific guid. We will
* use that pool to build up the final pool
* nvlist which is returned to the caller.
*/
nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
continue;
}
if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
cachefile) != 0) {
iarg->cachefile) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
nvlist_free(pools);
@@ -1527,11 +1593,51 @@ zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
static nvlist_t *
zpool_find_import(libpc_handle_t *hdl, importargs_t *iarg)
{
pthread_mutex_t lock;
avl_tree_t *cache;
nvlist_t *pools = NULL;
verify(iarg->poolname == NULL || iarg->guid == 0);
pthread_mutex_init(&lock, NULL);
/*
* Locate pool member vdevs by blkid or by directory scanning.
* On success a newly allocated AVL tree which is populated with an
* entry for each discovered vdev will be returned in the cache.
* It's the caller's responsibility to consume and destroy this tree.
*/
if (iarg->scan || iarg->paths != 0) {
size_t dirs = iarg->paths;
const char * const *dir = (const char * const *)iarg->path;
if (dirs == 0)
dir = zpool_default_search_paths(&dirs);
if (zpool_find_import_scan(hdl, &lock, &cache,
dir, dirs) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
} else {
if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
}
pools = zpool_find_import_impl(hdl, iarg, &lock, cache);
pthread_mutex_destroy(&lock);
return (pools);
}
nvlist_t *
zpool_search_import(void *hdl, importargs_t *import,
const pool_config_ops_t *pco)
@@ -1546,10 +1652,9 @@ zpool_search_import(void *hdl, importargs_t *import,
verify(import->poolname == NULL || import->guid == 0);
if (import->cachefile != NULL)
pools = zpool_find_import_cached(&handle, import->cachefile,
import->poolname, import->guid);
pools = zpool_find_import_cached(&handle, import);
else
pools = zpool_find_import_impl(&handle, import);
pools = zpool_find_import(&handle, import);
if ((pools == NULL || nvlist_empty(pools)) &&
handle.lpc_open_access_error && geteuid() != 0) {

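For a caller outside the zpool command, the fallback is driven entirely
through importargs_t. A hedged caller-side sketch, assuming a libzfs
handle h from libzfs_init() and using only the fields and functions
that appear in this diff:

importargs_t args = { 0 };
args.cachefile = "/etc/zfs/zpool.cache";

/* First attempt: trust the cachefile contents as-is. */
nvlist_t *pools = zpool_search_import(h, &args, &libzfs_config_ops);

if (pools == NULL || nvlist_empty(pools)) {
	/*
	 * Retry with scan set: zpool_find_import_cached() now reads
	 * labels, but only from the directories recorded in the
	 * cachefile, matching pools by guid.
	 */
	nvlist_free(pools);
	args.scan = B_TRUE;
	pools = zpool_search_import(h, &args, &libzfs_config_ops);
}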

@@ -398,6 +398,7 @@ tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing',
'import_paths_changed',


@@ -9,6 +9,7 @@ dist_pkgdata_SCRIPTS = \
import_cachefile_device_replaced.ksh \
import_cachefile_mirror_attached.ksh \
import_cachefile_mirror_detached.ksh \
import_cachefile_paths_changed.ksh \
import_cachefile_shared_device.ksh \
import_devices_missing.ksh \
import_paths_changed.ksh \


@@ -0,0 +1,117 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2021 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_import/zpool_import.kshlib
#
# DESCRIPTION:
# A pool should be importable from a cachefile even if device paths
# have changed.
#
# STRATEGY:
# 1. Create a pool using a cachefile.
# 2. Back up the cachefile.
# 3. Export the pool.
# 4. Change the paths of some of the devices.
# 5. Verify that we can import the pool using the cachefile.
#
verify_runnable "global"
log_onexit cleanup
function test_new_paths
{
typeset poolcreate="$1"
typeset pathstochange="$2"
log_note "$0: pool '$poolcreate', changing paths of $pathstochange."
log_must zpool create -o cachefile=$CPATH $TESTPOOL1 $poolcreate
log_must cp $CPATH $CPATHBKP
log_must zpool export $TESTPOOL1
for dev in $pathstochange; do
log_must mv $dev "${dev}_new"
done
log_must zpool import -c $CPATHBKP $TESTPOOL1
log_must check_pool_healthy $TESTPOOL1
# Cleanup
log_must zpool destroy $TESTPOOL1
log_must rm -f $CPATH $CPATHBKP
for dev in $pathstochange; do
log_must mv "${dev}_new" $dev
done
log_note ""
}
function test_duplicate_pools
{
typeset poolcreate="$1"
typeset pathstocopy="$2"
log_note "$0: pool '$poolcreate', creating duplicate pool using $pathstocopy."
log_must zpool create -o cachefile=$CPATH $TESTPOOL1 $poolcreate
log_must zpool export $TESTPOOL1
for dev in $pathstocopy; do
log_must cp $dev "${dev}_orig"
done
log_must zpool create -f -o cachefile=$CPATH $TESTPOOL1 $poolcreate
log_must cp $CPATH $CPATHBKP
log_must zpool export $TESTPOOL1
for dev in $pathstocopy; do
log_must mv $dev "${dev}_new"
done
log_must zpool import -c $CPATHBKP
log_must zpool import -c $CPATHBKP $TESTPOOL1
log_must check_pool_healthy $TESTPOOL1
# Cleanup
log_must zpool destroy $TESTPOOL1
log_must rm -f $CPATH $CPATHBKP
for dev in $pathstocopy; do
log_must rm "${dev}_orig"
log_must mv "${dev}_new" $dev
done
log_note ""
}
test_new_paths "$VDEV0 $VDEV1" "$VDEV0 $VDEV1"
test_new_paths "mirror $VDEV0 $VDEV1" "$VDEV0 $VDEV1"
test_new_paths "$VDEV0 log $VDEV1" "$VDEV0 $VDEV1"
test_new_paths "raidz $VDEV0 $VDEV1 $VDEV2" "$VDEV0 $VDEV1 $VDEV2"
test_new_paths "draid $VDEV0 $VDEV1 $VDEV2" "$VDEV0 $VDEV1 $VDEV2"
test_duplicate_pools "$VDEV0 $VDEV1" "$VDEV0 $VDEV1"
test_duplicate_pools "mirror $VDEV0 $VDEV1" "$VDEV0 $VDEV1"
test_duplicate_pools "$VDEV0 log $VDEV1" "$VDEV0 $VDEV1"
test_duplicate_pools "raidz $VDEV0 $VDEV1 $VDEV2" "$VDEV0 $VDEV1 $VDEV2"
test_duplicate_pools "draid $VDEV0 $VDEV1 $VDEV2" "$VDEV0 $VDEV1 $VDEV2"
log_pass "zpool import with cachefile succeeded after changing device paths."