Added no_scrub_restart flag to zpool reopen

Added a -n flag to zpool reopen that allows a running scrub
operation to continue if there is a device with a Dirty Time Log.

By default, if a component device has a DTL and zpool reopen
is executed, all running scan operations will be restarted.

Added functional tests for `zpool reopen`

Tests cover the following scenarios:
* `zpool reopen` without arguments,
* `zpool reopen` with pool name as argument,
* `zpool reopen` while scrubbing,
* `zpool reopen -n` while scrubbing,
* `zpool reopen -n` while resilvering,
* `zpool reopen` with bad arguments.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Tom Caputi <tcaputi@datto.com>
Signed-off-by: Arkadiusz Bubała <arkadiusz.bubala@open-e.com>
Closes #6076 
Closes #6746
This commit is contained in:
Arkadiusz Bubała 2017-10-26 21:26:09 +02:00 committed by Brian Behlendorf
parent 3ad59c015d
commit d3f2cd7e3b
28 changed files with 1195 additions and 382 deletions

View File

@ -23,6 +23,7 @@
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016, 2017, Intel Corporation.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
@ -722,6 +723,8 @@ zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
(void) strlcpy(fullpath, path, sizeof (fullpath));
if (wholedisk) {
char *spath = zfs_strip_partition(fullpath);
boolean_t scrub_restart = B_TRUE;
if (!spath) {
zed_log_msg(LOG_INFO, "%s: Can't alloc",
__func__);
@ -736,7 +739,7 @@ zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
* device so that the kernel can update the size
* of the expanded device.
*/
(void) zpool_reopen(zhp);
(void) zpool_reopen_one(zhp, &scrub_restart);
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {

View File

@ -28,6 +28,7 @@
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#include <assert.h>
@ -342,7 +343,7 @@ get_usage(zpool_help_t idx)
case HELP_REMOVE:
return (gettext("\tremove <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen <pool>\n"));
return (gettext("\treopen [-n] <pool>\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] <pool> ...\n"));
case HELP_STATUS:
@ -5855,12 +5856,14 @@ zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
zpool_handle_t *zhp;
char *pool;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -5868,25 +5871,13 @@ zpool_do_reopen(int argc, char **argv)
}
}
argc--;
argv++;
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_reopen_one,
&scrub_restart);
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL)
return (1);
ret = zpool_reopen(zhp);
zpool_close(zhp);
return (ret);
}

View File

@ -224,6 +224,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_remove/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_replace/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile

View File

@ -27,6 +27,7 @@
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#ifndef _LIBZFS_H
@ -266,7 +267,7 @@ typedef struct splitflags {
extern int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
extern int zpool_reguid(zpool_handle_t *);
extern int zpool_reopen(zpool_handle_t *);
extern int zpool_reopen_one(zpool_handle_t *, void *);
extern int zpool_sync_one(zpool_handle_t *, void *);

View File

@ -23,6 +23,7 @@
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#ifndef _LIBZFS_CORE_H
@ -101,6 +102,7 @@ int lzc_rollback(const char *, char *, int);
int lzc_rollback_to(const char *, const char *);
int lzc_sync(const char *, nvlist_t *, nvlist_t **);
int lzc_reopen(const char *, boolean_t);
#ifdef __cplusplus
}

View File

@ -25,6 +25,7 @@
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#include <ctype.h>
@ -3370,20 +3371,20 @@ zpool_reguid(zpool_handle_t *zhp)
* Reopen the pool.
*/
int
zpool_reopen(zpool_handle_t *zhp)
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
zhp->zpool_name);
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */

View File

@ -24,6 +24,7 @@
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
@ -1111,3 +1112,16 @@ lzc_change_key(const char *fsname, uint64_t crypt_cmd, nvlist_t *props,
nvlist_free(ioc_args);
return (error);
}
/*
 * Reopen all vdevs of the given pool.  The scrub_restart value is passed
 * to the kernel in the "scrub_restart" nvlist entry; when B_FALSE, a scrub
 * that is in progress is not restarted as a side effect of the reopen.
 *
 * Returns 0 on success or an error code from the ioctl.
 */
int
lzc_reopen(const char *pool_name, boolean_t scrub_restart)
{
	int err;
	nvlist_t *innvl = fnvlist_alloc();

	fnvlist_add_boolean_value(innvl, "scrub_restart", scrub_restart);

	err = lzc_ioctl(ZFS_IOC_POOL_REOPEN, pool_name, innvl, NULL);

	nvlist_free(innvl);
	return (err);
}

View File

@ -25,6 +25,7 @@
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2017 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd August 23, 2017
.Dt ZPOOL 8 SMM
@ -140,6 +141,7 @@
.Ar pool
.Nm
.Cm reopen
.Op Fl n
.Ar pool
.Nm
.Cm remove
@ -1739,9 +1741,14 @@ performing this action.
.It Xo
.Nm
.Cm reopen
.Op Fl n
.Ar pool
.Xc
Reopen all the vdevs associated with the pool.
.Bl -tag -width Ds
.It Fl n
Do not restart an in-progress scrub operation. This is not recommended and can
result in partially resilvered devices unless a second scrub is performed.
.It Xo
.Nm
.Cm remove

View File

@ -36,6 +36,7 @@
* Copyright (c) 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2017 Datto Inc. All rights reserved.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
@ -5032,25 +5033,46 @@ zfs_ioc_clear(zfs_cmd_t *zc)
return (error);
}
/*
* Reopen all the vdevs associated with the pool.
*
* innvl: {
* "scrub_restart" -> when true and scrub is running, allow to restart
* scrub as the side effect of the reopen (boolean).
* }
*
* outnvl is unused
*/
/* ARGSUSED */
static int
zfs_ioc_pool_reopen(zfs_cmd_t *zc)
zfs_ioc_pool_reopen(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa;
int error;
boolean_t scrub_restart = B_TRUE;
error = spa_open(zc->zc_name, &spa, FTAG);
if (innvl) {
if (nvlist_lookup_boolean_value(innvl, "scrub_restart",
&scrub_restart) != 0) {
return (SET_ERROR(EINVAL));
}
}
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
spa_vdev_state_enter(spa, SCL_NONE);
/*
* If a resilver is already in progress then set the
* spa_scrub_reopen flag to B_TRUE so that we don't restart
* the scan as a side effect of the reopen. Otherwise, let
* vdev_open() decided if a resilver is required.
* If the scrub_restart flag is B_FALSE and a scrub is already
* in progress then set spa_scrub_reopen flag to B_TRUE so that
* we don't restart the scrub as a side effect of the reopen.
* Otherwise, let vdev_open() decide if a resilver is required.
*/
spa->spa_scrub_reopen = dsl_scan_resilvering(spa->spa_dsl_pool);
spa->spa_scrub_reopen = (!scrub_restart &&
dsl_scan_scrubbing(spa->spa_dsl_pool));
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
@ -5058,6 +5080,7 @@ zfs_ioc_pool_reopen(zfs_cmd_t *zc)
spa_close(spa, FTAG);
return (0);
}
/*
* inputs:
* zc_name name of filesystem
@ -6316,6 +6339,9 @@ zfs_ioctl_init(void)
zfs_ioctl_register("sync", ZFS_IOC_POOL_SYNC,
zfs_ioc_pool_sync, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE);
zfs_ioctl_register("reopen", ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen,
zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED, B_TRUE,
B_TRUE);
/* IOCTLS that use the legacy function signature */
@ -6389,8 +6415,6 @@ zfs_ioctl_init(void)
zfs_ioctl_register_pool(ZFS_IOC_CLEAR, zfs_ioc_clear,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_READONLY);
zfs_ioctl_register_pool(ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_dataset_read(ZFS_IOC_SPACE_WRITTEN,
zfs_ioc_space_written);

View File

@ -294,6 +294,11 @@ tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
[tests/functional/cli_root/zpool_reopen]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']

View File

@ -1,5 +1,6 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/include
dist_pkgdata_SCRIPTS = \
blkdev.shlib \
commands.cfg \
default.cfg \
libtest.shlib \

View File

@ -0,0 +1,395 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
#
# Print the SCSI host number (the N of "hostN") for the given disk.
#
function get_scsi_host #disk
{
	typeset dev=$1

	ls /sys/block/${dev}/device/scsi_device | awk -F: '{print $1}'
}
#
# Cause a scan of all scsi host adapters by default
#
# $1 optional host number; when given, only hostN is rescanned,
# otherwise every /sys/class/scsi_host/host* adapter is rescanned.
#
function scan_scsi_hosts
{
typeset hostnum=${1}
if is_linux; then
if [[ -z $hostnum ]]; then
# No host given: ask every SCSI host adapter to rescan its bus.
# '- - -' means all channels, all targets, all LUNs.
for host in /sys/class/scsi_host/host*; do
log_must eval "echo '- - -' > $host/scan"
done
else
# NOTE(review): this first eval only echoes the scan path to
# /dev/null, so apart from the log_must logging it is a no-op --
# confirm whether it was meant to test the file's existence.
log_must eval \
"echo /sys/class/scsi_host/host$hostnum/scan" \
> /dev/null
# Trigger the rescan of the single requested host.
log_must eval \
"echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
fi
fi
}
#
# Block until udev has finished creating minors for newly added
# block devices (no-op on non-Linux platforms).
#
function block_device_wait
{
	if ! is_linux; then
		return 0
	fi

	udevadm trigger
	udevadm settle
}
#
# Check if the given device is physical device
#
# The optional $DEV_DSKDIR / $DEV_RDSKDIR prefix is stripped from the
# argument before the check.  Returns 0 when the name looks like a
# physical (whole) disk.
#
function is_physical_device #device
{
typeset device=${1#$DEV_DSKDIR}
device=${device#$DEV_RDSKDIR}
if is_linux; then
# A block device node must exist; max_part presumably confirms the
# loop driver was loaded with partition support -- TODO confirm why
# this is part of the "physical device" check.
[[ -b "$DEV_DSKDIR/$device" ]] && \
[[ -f /sys/module/loop/parameters/max_part ]]
return $?
else
# Non-Linux: match illumos-style ctd names such as c0t0d0.
echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
return $?
fi
}
#
# Check if the given device is a real device (ie SCSI device)
#
function is_real_device #disk
{
	typeset dev=$1

	[[ -n $dev ]] || log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "disk" for whole SCSI disks.
		lsblk $DEV_RDSKDIR/$dev -o TYPE 2>/dev/null | \
		    egrep disk >/dev/null
		return $?
	fi
}
#
# Check if the given device is a loop device
#
function is_loop_device #disk
{
	typeset dev=$1

	[[ -n $dev ]] || log_fail "No argument for disk given."

	if is_linux; then
		# lsblk reports TYPE "loop" for loopback devices.
		lsblk $DEV_RDSKDIR/$dev -o TYPE 2>/dev/null | \
		    egrep loop >/dev/null
		return $?
	fi
}
#
# Check if the given device is a multipath device and if there is a symbolic
# link to a device mapper and to a disk
# Currently no support for dm devices alone without multipath
#
function is_mpath_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
# lsblk must report TYPE "mpath" for the name under $DEV_MPATHDIR.
lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
egrep mpath >/dev/null
if (($? == 0)); then
# Also require that the multipath name is a resolvable symlink
# to the underlying device-mapper node.
readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
return $?
else
# Here $? is the failed status of the arithmetic test above,
# so this propagates a non-zero (failure) status.
return $?
fi
fi
}
# Set the slice prefix for disk partitioning depending
# on whether the device is a real, multipath, or loop device.
# Currently all disks have to be of the same type, so only
# checks first disk to determine slice prefix.
#
function set_slice_prefix
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
# NOTE(review): the shell variable i is not visible inside nawk,
# so '$(i + 1)' always evaluates to $1 -- only the first disk in
# $DISKS is ever examined, matching the header comment above.
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
# mpath devices without a trailing digit in the name, and real
# devices, need no slice prefix; other mpath and loop devices
# use the "p" partition prefix (e.g. loop0p1).
if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
export SLICE_PREFIX=""
return 0
elif ( is_mpath_device $disk || is_loop_device \
$disk ); then
export SLICE_PREFIX="p"
return 0
else
log_fail "$disk not supported for partitioning."
fi
(( i = i + 1))
done
fi
}
#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM
# Currently all disks have to be of the same type, so only
# checks first disk to determine device directory
# default = /dev (linux)
# real disk = /dev (linux)
# multipath device = /dev/mapper (linux)
#
function set_device_dir
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
# NOTE(review): i is not passed into nawk, so this always picks
# the first disk; both branches below return immediately, so the
# loop body runs at most once by construction.
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
if is_mpath_device $disk; then
export DEV_DSKDIR=$DEV_MPATHDIR
return 0
else
export DEV_DSKDIR=$DEV_RDSKDIR
return 0
fi
(( i = i + 1))
done
else
export DEV_DSKDIR=$DEV_RDSKDIR
fi
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
typeset device=$1
# $(is_physical_device ...) expands to an empty string, so the exit
# status being negated here is that of the command substitution itself,
# i.e. this is an unusual spelling of "! is_physical_device $device".
if ! $(is_physical_device $device) ; then
if [[ $device != "/" ]]; then
# Strip the last path component to get the directory part.
device=${device%/*}
fi
# If the remainder is itself a block device under $DEV_DSKDIR,
# report $DEV_DSKDIR instead.
if [[ -b "$DEV_DSKDIR/$device" ]]; then
device="$DEV_DSKDIR"
fi
echo $device
else
echo "$DEV_DSKDIR"
fi
}
#
# Get persistent name for given disk
#
# On Linux, prints the /dev/disk/by-id name for real and multipath
# devices; otherwise (or on other platforms) echoes the name unchanged.
#
function get_persistent_disk_name #device
{
typeset device=$1
typeset dev_id
if is_linux; then
if is_real_device $device; then
# Take the first by-id symlink udev reports for the disk.
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id | nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
elif is_mpath_device $device; then
# Multipath devices are identified by their dm-uuid link.
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id/dm-uuid \
| nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
else
echo $device
fi
else
echo $device
fi
}
#
# Online or offline a disk on the system
#
# First checks state of disk. Test will fail if disk is not properly onlined
# or offlined. Online is a full rescan of SCSI disks by echoing to every
# host entry.
#
function on_off_disk # disk state{online,offline} host
{
typeset disk=$1
typeset state=$2
typeset host=$3
[[ -z $disk ]] || [[ -z $state ]] && \
log_fail "Arguments invalid or missing"
if is_linux; then
if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
# Multipath offline: resolve the dm name, then offline and
# delete every slave device backing the multipath map.
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
slave="$(ls /sys/block/${dm_name}/slaves \
| nawk '{print $1}')"
while [[ -n $slave ]]; do
#check if disk is online
lsscsi | egrep $slave > /dev/null
if (($? == 0)); then
slave_dir="/sys/block/${dm_name}"
slave_dir+="/slaves/${slave}/device"
ss="${slave_dir}/state"
sd="${slave_dir}/delete"
log_must eval "echo 'offline' > ${ss}"
log_must eval "echo '1' > ${sd}"
# The slave must no longer appear in lsscsi.
lsscsi | egrep $slave > /dev/null
if (($? == 0)); then
log_fail "Offlining" \
"$disk failed"
fi
fi
# Pick up the next remaining slave, if any.
slave="$(ls /sys/block/$dm_name/slaves \
2>/dev/null | nawk '{print $1}')"
done
elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
#check if disk is online
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
# Offline the device and ask the kernel to delete it.
dev_state="/sys/block/$disk/device/state"
dev_delete="/sys/block/$disk/device/delete"
log_must eval "echo 'offline' > ${dev_state}"
log_must eval "echo '1' > ${dev_delete}"
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
log_fail "Offlining $disk" \
"failed"
fi
else
log_note "$disk is already offline"
fi
elif [[ $state == "online" ]]; then
#force a full rescan
scan_scsi_hosts $host
block_device_wait
if is_mpath_device $disk; then
# After the rescan the multipath slave must be visible.
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
slave="$(ls /sys/block/$dm_name/slaves \
| nawk '{print $1}')"
lsscsi | egrep $slave > /dev/null
if (($? != 0)); then
log_fail "Onlining $disk failed"
fi
elif is_real_device $disk; then
# Poll up to 3 times (1s apart) for the disk to reappear.
typeset -i retries=0
while ! lsscsi | egrep -q $disk; do
if (( $retries > 2 )); then
log_fail "Onlining $disk failed"
break
fi
(( ++retries ))
sleep 1
done
else
log_fail "$disk is not a real dev"
fi
else
log_fail "$disk failed to $state"
fi
fi
}
#
# Simulate physically pulling the given disk from the system.
#
function remove_disk #disk
{
	on_off_disk $1 "offline"
	block_device_wait
}
#
# Simulate plugging the given disk back in by rescanning its SCSI host.
#
function insert_disk #disk scsi_host
{
	on_off_disk $1 "online" $2
	block_device_wait
}
#
# Load scsi_debug module with specified parameters
#
# $1 emulated device size in MiB
# $2 number of hosts to add
# $3 number of targets per host
# $4 maximum number of LUNs per target
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
	typeset devsize=$1
	typeset hosts=$2
	typeset tgts=$3
	typeset luns=$4

	[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
	    [[ -z $luns ]] && log_fail "Arguments invalid or missing"

	if is_linux; then
		# Dry-run probe to verify the module exists on this platform.
		modprobe -n scsi_debug
		if (($? != 0)); then
			# BUG FIX: the message was previously split across two
			# lines without a continuation, so "module" was
			# executed as a separate (nonexistent) command.
			log_unsupported \
			    "Platform does not have scsi_debug module"
		fi
		lsmod | egrep scsi_debug > /dev/null
		if (($? == 0)); then
			log_fail "scsi_debug module already installed"
		else
			log_must modprobe scsi_debug dev_size_mb=$devsize \
			    add_host=$hosts num_tgts=$tgts max_luns=$luns
			block_device_wait
			# The emulated device must now be visible to lsscsi.
			lsscsi | egrep scsi_debug > /dev/null
			if (($? == 1)); then
				log_fail "scsi_debug module install failed"
			fi
		fi
	fi
}
#
# Get scsi_debug device name.
# Returns basename of scsi_debug device (for example "sdb").
#
function get_debug_device
{
	# $6 is the device path (e.g. /dev/sdb); print its basename.
	lsscsi | nawk '/scsi_debug/ {split($6, dev, "/"); print dev[3]; exit}'
}

View File

@ -27,10 +27,12 @@
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
#
# Apply constrained path when available. This is required since the
@ -1751,124 +1753,6 @@ function check_state # pool disk state{online,offline,degraded}
return $?
}
#
# Cause a scan of all scsi host adapters by default
#
# $1 optional host number
#
function scan_scsi_hosts
{
typeset hostnum=${1}
if is_linux; then
if [[ -z $hostnum ]]; then
for host in /sys/class/scsi_host/host*; do
log_must eval "echo '- - -' > $host/scan"
done
else
log_must eval \
"echo /sys/class/scsi_host/host$hostnum/scan" \
> /dev/null
log_must eval \
"echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
fi
fi
}
#
# Wait for newly created block devices to have their minors created.
#
function block_device_wait
{
if is_linux; then
udevadm trigger
udevadm settle
fi
}
#
# Online or offline a disk on the system
#
# First checks state of disk. Test will fail if disk is not properly onlined
# or offlined. Online is a full rescan of SCSI disks by echoing to every
# host entry.
#
function on_off_disk # disk state{online,offline} host
{
typeset disk=$1
typeset state=$2
typeset host=$3
[[ -z $disk ]] || [[ -z $state ]] && \
log_fail "Arguments invalid or missing"
if is_linux; then
if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
slave="$(ls /sys/block/${dm_name}/slaves \
| nawk '{print $1}')"
while [[ -n $slave ]]; do
#check if disk is online
lsscsi | egrep $slave > /dev/null
if (($? == 0)); then
slave_dir="/sys/block/${dm_name}"
slave_dir+="/slaves/${slave}/device"
ss="${slave_dir}/state"
sd="${slave_dir}/delete"
log_must eval "echo 'offline' > ${ss}"
log_must eval "echo '1' > ${sd}"
lsscsi | egrep $slave > /dev/null
if (($? == 0)); then
log_fail "Offlining" \
"$disk failed"
fi
fi
slave="$(ls /sys/block/$dm_name/slaves \
2>/dev/null | nawk '{print $1}')"
done
elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
#check if disk is online
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
dev_state="/sys/block/$disk/device/state"
dev_delete="/sys/block/$disk/device/delete"
log_must eval "echo 'offline' > ${dev_state}"
log_must eval "echo '1' > ${dev_delete}"
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
log_fail "Offlining $disk" \
"failed"
fi
else
log_note "$disk is already offline"
fi
elif [[ $state == "online" ]]; then
#force a full rescan
scan_scsi_hosts $host
block_device_wait
if is_mpath_device $disk; then
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
slave="$(ls /sys/block/$dm_name/slaves \
| nawk '{print $1}')"
lsscsi | egrep $slave > /dev/null
if (($? != 0)); then
log_fail "Onlining $disk failed"
fi
elif is_real_device $disk; then
lsscsi | egrep $disk > /dev/null
if (($? != 0)); then
log_fail "Onlining $disk failed"
fi
else
log_fail "$disk is not a real dev"
fi
else
log_fail "$disk failed to $state"
fi
fi
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
@ -2897,217 +2781,6 @@ function get_rootpool
fi
}
#
# Check if the given device is physical device
#
function is_physical_device #device
{
typeset device=${1#$DEV_DSKDIR}
device=${device#$DEV_RDSKDIR}
if is_linux; then
[[ -b "$DEV_DSKDIR/$device" ]] && \
[[ -f /sys/module/loop/parameters/max_part ]]
return $?
else
echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
return $?
fi
}
#
# Check if the given device is a real device (ie SCSI device)
#
function is_real_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
egrep disk >/dev/null
return $?
fi
}
#
# Check if the given device is a loop device
#
function is_loop_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
egrep loop >/dev/null
return $?
fi
}
#
# Check if the given device is a multipath device and if there is a sybolic
# link to a device mapper and to a disk
# Currently no support for dm devices alone without multipath
#
function is_mpath_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
egrep mpath >/dev/null
if (($? == 0)); then
readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
return $?
else
return $?
fi
fi
}
# Set the slice prefix for disk partitioning depending
# on whether the device is a real, multipath, or loop device.
# Currently all disks have to be of the same type, so only
# checks first disk to determine slice prefix.
#
function set_slice_prefix
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
export SLICE_PREFIX=""
return 0
elif ( is_mpath_device $disk || is_loop_device \
$disk ); then
export SLICE_PREFIX="p"
return 0
else
log_fail "$disk not supported for partitioning."
fi
(( i = i + 1))
done
fi
}
#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM
# Currently all disks have to be of the same type, so only
# checks first disk to determine device directory
# default = /dev (linux)
# real disk = /dev (linux)
# multipath device = /dev/mapper (linux)
#
function set_device_dir
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
if is_mpath_device $disk; then
export DEV_DSKDIR=$DEV_MPATHDIR
return 0
else
export DEV_DSKDIR=$DEV_RDSKDIR
return 0
fi
(( i = i + 1))
done
else
export DEV_DSKDIR=$DEV_RDSKDIR
fi
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
typeset device=$1
if ! $(is_physical_device $device) ; then
if [[ $device != "/" ]]; then
device=${device%/*}
fi
if [[ -b "$DEV_DSKDIR/$device" ]]; then
device="$DEV_DSKDIR"
fi
echo $device
else
echo "$DEV_DSKDIR"
fi
}
#
# Get persistent name for given disk
#
function get_persistent_disk_name #device
{
typeset device=$1
typeset dev_id
if is_linux; then
if is_real_device $device; then
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id | nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
elif is_mpath_device $device; then
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id/dm-uuid \
| nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
else
echo $device
fi
else
echo $device
fi
}
#
# Load scsi_debug module with specified parameters
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
{
typeset devsize=$1
typeset hosts=$2
typeset tgts=$3
typeset luns=$4
[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
[[ -z $luns ]] && log_fail "Arguments invalid or missing"
if is_linux; then
modprobe -n scsi_debug
if (($? != 0)); then
log_unsupported "Platform does not have scsi_debug"
"module"
fi
lsmod | egrep scsi_debug > /dev/null
if (($? == 0)); then
log_fail "scsi_debug module already installed"
else
log_must modprobe scsi_debug dev_size_mb=$devsize \
add_host=$hosts num_tgts=$tgts max_luns=$luns
block_device_wait
lsscsi | egrep scsi_debug > /dev/null
if (($? == 1)); then
log_fail "scsi_debug module install failed"
fi
fi
fi
}
#
# Get the package name
#

View File

@ -46,6 +46,7 @@ SUBDIRS = \
zpool_offline \
zpool_online \
zpool_remove \
zpool_reopen \
zpool_replace \
zpool_scrub \
zpool_set \

View File

@ -0,0 +1,12 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_reopen
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
zpool_reopen.cfg \
zpool_reopen.shlib \
zpool_reopen_001_pos.ksh \
zpool_reopen_002_pos.ksh \
zpool_reopen_003_pos.ksh \
zpool_reopen_004_pos.ksh \
zpool_reopen_005_pos.ksh \
zpool_reopen_006_neg.ksh

View File

@ -0,0 +1,33 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
verify_runnable "global"
# Destroy pools and clear labels on all test disks.
cleanup_devices $DISKS
# Unplug the disk and remove scsi_debug module
if is_linux; then
for SDDEVICE in $(get_debug_device); do
unplug $SDDEVICE
done
modprobe -r scsi_debug
fi
log_pass

View File

@ -0,0 +1,30 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
verify_runnable "global"
# Create scsi_debug devices for the reopen tests
# (parameters come from zpool_reopen.cfg).
if is_linux; then
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS
else
log_unsupported "scsi debug module unsupported"
fi
log_pass

View File

@ -0,0 +1,47 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/include/libtest.shlib
verify_runnable "global"
# Number of disks supplied by the test harness in $DISKS.
export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
export DISKSARRAY=$DISKS
# File sizes (MiB) used to generate pool I/O load.
export SMALL_FILE_SIZE=10
export LARGE_FILE_SIZE=80
# Upper bound (seconds) for wait_for_action polling.
export MAXTIMEOUT=40
# scsi_debug module parameters: device size (MiB), hosts, targets, LUNs.
export SDSIZE=256
export SDHOSTS=1
export SDTGTS=1
export SDLUNS=1
# Individual test disks (first three entries of $DISKS).
export DISK1=$(echo $DISKS | nawk '{print $1}')
export DISK2=$(echo $DISKS | nawk '{print $2}')
export DISK3=$(echo $DISKS | nawk '{print $3}')
if is_linux; then
set_slice_prefix
set_device_dir
# Persistent (by-id) names for each test disk.
devs_id[0]=$(get_persistent_disk_name $DISK1)
devs_id[1]=$(get_persistent_disk_name $DISK2)
devs_id[2]=$(get_persistent_disk_name $DISK3)
export devs_id
else
DEV_DSKDIR="/dev"
fi

View File

@ -0,0 +1,117 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
#
# Clear labels on the given disks
#
function clear_labels #disks
{
for disk in $@; do
# Loop and multipath devices are used whole; other (real) disks
# carry the label on their first partition (${disk}1).
if ( is_loop_device $disk ) || ( is_mpath_device $disk ); then
zpool labelclear -f /dev/$disk
else
zpool labelclear -f /dev/${disk}1
fi
done
}
#
# Set the REMOVED_DISK and REMOVED_DISK_ID constants for device
# used for re-plugging. When the disk is loop device use the
# scsi_debug emulated drive. Otherwise use the real drive.
#
function set_removed_disk
{
if is_loop_device $DISK1; then
# Loop devices cannot be unplugged; substitute the scsi_debug disk.
export REMOVED_DISK=$(get_debug_device)
export REMOVED_DISK_ID=$(get_persistent_disk_name $REMOVED_DISK)
elif ( is_real_device $DISK1 ) || ( is_mpath_device $DISK1 ); then
export REMOVED_DISK="$DISK1"
export REMOVED_DISK_ID=${devs_id[0]}
else
log_fail "No drives that supports removal"
fi
}
#
# Create a file at the given path filled with random data,
# sized in whole MiB.
#
function generate_random_file #path size_mb
{
	typeset target=$1
	typeset -i mib=$2

	# 1 MiB blocks (-b 1048576) of random data (-d R), offset 0 (-s0).
	file_write -o create -f $target -b 1048576 -s0 -c $mib -d R
}
#
# Wait until specific event or timeout occur.
#
# The passed function is executed with pool name as argument
# with an interval of 1 second until it succeeds or until the
# timeout occurs.
# It returns 1 on timeout or 0 otherwise.
#
#
# Poll a predicate once per second until it succeeds or the time
# budget runs out. The predicate is invoked with the pool name as
# its only argument. Returns 0 on success, 1 on timeout.
#
function wait_for_action #pool timeout function
{
	typeset pool=$1
	typeset -i remaining=$2
	typeset check=$3

	while (( remaining > 0 )); do
		(( remaining -= 1 ))
		$check $pool && return 0
		sleep 1
	done
	return 1
}
#
# Helpers for wait_for_action function:
# wait_for_resilver_start - wait until resilver is started
# wait_for_resilver_end - wait until resilver is finished
# wait_for_scrub_end - wait until scrub is finished
#
# Block until a resilver has started on the pool (or timeout).
function wait_for_resilver_start #pool timeout
{
	wait_for_action $1 $2 is_pool_resilvering
}
# Block until the pool reports the resilver finished (or timeout).
function wait_for_resilver_end #pool timeout
{
	wait_for_action $1 $2 is_pool_resilvered
}
# Block until the pool reports the scrub finished (or timeout).
function wait_for_scrub_end #pool timeout
{
	wait_for_action $1 $2 is_pool_scrubbed
}
#
# Check if scan action has been restarted on the given pool
#
#
# Succeed if the pool's internal history shows that a running scan
# was aborted and restarted (scrub replaced by resilver or vice versa).
#
function is_scan_restarted #pool
{
	zpool history -i $1 | grep -q "scan aborted, restarting"
}

View File

@ -0,0 +1,70 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
#
# DESCRIPTION:
# Test if zpool reopen with no arguments works correctly.
#
# STRATEGY:
# 1. Create a pool.
# 2. Remove a disk.
# 3. Reopen a pool and verify if removed disk is marked as unavailable.
# 4. "Plug back" disk.
# 5. Reopen a pool and verify if removed disk is marked online again.
# 6. Check if reopen caused resilver start.
#
verify_runnable "global"

# Restore the environment: re-insert the unplugged disk, tear down
# the pool and wipe leftover labels so later tests start clean.
function cleanup
{
	# bring back removed disk online for further tests
	insert_disk $REMOVED_DISK $scsi_host
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
	clear_labels $REMOVED_DISK $DISK2
}

log_assert "Testing zpool reopen with no arguments"
log_onexit cleanup

set_removed_disk
scsi_host=$(get_scsi_host $REMOVED_DISK)

# Step 1: build a two-way mirror from the removable disk and DISK2.
default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
# Step 2: pull the removable disk.
remove_disk $REMOVED_DISK
# Step 3: a bare reopen must notice the missing disk and mark it UNAVAIL.
log_must zpool reopen
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# Dirty the pool so the re-attached disk has something to resilver.
log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
# Step 4: plug the disk back in.
insert_disk $REMOVED_DISK $scsi_host
# Step 5: reopen again; the disk must come back ONLINE.
log_must zpool reopen
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
# Step 6: the reopen should have kicked off a resilver; wait for it.
log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT

# clean up
log_must zpool destroy $TESTPOOL
clear_labels $REMOVED_DISK $DISK2

log_pass "Zpool reopen with no arguments test passed"

View File

@ -0,0 +1,70 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
#
# DESCRIPTION:
# Test if zpool reopen with pool name as argument works correctly.
#
# STRATEGY:
# 1. Create a pool.
# 2. Remove a disk.
# 3. Reopen a pool and verify if removed disk is marked as unavailable.
# 4. "Plug back" disk.
# 5. Reopen a pool and verify if removed disk is marked online again.
# 6. Check if reopen caused resilver start.
#
verify_runnable "global"

# Restore the environment: re-insert the unplugged disk, tear down
# the pool and wipe leftover labels so later tests start clean.
function cleanup
{
	# bring back removed disk online for further tests
	insert_disk $REMOVED_DISK $scsi_host
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
	clear_labels $REMOVED_DISK $DISK2
}

# Fix copy-paste error: this test exercises "zpool reopen <pool>",
# not the no-argument form tested by zpool_reopen_001.
log_assert "Testing zpool reopen with pool name as argument"
log_onexit cleanup

set_removed_disk
scsi_host=$(get_scsi_host $REMOVED_DISK)

# 1. Create a pool.
default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
# 2. Remove a disk.
remove_disk $REMOVED_DISK
# 3. Reopen a pool and verify if removed disk is marked as unavailable.
log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
# Write some data to the pool so the re-inserted disk needs a resilver.
log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
# 4. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 5. Reopen a pool and verify if removed disk is marked online again.
log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
# 6. Check if reopen caused resilver start.
log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT

# clean up
log_must zpool destroy $TESTPOOL
clear_labels $REMOVED_DISK $DISK2

log_pass "Zpool reopen with pool name as argument test passed"

View File

@ -0,0 +1,101 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
#
# DESCRIPTION:
# Test zpool reopen while scrub is running.
# Checks if re-plugged device is fully resilvered.
#
# STRATEGY:
# 1. Create a pool
# 2. Remove a disk.
# 3. Write a test file to the pool and calculate its checksum.
# 4. Execute scrub.
# 5. "Plug back" disk.
# 6. Reopen a pool.
# 7. Check if scrub scan is replaced by resilver.
# 8. Put another device offline and check if the test file checksum is correct.
#
# NOTES:
# A 25ms delay is added to make sure that the scrub is running while
# the reopen kicks the resilver.
#
verify_runnable "global"

# Restore the environment: clear injected delays, drop the checksum
# file, re-insert the unplugged disk and tear down the pool.
function cleanup
{
	log_must zinject -c all
	rm -f $TESTFILE_MD5 2>/dev/null
	# bring back removed disk online for further tests
	insert_disk $REMOVED_DISK $scsi_host
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
}

# Fix copy-paste error: this test checks reopen behavior while a scrub
# is running, not the pool-name-argument form.
log_assert "Testing zpool reopen while scrub is running"
log_onexit cleanup

set_removed_disk
scsi_host=$(get_scsi_host $REMOVED_DISK)

# 1. Create a pool
default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
# 2. Remove a disk.
remove_disk $REMOVED_DISK

log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"

# 3. Write a test file to the pool and calculate its checksum.
TESTFILE=/$TESTPOOL/data
TESTFILE_MD5=$(mktemp --tmpdir=/var/tmp)
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
# NOTE(review): the redirection also captures log_must's own status line;
# md5sum -c skips improperly formatted lines, so the check below still
# works — confirm if the logapi output format ever changes.
log_must md5sum $TESTFILE > $TESTFILE_MD5

# 4. Execute scrub.
# add delay to I/O requests for remaining disk in pool so the scrub is
# still in progress when the reopen is issued
log_must zinject -d $DISK2 -D25:1 $TESTPOOL
log_must zpool scrub $TESTPOOL

# 5. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 6. Reopen a pool.
log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
# 7. Check if scrub scan is replaced by resilver.
# the scrub operation has to be running while reopen is executed
log_must is_pool_scrubbing $TESTPOOL true
# the scrub will be replaced by resilver, wait until it ends
log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
# check if the scrub scan has been interrupted by resilver
log_must is_scan_restarted $TESTPOOL
# remove delay from disk
log_must zinject -c all

# 8. Put another device offline and check if the test file checksum is
# correct — this proves the re-plugged disk was fully resilvered.
log_must zpool offline $TESTPOOL $DISK2
log_must md5sum -c $TESTFILE_MD5
log_must zpool online $TESTPOOL $DISK2
sleep 1

# clean up
rm -f $TESTFILE_MD5 2>/dev/null
log_must zpool destroy $TESTPOOL

log_pass "Zpool reopen test successful"

View File

@ -0,0 +1,88 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
#
# DESCRIPTION:
# Test zpool reopen -n while scrub is running.
# Checks if re-plugged device is NOT resilvered.
#
# STRATEGY:
# 1. Create a pool
# 2. Remove a disk.
# 3. Write test file to pool.
# 4. Execute scrub.
# 5. "Plug back" disk.
# 6. Reopen a pool with an -n flag.
# 7. Check if scrub scan is NOT replaced by resilver.
# 8. Check if trying to put device to offline fails because of no valid
# replicas.
#
# NOTES:
# A 25ms delay is added to make sure that the scrub is running while
# the reopen is invoked.
#
verify_runnable "global"

# Restore the environment: clear injected delays, re-insert the
# unplugged disk and tear down the pool.
function cleanup
{
	log_must zinject -c all
	# bring back removed disk online for further tests
	insert_disk $REMOVED_DISK $scsi_host
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
}

# Fix copy-paste error: this test exercises 'zpool reopen -n' while a
# scrub is running, not the pool-name-argument form.
log_assert "Testing zpool reopen -n while scrub is running"
log_onexit cleanup

set_removed_disk
scsi_host=$(get_scsi_host $REMOVED_DISK)

# 1. Create a pool
default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
# 2. Remove a disk.
remove_disk $REMOVED_DISK

log_must zpool reopen -n $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"

# 3. Write test file to pool.
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE

# 4. Execute scrub.
# add delay to I/O requests for remaining disk in pool so the scrub is
# still in progress when the reopen is issued
log_must zinject -d $DISK2 -D25:1 $TESTPOOL
log_must zpool scrub $TESTPOOL

# 5. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host
# 6. Reopen a pool with an -n flag.
log_must zpool reopen -n $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
# 7. Check if scrub scan is NOT replaced by resilver.
log_must wait_for_scrub_end $TESTPOOL $MAXTIMEOUT
log_mustnot is_scan_restarted $TESTPOOL
# remove delay from disk
log_must zinject -c all

# 8. Check if trying to put device to offline fails because of no valid
# replicas (the re-plugged disk was never resilvered).
log_mustnot zpool offline $TESTPOOL $DISK2

# clean up
log_must zpool destroy $TESTPOOL

log_pass "Zpool reopen test successful"

View File

@ -0,0 +1,86 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
#
# DESCRIPTION:
# Test zpool reopen -n while resilver is running.
# Checks if the resilver is restarted.
#
# STRATEGY:
# 1. Create a pool
# 2. Remove a disk.
# 3. Write test file to pool.
# 4. "Plug back" disk.
# 5. Reopen a pool and wait until resilvering is started.
# 6. Reopen a pool again with -n flag.
# 7. Wait until resilvering is finished and check if it was restarted.
#
# NOTES:
# A 25ms delay is added to make sure that the resilver is running while
# the reopen is invoked.
#
verify_runnable "global"

# Restore the environment: clear injected delays, re-insert the
# unplugged disk and tear down the pool.
function cleanup
{
	log_must zinject -c all
	insert_disk $REMOVED_DISK $scsi_host
	poolexists $TESTPOOL && destroy_pool $TESTPOOL
}

# Fix copy-paste error: this test exercises 'zpool reopen -n' while a
# resilver is running, not the pool-name-argument form.
log_assert "Testing zpool reopen -n while resilver is running"
log_onexit cleanup

set_removed_disk
scsi_host=$(get_scsi_host $REMOVED_DISK)

# 1. Create a pool
default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
# 2. Remove a disk.
remove_disk $REMOVED_DISK

log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"

# 3. Write test file to pool.
log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE

# 4. "Plug back" disk.
insert_disk $REMOVED_DISK $scsi_host

# 5. Reopen a pool and wait until resilvering is started.
log_must zpool reopen $TESTPOOL
log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
# add delay to I/O requests for the reopened disk so the resilver is
# still in progress when the second reopen is issued
log_must zinject -d $REMOVED_DISK_ID -D25:1 $TESTPOOL
# wait until resilver starts
log_must wait_for_resilver_start $TESTPOOL $MAXTIMEOUT

# 6. Reopen a pool again with -n flag.
# Bug fix: the exit status was previously ignored; the reopen under
# test must succeed, so check it with log_must.
log_must zpool reopen -n $TESTPOOL

# 7. Wait until resilvering is finished and check if it was restarted.
log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
# remove delay from disk
log_must zinject -c all
log_must is_scan_restarted $TESTPOOL

# clean up
log_must zpool destroy $TESTPOOL

log_pass "Zpool reopen test successful"

View File

@ -0,0 +1,43 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Wrong arguments passed to zpool reopen should cause an error.
#
# STRATEGY:
# 1. Create an array with bad 'zpool reopen' arguments.
# 2. For each argument execute the 'zpool reopen' command and verify
# if it returns an error.
#
verify_runnable "global"

# 1. Create an array with bad 'zpool reopen' arguments.
typeset -a args=("!" "1" "-s" "--n" "-1" "-" "-c" "-f" "-d 2" "-abc" "-na")

log_assert "Test 'zpool reopen' with invalid arguments."

# 2. Each invalid invocation must fail. Arguments are deliberately
# left unquoted so "-d 2" word-splits into two options, as intended.
typeset -i i
for (( i = 0; i < ${#args[*]}; i++ )); do
	log_mustnot zpool reopen ${args[$i]}
done

log_pass "Passing invalid arguments to 'zpool reopen' failed as expected."

View File

@ -55,7 +55,7 @@ fi
function cleanup
{
#online last disk before fail
on_off_disk $offline_disk "online" $host
insert_disk $offline_disk $host
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
@ -98,11 +98,10 @@ for offline_disk in $autoonline_disks
do
log_must zpool export -F $TESTPOOL
host=$(ls /sys/block/$offline_disk/device/scsi_device \
| nawk -F : '{ print $1}')
host=$(get_scsi_host $offline_disk)
# Offline disk
on_off_disk $offline_disk "offline"
remove_disk $offline_disk
# Reimport pool with drive missing
log_must zpool import $TESTPOOL
@ -115,7 +114,7 @@ do
zpool events -c $TESTPOOL
# Online disk
on_off_disk $offline_disk "online" $host
insert_disk $offline_disk $host
log_note "Delay for ZED auto-online"
typeset -i timeout=0

View File

@ -110,7 +110,7 @@ log_must mkfile $FSIZE /$TESTPOOL/data
log_must zpool export -F $TESTPOOL
# Offline disk
on_off_disk $SD "offline"
remove_disk $SD
block_device_wait
log_must modprobe -r scsi_debug

View File

@ -33,14 +33,12 @@ cleanup_devices $DISKS
zed_stop
zed_cleanup
SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}')
SDDEVICE=$(echo $SD | nawk -F / '{print $3}')
SDDEVICE=$(get_debug_device)
# Offline disk and remove scsi_debug module
if is_linux; then
if [ -n "$SDDEVICE" ]; then
on_off_disk $SDDEVICE "offline"
block_device_wait
remove_disk $SDDEVICE
fi
modprobe -r scsi_debug
fi