Replace custom zpool configs with generic configs

To streamline testing I have in the past added several custom configs
to the zpool-config directory.  This change reverts those custom configs
and replaces them with three generic configs which can do the same thing.
The generic config behavior can be set by setting various environment
variables when calling either the zpool-create.sh or zpios.sh scripts.

For example if you wanted to create and test a single 4-disk Raid-Z2
configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices
you could run the following.

$ ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 LEVEL=2 \
  zpool-create.sh -c zpool-raidz

$ zpool status tank
  pool: tank
 state: ONLINE
 scan: none requested
config:

      NAME        STATE     READ WRITE CKSUM
      tank        ONLINE       0     0     0
        raidz2-0  ONLINE       0     0     0
          A1      ONLINE       0     0     0
          B1      ONLINE       0     0     0
          C1      ONLINE       0     0     0
          D1      ONLINE       0     0     0
      logs
        A2        ONLINE       0     0     0
      cache
        B2        ONLINE       0     0     0

errors: No known data errors
This commit is contained in:
Brian Behlendorf 2010-11-05 11:43:20 -07:00
parent 3ee56c292b
commit cb39a6c6aa
17 changed files with 261 additions and 276 deletions

View File

@ -1,10 +1,6 @@
pkglibexecdir = $(libexecdir)/@PACKAGE@/zpool-config
dist_pkglibexec_SCRIPTS = \
$(top_srcdir)/scripts/zpool-config/dm0-raid0.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raid0-1x70.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raid10-35x2.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raidz2-7x10.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raidz-7x10.sh \
$(top_srcdir)/scripts/zpool-config/file-raid0.sh \
$(top_srcdir)/scripts/zpool-config/file-raid10.sh \
$(top_srcdir)/scripts/zpool-config/file-raidz2.sh \
@ -29,14 +25,9 @@ dist_pkglibexec_SCRIPTS = \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz3.sh \
$(top_srcdir)/scripts/zpool-config/sda-raid0.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raid0-1x16.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raid10-8x2.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raidz2-4x4.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raidz-4x4.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raid0-1x48.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raid10-24x2.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raidz2-8x6.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raidz-8x6.sh
$(top_srcdir)/scripts/zpool-config/zpool-raid0.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raid10.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raidz.sh
all:
@list='$(dist_pkglibexec_SCRIPTS)'; \

View File

@ -274,10 +274,6 @@ top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
dist_pkglibexec_SCRIPTS = \
$(top_srcdir)/scripts/zpool-config/dm0-raid0.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raid0-1x70.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raid10-35x2.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raidz2-7x10.sh \
$(top_srcdir)/scripts/zpool-config/dragon-raidz-7x10.sh \
$(top_srcdir)/scripts/zpool-config/file-raid0.sh \
$(top_srcdir)/scripts/zpool-config/file-raid10.sh \
$(top_srcdir)/scripts/zpool-config/file-raidz2.sh \
@ -302,14 +298,9 @@ dist_pkglibexec_SCRIPTS = \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz2.sh \
$(top_srcdir)/scripts/zpool-config/scsi_debug-raidz3.sh \
$(top_srcdir)/scripts/zpool-config/sda-raid0.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raid0-1x16.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raid10-8x2.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raidz2-4x4.sh \
$(top_srcdir)/scripts/zpool-config/supermicro-raidz-4x4.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raid0-1x48.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raid10-24x2.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raidz2-8x6.sh \
$(top_srcdir)/scripts/zpool-config/x4550-raidz-8x6.sh
$(top_srcdir)/scripts/zpool-config/zpool-raid0.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raid10.sh \
$(top_srcdir)/scripts/zpool-config/zpool-raidz.sh
all: all-am

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Dragon (White Box) Raid-0 Configuration (1x70)
#
# Fixed geometry for this hardware: 7 ranks x 10 channels = 70 disks.
RANKS=7
CHANNELS=10
# Build udev links from the example zdev.conf, let udev_raid0_setup
# populate RAID0S (helpers defined elsewhere in the test framework),
# then create a flat Raid-0 pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
udev_raid0_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Dragon (White Box) Raid-10 Configuration (35x2(1+1))
#
# Fixed geometry for this hardware: 7 ranks x 10 channels, paired into mirrors.
RANKS=7
CHANNELS=10
# Build udev links from the example zdev.conf, let udev_raid10_setup
# populate RAID10S (helpers defined elsewhere), then create the mirrored
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
udev_raid10_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Dragon (White Box) Raid-Z Configuration (7x10(9+1))
#
# Fixed geometry for this hardware: 7 raidz vdevs of 10 disks each.
RANKS=7
CHANNELS=10
# Build udev links from the example zdev.conf, let udev_raidz_setup
# populate RAIDZS (helpers defined elsewhere), then create the raidz
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
udev_raidz_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Dragon (White Box) Raid-Z2 Configuration (7x10(8+2))
#
# Fixed geometry for this hardware: 7 raidz2 vdevs of 10 disks each.
RANKS=7
CHANNELS=10
# Build udev links from the example zdev.conf, let udev_raidz2_setup
# populate RAIDZ2S (helpers defined elsewhere), then create the raidz2
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.dragon.example
udev_raidz2_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.dragon.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Supermicro (White Box) Raid-0 Configuration (1x16)
#
# Fixed geometry for this hardware: 4 ranks x 4 channels = 16 disks.
RANKS=4
CHANNELS=4
# Build udev links from the example zdev.conf, let udev_raid0_setup
# populate RAID0S (helpers defined elsewhere), then create a flat
# Raid-0 pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
udev_raid0_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Supermicro (White Box) Raid-10 Configuration (8x2(1+1))
#
# Fixed geometry for this hardware: 4 ranks x 4 channels, paired into mirrors.
RANKS=4
CHANNELS=4
# Build udev links from the example zdev.conf, let udev_raid10_setup
# populate RAID10S (helpers defined elsewhere), then create the mirrored
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
udev_raid10_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Supermicro (White Box) Raid-Z Configuration (4x4(3+1))
#
# Fixed geometry for this hardware: 4 raidz vdevs of 4 disks each.
RANKS=4
CHANNELS=4
# Build udev links from the example zdev.conf, let udev_raidz_setup
# populate RAIDZS (helpers defined elsewhere), then create the raidz
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
udev_raidz_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Supermicro (White Box) Raid-Z2 Configuration (4x4(2+2))
#
# Fixed geometry for this hardware: 4 raidz2 vdevs of 4 disks each.
RANKS=4
CHANNELS=4
# Build udev links from the example zdev.conf, let udev_raidz2_setup
# populate RAIDZ2S (helpers defined elsewhere), then create the raidz2
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.supermicro.example
udev_raidz2_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.supermicro.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Sun Fire x4550 (Thumper/Thor) Raid-0 Configuration (1x48)
#
# Fixed geometry for this hardware: 8 ranks x 6 channels = 48 disks.
RANKS=8
CHANNELS=6
# Build udev links from the example zdev.conf, let udev_raid0_setup
# populate RAID0S (helpers defined elsewhere), then create a flat
# Raid-0 pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
udev_raid0_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID0S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Sun Fire x4550 (Thumper/Thor) Raid-10 Configuration (24x2(1+1))
#
# Fixed geometry for this hardware: 8 ranks x 6 channels, paired into mirrors.
RANKS=8
CHANNELS=6
# Build udev links from the example zdev.conf, let udev_raid10_setup
# populate RAID10S (helpers defined elsewhere), then create the mirrored
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
udev_raid10_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAID10S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Sun Fire x4550 (Thumper/Thor) Raid-Z Configuration (8x6(5+1))
#
# Fixed geometry for this hardware: 8 raidz vdevs of 6 disks each.
RANKS=8
CHANNELS=6
# Build udev links from the example zdev.conf, let udev_raidz_setup
# populate RAIDZS (helpers defined elsewhere), then create the raidz
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
udev_raidz_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZS[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
}

View File

@ -1,21 +0,0 @@
#!/bin/bash
#
# Sun Fire x4550 (Thumper/Thor) Raid-Z2 Configuration (8x6(4+2))
#
# NOTE(review): title said "Raid-Z" but the (4+2) layout and the
# udev_raidz2_setup call below make this the Raid-Z2 config.
# Fixed geometry for this hardware: 8 raidz2 vdevs of 6 disks each.
RANKS=8
CHANNELS=6
# Build udev links from the example zdev.conf, let udev_raidz2_setup
# populate RAIDZ2S (helpers defined elsewhere), then create the raidz2
# pool; abort the script on failure.
zpool_create() {
udev_setup ${ETCDIR}/zfs/zdev.conf.x4550.example
udev_raidz2_setup ${RANKS} ${CHANNELS}
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${RAIDZ2S[*]} || exit 1
}
# Destroy the pool (best effort, no exit on error) and remove the udev links.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
udev_cleanup ${ETCDIR}/zfs/zdev.conf.x4550.example
}

View File

@ -0,0 +1,81 @@
#!/bin/bash
#
# Zpool Raid-0 Configuration
#
# This script is used to simplify testing with the /dev/disk/zpool/[A-Z][1-n]
# devices. It assumes that you have already populated /dev/disk/zpool/ by
# creating an /etc/zfs/zdev.conf file based on your system design. You may
# use the zpool_layout command or manually create your own config file.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-0 configurations by adjusting the following tunables.
# For example if you wanted to create and test a single 4-disk Raid-0
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 \
# zpool-create.sh -c zpool-raid0
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
# (environment override; defaults to a single rank)
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
# (environment override; defaults to eight channels, A-H)
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Create a ZIL vdev as follows.
# e.g. ZIL="log A2"; empty (the default) means no dedicated log device.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
# e.g. L2ARC="cache B2"; empty (the default) means no dedicated cache device.
L2ARC=${L2ARC:-}
# raid0_setup RANKS CHANNELS
#
# Populate the global RAID0S array with one device name per disk,
# channel-major within each rank: "<channel-letter><rank-id>".
raid0_setup() {
	local ranks=$1
	local channels=$2

	RAID0S=()
	for (( i=0; i<ranks; i++ )); do
		RANK=${RANK_LIST[$i]}
		for (( j=0; j<channels; j++ )); do
			RAID0S+=( "${CHANNEL_LIST[$j]}${RANK}" )
		done
	done
	return 0
}
# Expand the RANKS x CHANNELS geometry into RAID0S, append the optional
# ZIL/L2ARC specs, and create the pool.  ZPOOL_DEVICES is not declared
# local, so it remains visible to the caller.  msg, ZPOOL, FORCE_FLAG
# and ZPOOL_NAME are provided by the calling framework -- not defined here.
zpool_create() {
raid0_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAID0S[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
# Destroy the pool.  Errors are deliberately not fatal (no "|| exit")
# so cleanup remains best effort.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@ -0,0 +1,86 @@
#!/bin/bash
#
# Zpool Raid-10 Configuration
#
# This script is used to simplify testing with the /dev/disk/zpool/[A-Z][1-n]
# devices. It assumes that you have already populated /dev/disk/zpool/ by
# creating an /etc/zfs/zdev.conf file based on your system design. You may
# use the zpool_layout command or manually create your own config file.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-10 configurations by adjusting the following tunables.
# For example if you wanted to create and test a single 4-disk Raid-10
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 \
# zpool-create.sh -c zpool-raid10
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# mirror-0 ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# mirror-1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
# (environment override; defaults to a single rank)
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
# (environment override; defaults to eight channels, A-H)
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Create a ZIL vdev as follows.
# e.g. ZIL="log A2"; empty (the default) means no dedicated log device.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
# e.g. L2ARC="cache B2"; empty (the default) means no dedicated cache device.
L2ARC=${L2ARC:-}
# raid10_setup RANKS CHANNELS
#
# Populate the global RAID10S array with one "mirror <d1> <d2>" entry
# per adjacent channel pair within each rank.  CHANNELS is expected to
# be even; an odd trailing channel is silently dropped by the j<CHANNELS
# bound.  (Fix: removed the unused "local IDX=0" from the original.)
raid10_setup() {
	local RANKS=$1
	local CHANNELS=$2

	RAID10S=()
	for (( i=0, l=0 ; i<${RANKS}; i++ )); do
		RANK=${RANK_LIST[$i]}
		# j/k walk the channel list two at a time; l indexes RAID10S.
		for (( j=0, k=1; j<${CHANNELS}; j+=2, k+=2, l++ )); do
			DISK1="${CHANNEL_LIST[$j]}${RANK}"
			DISK2="${CHANNEL_LIST[$k]}${RANK}"
			RAID10S[$l]="mirror ${DISK1} ${DISK2}"
		done
	done
	return 0
}
# Expand the RANKS x CHANNELS geometry into RAID10S mirror pairs, append
# the optional ZIL/L2ARC specs, and create the pool.  ZPOOL_DEVICES is
# not declared local, so it remains visible to the caller.  msg, ZPOOL,
# FORCE_FLAG and ZPOOL_NAME are provided by the calling framework.
zpool_create() {
raid10_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAID10S[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
# Destroy the pool.  Errors are deliberately not fatal (no "|| exit")
# so cleanup remains best effort.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}

View File

@ -0,0 +1,88 @@
#!/bin/bash
#
# Zpool Raid-Z Configuration
#
# This script is used to simplify testing with the /dev/disk/zpool/[A-Z][1-n]
# devices. It assumes that you have already populated /dev/disk/zpool/ by
# creating an /etc/zfs/zdev.conf file based on your system design. You may
# use the zpool_layout command or manually create your own config file.
#
# You can then use either the zpool-create.sh or the zpios.sh test script to
# test various Raid-Z configurations by adjusting the following tunables.
# For example if you wanted to create and test a single 4-disk Raid-Z2
# configuration using disks [A-D]1 with dedicated ZIL and L2ARC devices
# you could run the following.
#
# ZIL="log A2" L2ARC="cache B2" RANKS=1 CHANNELS=4 LEVEL=2 \
# zpool-create.sh -c zpool-raidz
#
# zpool status tank
# pool: tank
# state: ONLINE
# scan: none requested
# config:
#
# NAME STATE READ WRITE CKSUM
# tank ONLINE 0 0 0
# raidz2-0 ONLINE 0 0 0
# A1 ONLINE 0 0 0
# B1 ONLINE 0 0 0
# C1 ONLINE 0 0 0
# D1 ONLINE 0 0 0
# logs
# A2 ONLINE 0 0 0
# cache
# B2 ONLINE 0 0 0
#
# errors: No known data errors
#
# Number of interior vdevs to create using the following rank ids.
# (environment override; defaults to a single rank)
RANKS=${RANKS:-1}
RANK_LIST=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 )
# Number of devices per vdev using the following channel ids.
# (environment override; defaults to eight channels, A-H)
CHANNELS=${CHANNELS:-8}
CHANNEL_LIST=( A B C D E F G H I J K L M N O P Q R S T U V W X Y Z )
# Raid-Z Level: 1, 2, or 3.
# Selects the vdev type "raidz${LEVEL}"; defaults to raidz2.
LEVEL=${LEVEL:-2}
# Create a ZIL vdev as follows.
# e.g. ZIL="log A2"; empty (the default) means no dedicated log device.
ZIL=${ZIL:-}
# Create an L2ARC vdev as follows.
# e.g. L2ARC="cache B2"; empty (the default) means no dedicated cache device.
L2ARC=${L2ARC:-}
# raidz_setup RANKS CHANNELS
#
# Populate the global RAIDZS array with one "raidz${LEVEL} <dev>..."
# entry per rank, each entry holding CHANNELS device names.
raidz_setup() {
	local ranks=$1
	local channels=$2

	RAIDZS=()
	for (( i=0; i<ranks; i++ )); do
		RANK=${RANK_LIST[$i]}
		RAIDZ=( "raidz${LEVEL}" )
		for (( j=0; j<channels; j++ )); do
			RAIDZ+=( "${CHANNEL_LIST[$j]}${RANK}" )
		done
		RAIDZS[$i]="${RAIDZ[*]}"
	done
	return 0
}
# Expand the RANKS x CHANNELS x LEVEL geometry into RAIDZS vdev specs,
# append the optional ZIL/L2ARC specs, and create the pool.
# ZPOOL_DEVICES is not declared local, so it remains visible to the
# caller.  msg, ZPOOL, FORCE_FLAG and ZPOOL_NAME come from the framework.
zpool_create() {
raidz_setup ${RANKS} ${CHANNELS}
ZPOOL_DEVICES="${RAIDZS[*]} ${ZIL} ${L2ARC}"
msg ${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES}
${ZPOOL} create ${FORCE_FLAG} ${ZPOOL_NAME} ${ZPOOL_DEVICES} || exit 1
}
# Destroy the pool.  Errors are deliberately not fatal (no "|| exit")
# so cleanup remains best effort.
zpool_destroy() {
msg ${ZPOOL} destroy ${ZPOOL_NAME}
${ZPOOL} destroy ${ZPOOL_NAME}
}