ZFS

Posted on Mar 14, 2025

Pools

# Pool Creation
zpool create <pool> mirror <disk1,...n>					# Creates a storage pool of a single mirrored vdev
zpool create <pool> raidz <disk1,...n>					# Creates a storage pool of a single RAIDZ vdev
zpool create <pool> raidz2 <disk1,...n>					# Creates a storage pool of a single RAIDZ2 vdev
zpool create <pool> raidz3 <disk1,...n>					# Creates a storage pool of a single RAIDZ3 vdev
zpool destroy <pool>									# Destroys pool

# Expanding the pool
zpool add <pool> <type> <disk1,...n>					# Adds a new vdev to the storage pool
zpool attach <pool> <disk_a> <new_disk>					# If disk_a is a single disk, then the new disk will mirror it. Attaching additional disks to a mirror will simply add another replica.
zpool detach <pool> <disk>								# Removes a device from a mirror. 

# Replacing Drive
zpool offline <pool> <failed_disk>						# Take old drive offline then physically replace the drive.
zpool replace <pool> <failed_disk> <new_disk>			# Replace the drive in the pool, <new_disk> can be another disk or a hot spare. 
zpool online <pool> <disk>								# Put the drive back online
zpool labelclear <disk>									# Removes the ZFS label information from the device

# Migration
zpool export <pool>										# Marks a pool to be exported, allowing it to be moved to a different system.
zpool import <pool>										# Imports a pool to be used on a new system

# Monitoring
zpool list												# Lists information about ZFS storage pools
zpool iostat											# Display logical I/O statistics for ZFS storage pools
zpool events											# List recent events generated by kernel
zpool events -c											# Clears recent events generated by kernel
zpool history											# Inspect command history of ZFS storage pools

# Maintenance
zpool status											# Displays the detailed health status for the given pools.
zpool status -v											# Displays verbose data error information, printing out a complete list of all data errors since the last complete pool scrub.
zpool status -P											# Display full paths for vdevs instead of only the last component of the path.
zpool status -L											# Display real paths for vdevs resolving all symbolic links
zpool status -i											# Display vdev initialization status.
zpool status -s											# Display the number of leaf vdev slow I/O operations.
zpool clear												# Clear device errors in ZFS storage pool
zpool scrub												# Starts or resumes a paused scrub operation. Examines all data.
zpool scrub -s											# Stops the scrubbing process
zpool scrub -p											# Pauses the scrubbing process
zpool scrub	-e											# Only scrubs data with known errors as reported by 'zpool status -v'
zpool scrub	-C											# Continues a scrub operation
zpool resilver <pool>									# Starts or restarts a resilver operation. Examines data known to be out of date
zpool trim												# Starts the process of reclaiming unused blocks on demand.

# Misc
zpool upgrade -a										# Upgrades all zpools to the latest version
zpool get <property> <pool>								# Gets the value of <property> for the <pool>
zpool get all <pool>									# Gets all properties for the <pool>
zpool set property=value <pool>							# Sets the specified property on the <pool>
zpool set property=value <pool> <vdev>					# Sets the specified property on <vdev> in <pool>

Datasets

# Basic Management
zfs create -o mountpoint=/srv tank/srv					# Creates a dataset that automounts to /srv
zfs set property=value <pool>/<dataset>					# Sets the property to value for <pool>/<dataset>
zfs get <property> <pool>/<dataset>						# Gets the value of <property> for <pool>/<dataset>
zfs list												# Lists datasets
zfs list -t <type>										# Lists all datasets of type: filesystem, snapshot, volume, bookmark, or all
zfs rename <old> <new>									# Rename a filesystem, volume, or snapshot from <old> to <new>
zfs send <pool>/<dataset>@<snapshot>					# Sends a snapshot as a stream to stdout.
zfs receive <pool>/<dataset>							# Receives zfs stream
zfs send <pool1>/<dataset1>@<snapshot1> | ssh <host> zfs receive <pool2>/<dataset2>@<snapshot2> # Sends a snapshot over ssh to a remote machine to be imported.
zfs destroy												# Destroys a dataset

# Mounting
zfs mount <pool/dataset>								# Mounts specific dataset. Must have the mountpoint property set.
zfs mount -a											# Mounts all datasets
zfs mount -l <pool/dataset>								# Loads the key when mounting. 'zfs load-key'
zfs umount <mountpoint>									# Unmounts a dataset
zfs umount -a											# Unmounts all datasets
zfs umount -u <mountpoint>								# Unmounts a dataset and unloads key

# Encryption
zfs load-key <pool>/<dataset>							# Loads an encryption key for specified dataset.
zfs unload-key <filesystem>								# Unloads the encryption key for the specific file system
zfs change-key -l -o keylocation=value -o keyformat=value -o pbkdf2iters=value <filesystem>	# Changes the encryption on <filesystem>

# Snapshots
zfs snapshot <dataset>@<snapshotname>					# Creates a snapshot of the <dataset> with the <snapshotname>.
zfs hold <tag> <snapshotname>							# Adds a <tag> to the snapshot preventing it from being deleted.
zfs holds -r											# Lists holds recursively.
zfs release <tag> <snapshot>							# Removes a single reference, named with the <tag> argument, from the specified snapshot or snapshots.
zfs diff <snapshot> <snapshot|filesystem>				# Show the difference between two snapshots or a snapshot and the current filesystem.

Tasks

Replacing a failed drive

zpool set autoexpand=on <pool>							# Autoexpands the vdev when the drives of the vdev are replaced with larger ones.
zpool offline <pool> <failed_disk>						# Take old drive offline then physically replace the drive.
zpool replace <pool> <failed_disk> <new_disk>			# Replace the drive in the pool, <new_disk> can be another disk or a hot spare.

Changing Encryption Key

It’s important to know that the master key does not change when zfs change-key is run. In the event of a master key being compromised, it’s best to destroy the pool and securely erase all disks.

# Preferred Method
zpool destroy <pool>
for disk in <disk1> <disk2> <disk3> <disk4> ; do
dd if=/dev/zero of="$disk"
dd if=/dev/zero | tr '\000' '\377' > "$disk"
dd if=/dev/urandom of="$disk"
done
zpool create <pool> mirror <disk1> <disk2> mirror <disk3> <disk4>
zfs create -o encryption=on -o keylocation=prompt -o keyformat=passphrase -o mountpoint=<mountpoint> <pool>/<dataset>
zfs mount -l <pool>/<dataset>
rsync -av /path/to/backup/ /path/to/mounted/dataset/

# In place
zfs create -o encryption=on -o keylocation=prompt -o keyformat=passphrase -o mountpoint=<mountpoint> <pool>/<newdataset>
zfs snapshot <pool>/<olddataset>@compromised
zfs send <pool>/<olddataset>@compromised | zfs receive <pool>/<newdataset>
# If drives support trim (zpool status -st)
for disk in <disk1> <disk2> <disk3> <disk4> ; do
zpool trim --secure <pool> "$disk"
done
# Else
zpool initialize <pool>
zfs destroy <pool>/<olddataset>