
ZFS - most frequently used commands.

ZFS - most frequently used commands - below are the commands that we use on a daily basis for ZFS filesystem administration.
# Some reminders on command syntax
# create a pool on a single slice, or (alternatively) a mirrored pool across two slices
root@server:# zpool create oradb-1 c0d0s0
root@server:# zpool create oradb-1 mirror c0d0s3 c1d0s0
# create filesystems within the pool and set their mountpoints
root@server:# zfs create oradb-1/oracle-10g
root@server:# zfs set mountpoint=/u01/oracle-10g oradb-1/oracle-10g
root@server:# zfs create oradb-1/home
root@server:# zfs set mountpoint=/export/home oradb-1/home
root@server:# zfs create oradb-1/home/oracle
# set filesystem properties: compression, quota, reservation, NFS sharing
root@server:# zfs set compression=on oradb-1/home
root@server:# zfs set quota=1g oradb-1/home/oracle
root@server:# zfs set reservation=2g oradb-1/home/oracle
root@server:# zfs set sharenfs=rw oradb-1/home
# to set the filesystem block size to 16k
root@server:# zfs set recordsize=16k oradb-1/home
zpool list, zpool status, and zfs list are also useful commands for filesystem monitoring.
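For a quick health check and to confirm the properties set above, something like the following works (a minimal sketch reusing the oradb-1 names from this post):
root@server:# zpool status -x
root@server:# zfs get compression,quota,reservation,sharenfs oradb-1/home/oracle
zpool status -x prints a one-line "all pools are healthy" message unless a pool needs attention.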
I have lots of things to add to this; as and when I get hands-on, I will modify or add to my blog.
Thanks!
Posted by Nilesh Joshi at 7/14/2009 10:09:00 AM
=========
http://docs.oracle.com/cd/E19963-01/html/821-1448/gaynd.html
=========
Tutorials: Solaris ZFS Quick Tutorial / Examples
Posted by: mattzone on Dec 12, 2008 - 08:24 PM
Solaris
First, create a pool using 'zpool'. Then use 'zfs' to make the filesystems.
Create a pool called pool1. The -m option is optional. If given, it specifies a mount point for zfs filesystems created from the specified pool. The mount point should be empty or nonexistent. If the -m argument is omitted, the default mount point is /pool-name (here, /pool1).
# zpool create -m /export/data01 pool1 mirror c2t0d0 c4t0d0
# zpool status
  pool: pool1
 state: ONLINE
 scrub: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        pool1       ONLINE       0     0     0
          mirror    ONLINE       0     0     0
            c2t0d0  ONLINE       0     0     0
            c4t0d0  ONLINE       0     0     0
To list pools:
# zpool list
NAME    SIZE   USED  AVAIL   CAP  HEALTH  ALTROOT
pool1   136G  18.2G   118G   13%  ONLINE  -
To create a zfs filesystem:
# zfs create pool1/fs.001
To set a quota (maximum size) for the filesystem:
# zfs set quota=24g pool1/fs.001
The "zfs share -a" command makes all zfs filesystems that have the "sharenfs" pr
operty turned on automatically shared. It only has to be issued once and persist
s over a reboot. Alternatively, one can issue individual "zfs share" commands fo
r specific filesystems:
# zfs share -a
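To share just one filesystem instead, the per-filesystem form is simply (using the fs.001 filesystem created above; its sharenfs property must already be set, as shown next):
# zfs share pool1/fs.001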
To make a filesystem sharable:
# zfs set sharenfs=on pool1/fs.001
To list existing zfs filesystems:
# zfs list
NAME           USED  AVAIL  REFER  MOUNTPOINT
pool1         18.2G   116G  26.5K  /export/data01
pool1/fs.001  18.2G  5.85G  18.2G  /export/data01/fs.001
To list all properties of a specific filesystem:
# zfs get all pool1/fs.001
NAME          PROPERTY       VALUE                  SOURCE
pool1/fs.001  type           filesystem             -
pool1/fs.001  creation       Wed Sep 13 16:34 2006  -
pool1/fs.001  used           18.2G                  -
pool1/fs.001  available      5.85G                  -
pool1/fs.001  referenced     18.2G                  -
pool1/fs.001  compressratio  1.00x                  -
pool1/fs.001  mounted        yes                    -
pool1/fs.001  quota          24G                    local
pool1/fs.001  reservation    none                   default
pool1/fs.001  recordsize     128K                   default
pool1/fs.001  mountpoint     /export/data01/fs.001  inherited from pool1
pool1/fs.001  sharenfs       on                     local
pool1/fs.001  checksum       on                     default
pool1/fs.001  compression    off                    default
pool1/fs.001  atime          on                     default
pool1/fs.001  devices        on                     default
pool1/fs.001  exec           on                     default
pool1/fs.001  setuid         on                     default
pool1/fs.001  readonly       off                    default
pool1/fs.001  zoned          off                    default
pool1/fs.001  snapdir        hidden                 default
pool1/fs.001  aclmode        groupmask              default
pool1/fs.001  aclinherit     secure                 default
Here's an example of 'df':
# df -k -Fzfs
Filesystem        kbytes       used      avail  capacity  Mounted on
pool1          140378112         26  121339726        1%  /export/data01
pool1/fs.001    25165824   19036649    6129174       76%  /export/data01/fs.001
You can increase the size of a pool by adding a mirrored pair of disk drives:
# zpool add pool1 mirror c2t1d0 c4t1d0
# zpool status
  pool: pool1
 state: ONLINE
 scrub: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        pool1       ONLINE       0     0     0
          mirror    ONLINE       0     0     0
            c2t0d0  ONLINE       0     0     0
            c4t0d0  ONLINE       0     0     0
          mirror    ONLINE       0     0     0
            c2t1d0  ONLINE       0     0     0
            c4t1d0  ONLINE       0     0     0

# zpool list
NAME    SIZE   USED  AVAIL   CAP  HEALTH  ALTROOT
pool1   272G  18.2G   254G    6%  ONLINE  -
Note that "zpool attach" is a completely different command that adds multiway mi
rrors to increase redundancy but does not add extra space.
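For example, attaching a spare disk to one of the existing mirror devices would turn it into a three-way mirror without growing the pool (a sketch; c5t0d0 is a hypothetical unused disk):
# zpool attach pool1 c2t0d0 c5t0d0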
Make some more filesystems:
# zfs create pool1/fs.002
# zfs set quota=20g pool1/fs.002
# zfs set sharenfs=on pool1/fs.002
# zfs create pool1/fs.003
# zfs set quota=30g pool1/fs.003
# zfs set sharenfs=on pool1/fs.003
# zfs create pool1/fs.004
# zfs set quota=190G pool1/fs.004
# zfs set sharenfs=on pool1/fs.004
They show up in 'df':
# df -k -Fzfs -h
Filesystem     size  used  avail  capacity  Mounted on
pool1          268G   29K   250G        1%  /export/data01
pool1/fs.001    24G   18G   5.8G       76%  /export/data01/fs.001
pool1/fs.002    30G   24K    30G        1%  /export/data01/fs.002
pool1/fs.003    20G   24K    20G        1%  /export/data01/fs.003
pool1/fs.004   190G   24K   190G        1%  /export/data01/fs.004
But don't look for them in /etc/vfstab:
# cat /etc/vfstab
#device         device          mount           FS      fsck    mount   mount
#to_mount       to_fsck         point           type    pass    at_boot options
#
fd - /dev/fd fd - no -
/proc - /proc proc - no -
/dev/dsk/c0t0d0s1 - - swap - no -
/dev/dsk/c0t0d0s0 /dev/rdsk/c0t0d0s0 / ufs 1 no -
/dev/dsk/c0t0d0s3 /dev/rdsk/c0t0d0s3 /var ufs 1 no -
/devices - /devices devfs - no -
ctfs - /system/contract ctfs - no -
objfs - /system/object objfs - no -
swap - /tmp tmpfs - yes -
Note that they are all shared because the share -a is in effect:
# share
- /export/data01/fs.001 rw ""
- /export/data01/fs.002 rw ""
- /export/data01/fs.003 rw ""
- /export/data01/fs.004 rw ""
You can list all zfs filesystems with:
# zfs list
NAME           USED  AVAIL  REFER  MOUNTPOINT
pool1         18.2G   250G  29.5K  /export/data01
pool1/fs.001  18.2G  5.85G  18.2G  /export/data01/fs.001
pool1/fs.002  24.5K  30.0G  24.5K  /export/data01/fs.002
pool1/fs.003  24.5K  20.0G  24.5K  /export/data01/fs.003
pool1/fs.004  24.5K   190G  24.5K  /export/data01/fs.004
The disks are labeled and partitioned automatically by 'zpool':
# format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
       0. c0t0d0 <DEFAULT cyl 8851 alt 2 hd 255 sec 63>
          /pci@0,0/pcie11,4080@1/sd@0,0
       1. c2t0d0
          /pci@6,0/pci13e9,1300@3/sd@0,0
       2. c2t1d0
          /pci@6,0/pci13e9,1300@3/sd@1,0
       3. c4t0d0
          /pci@6,0/pci13e9,1300@4/sd@0,0
       4. c4t1d0
          /pci@6,0/pci13e9,1300@4/sd@1,0
Specify disk (enter its number):
partition> p
Current partition table (original):
Total disk sectors available: 286733069 + 16384 (reserved sectors)
Part      Tag    Flag     First Sector         Size         Last Sector
  0        usr    wm                34     136.72GB           286733069
  1 unassigned    wm                 0            0                   0
  2 unassigned    wm                 0            0                   0
  3 unassigned    wm                 0            0                   0
  4 unassigned    wm                 0            0                   0
  5 unassigned    wm                 0            0                   0
  6 unassigned    wm                 0            0                   0
  8   reserved    wm         286733070       8.00MB           286749453
Note that Solaris 10 (prior to the 10/08 release) cannot boot from a zfs filesystem. You may want to have a look at this tutorial on Mirroring system disks with Solaris Volume Manager / Disksuite.

=====
How can I set up and configure a ZFS storage pool under Solaris 10?
ANSWER
This is a brief example of setting up a ZFS storage
pool and a couple of filesystems ("datasets") on Solaris 10.
You'll need the Solaris Update 2 release (dated June 2006) or later, as ZFS was
not in the earlier standard releases of Solaris 10.
Useful documentation reference:-
http://www.opensolaris.org/os/community/zfs/docs
Once you have the Solaris system up and running, this is how to proceed:-
1. You'll need a spare disk partition or two for the storage pool. You can use files (see mkfile) if you're stuck for spare hard partitions, but only for experimentation. The partitions will be OVERWRITTEN by this procedure.
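A file-backed test pool could be built like this (a sketch only; the file names, the 100 MB size, and the pool name testpool are arbitrary):
# mkfile 100m /export/zfsfile1 /export/zfsfile2
# zpool create testpool /export/zfsfile1 /export/zfsfile2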
2. Login as root and create a storage pool using the spare partitions:-
# zpool create lake c0t2d0s0 c0t2d0s1
(The # indicates the type-in prompt)
If the above partitions contain existing file systems, you may need to use
the -f (force) option:-
# zpool create -f lake c0t2d0s0 c0t2d0s1
The pool (called lake) has been created.
3. Use zpool list to view your pool stats:-
# zpool list
NAME    SIZE   USED  AVAIL   CAP  HEALTH  ALTROOT
lake   38.2G  32.5K  38.2G    0%  ONLINE  -
A df listing will also show it:-
# df -h /lake
Filesystem    size  used  avail  capacity  Mounted on
lake           38G    8K    38G        1%  /lake
4. Now create a file system within the pool (Sun call these datasets):-
# zfs create lake/fa
# zfs list
NAME       USED  AVAIL  REFER  MOUNTPOINT
lake      43.0K  38.0G  8.50K  /lake
lake/fa      8K  38.0G     8K  /lake/fa
To destroy a storage pool (AND ALL ITS DATA):-
# zpool destroy lake
To destroy a dataset:-
# zfs destroy lake/fa
5. zpool can also create mirror devices:-
# zpool create lake mirror c0t2d0s0 c0t2d0s1
and something called RAID Z (similar to RAID 5):-
# zpool create lake raidz c0t2d0s0 c0t2d0s1 c0t2d0s3
6. To add further devices to a pool (not mirrors or RAIDZ - those must be extended only with matching mirror or RAIDZ device groups):-
# zpool add lake c0t2d0s3 c0t2d0s4
# zpool status
  pool: lake
 state: ONLINE
 scrub: none requested
config:

        NAME          STATE     READ WRITE CKSUM
        lake          ONLINE       0     0     0
          c0t2d0s0    ONLINE       0     0     0
          c0t2d0s1    ONLINE       0     0     0
          c0t2d0s3    ONLINE       0     0     0
          c0t2d0s4    ONLINE       0     0     0
Note that datasets are automatically mounted, so no more
updating of /etc/vfstab!
You can also offline/online components, take snapshots (recursive since update 3), clone filesystems, and apply properties such as quotas, NFS sharing, etc.
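For illustration, assuming the mirrored pool from step 5 (offline/online need redundancy to succeed) and arbitrary snapshot and clone names:
# zpool offline lake c0t2d0s1
# zpool online lake c0t2d0s1
# zfs snapshot lake/fa@snap1
# zfs snapshot -r lake@snap1
# zfs clone lake/fa@snap1 lake/fa-clone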
Entire pools can be exported, then imported on another system, Intel or SPARC.
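A sketch of the move (run the export on the old host and the import on the new one):
# zpool export lake
# zpool import lake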
There are even built-in backup and restore facilities, not to mention
performance tools:-
# zpool iostat -v
                 capacity     operations    bandwidth
pool           used  avail   read  write   read  write
------------  -----  -----  -----  -----  -----  -----
lake           305M  9.39G      0      0  43.7K  74.7K
  mirror       153M  4.69G      0      0  38.6K  37.5K
    c0t2d0s0      -      -      0      0  39.1K  38.5K
    c0t2d0s1      -      -      0      1     81   114K
  mirror       152M  4.69G      0      0  5.11K  37.5K
    c0t2d0s3      -      -      0      0  5.13K  38.2K
    c0t2d0s4      -      -      0      0     27  38.2K
------------  -----  -----  -----  -----  -----  -----
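The backup and restore facilities mentioned above are zfs send and zfs receive, which turn a snapshot into a stream and rebuild it elsewhere (a sketch; the snapshot name and the file under /var/tmp are arbitrary, and the stream could equally be piped over ssh):
# zfs snapshot lake/fa@backup1
# zfs send lake/fa@backup1 > /var/tmp/fa.backup1
# zfs receive lake/restored < /var/tmp/fa.backup1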
For more information on ZFS, why not attend our 4-day Solaris 10 Update course? See: http://www.firstalt.co.uk/courses/s10up.html
ZFS is also included in our standard Solaris 10 Systems Administration courses.
