ZFS Networker: Difference between revisions
Line 53: | Line 53: | ||
<source lang=bash> | <source lang=bash> | ||
#!/bin/bash | #!/bin/bash | ||
function print_option () { | function print_option () { | ||
Line 76: | Line 77: | ||
if [ $# -gt 0 ] | if [ $# -gt 0 ] | ||
then | then | ||
printf "%s : %s\n" "$(date '+%Y%m%d %H:%M:%S')" "$*" >> ${LOGFILE} | printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "$*" >> ${LOGFILE} | ||
else | else | ||
printf "%s : " "$(date '+%Y%m%d %H:%M:%S')" >> ${LOGFILE} | printf "%s (%s): " "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" >> ${LOGFILE} | ||
cat >> ${LOGFILE} | #cat >> ${LOGFILE} | ||
while read data | |||
do | |||
printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "${data}" >> ${LOGFILE} | |||
done | |||
fi | fi | ||
} | } | ||
function snapshot_pre { | function snapshot_pre { | ||
Line 129: | Line 95: | ||
ZONE=$3 | ZONE=$3 | ||
ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}" | ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}" | ||
ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}' | ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}') | ||
ZONE_ROOT="${ZONE_BASE}/root" | ZONE_ROOT="${ZONE_BASE}/root" | ||
else | else | ||
Line 142: | Line 108: | ||
cat >${ZONE_ROOT}/{SCRIPT_NAME} <<EOS | cat >${ZONE_ROOT}/{SCRIPT_NAME} <<EOS | ||
#!/bin/bash | #!/bin/bash | ||
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print | DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab) | ||
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF | \${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF | ||
create pfile from spfile; | create pfile from spfile; | ||
Line 156: | Line 122: | ||
fi | fi | ||
} | } | ||
function snapshot_pst { | function snapshot_pst { | ||
Line 164: | Line 131: | ||
ZONE=$3 | ZONE=$3 | ||
ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}" | ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}" | ||
ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}' | ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}') | ||
ZONE_ROOT="${ZONE_BASE}/root" | ZONE_ROOT="${ZONE_BASE}/root" | ||
else | else | ||
Line 178: | Line 145: | ||
cat >${ZONE_ROOT}/{SCRIPT_NAME} <<EOS | cat >${ZONE_ROOT}/{SCRIPT_NAME} <<EOS | ||
#!/bin/bash | #!/bin/bash | ||
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print | DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab) | ||
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF | \${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF | ||
alter database end backup; | alter database end backup; | ||
Line 194: | Line 161: | ||
ZPOOL=$1 | ZPOOL=$1 | ||
SNAPSHOT_NAME=$2 | SNAPSHOT_NAME=$2 | ||
${ | print_log ${LOGFILE} "Create ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}" | ||
${ZFS_CMD} snapshot -r ${ZPOOL}@${SNAPSHOT_NAME} | |||
for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME}) | |||
do | |||
${ZFS_CMD} clone -o readonly=on ${zfs_snapshot} ${zfs_snapshot/@*/}/nsr_backup | |||
${ZFS_CMD} mount ${zfs_snapshot/@*/}/nsr_backup 2>/dev/null | |||
if ( df -h ${zfs_snapshot/@*/}/nsr_backup ) | |||
then | |||
${ZFS_CMD} list -Ho creation,name ${zfs_snapshot/@*/}/nsr_backup | print_log ${LOGFILE} | |||
fi | |||
done | |||
} | } | ||
Line 205: | Line 177: | ||
ZPOOL=$1 | ZPOOL=$1 | ||
SNAPSHOT_NAME=$2 | SNAPSHOT_NAME=$2 | ||
if (${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME} | if (${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME}) | ||
then | then | ||
for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME}) | |||
do | |||
if ( df -h ${zfs_snapshot/@*/}/nsr_backup ) | |||
then | |||
print_log ${LOGFILE} "Unmount ZFS clone ${zfs_snapshot/@*/}/nsr_backup" | |||
${ | ${ZFS_CMD} unmount ${zfs_snapshot/@*/}/nsr_backup | ||
fi | |||
# If this is a clone of ${zfs_snapshot}, then destroy it | |||
if [ "_$(${ZFS_CMD} list -Ho origin ${zfs_snapshot/@*/}/nsr_backup)_" == "_${zfs_snapshot}_" ] | |||
then | |||
print_log ${LOGFILE} "Destroy ZFS clone ${zfs_snapshot/@*/}/nsr_backup" | |||
${ZFS_CMD} destroy ${zfs_snapshot/@*/}/nsr_backup | |||
fi | |||
done | |||
print_log ${LOGFILE} "Destroy ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}" | |||
${ZFS_CMD} destroy -r ${ZPOOL}@${SNAPSHOT_NAME} | |||
fi | fi | ||
} | } | ||
Line 221: | Line 203: | ||
} | } | ||
case $ | cmd_option=$1 | ||
export cmd_option | |||
ORACLE_SID=SAMPLE | |||
ORACLE_USER=oracle | |||
SNAPSHOT_NAME="nsr" | |||
ZFS_CMD="/usr/sbin/zfs" | |||
ZLOGIN_CMD="/usr/bin/zlogin" | |||
case ${cmd_option} in | |||
pre) | pre) | ||
# Get commandline from parent pid | |||
# pre /usr/sbin/savepnpc -c <networker-client> -s <networker-server> -g <NSR_GROUP> -LL | |||
pid=$(ptree $$ | nawk '/savepnpc/{print $1}') | |||
;; | ;; | ||
pst) | pst) | ||
# Get commandline from parent pid | |||
# pst /usr/bin/pstclntsave -s <networker-server> -g <NSR_GROUP> -c <networker-client> | |||
pid=$(ptree $$ | nawk '/pstclntsave/{print $1}') | |||
;; | ;; | ||
esac | esac | ||
commandline="$(pargs -e ${pid} | head -1)" | |||
# Called from backupserver use -c | |||
CLIENT_NAME=$(print_option -c ${commandline}) | |||
# If called from cmdline use -m | |||
CLIENT_NAME=${CLIENT_NAME:-$(print_option -m ${commandline})} | |||
# Last resort pre/post | |||
CLIENT_NAME=${CLIENT_NAME:-${cmd_option}} | |||
SERVER_NAME=$(print_option -s ${commandline}) | |||
GROUP_NAME=$(print_option -g ${commandline}) | |||
LOGFILE=/nsr/logs/${CLIENT_NAME}.log | |||
print_log ${LOGFILE} "Called from ${commandline}" | |||
} | |||
named_pipe=/tmp/.named_pipe.$$ | |||
# | # Delete named pipe on exit | ||
trap "rm -f ${named_pipe}" EXIT | |||
# Create named pipe | |||
mknod ${named_pipe} p | |||
# Read from named pipe and send it to print_log | |||
print_log ${LOGFILE} | tee <${named_pipe} | print_log ${LOGFILE}& | ||
exec >>${ | # Close STDOUT & STDERR | ||
exec 1>&- | |||
exec 2>&- | |||
# Redirect them to named pipe | |||
exec >${named_pipe} 2>&1 | |||
print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}" | print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}" | ||
Line 294: | Line 271: | ||
Start_command=$(/usr/cluster/bin/clrs show -p Start_command -g ${RG} | /usr/bin/nawk -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/') | Start_command=$(/usr/cluster/bin/clrs show -p Start_command -g ${RG} | /usr/bin/nawk -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/') | ||
print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}" | print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}" | ||
sczbt_config=$(print_option -P ${Start_command}) | sczbt_config=$(print_option -P ${Start_command})/sczbt_$(print_option -R ${Start_command}) | ||
print_log ${LOGFILE} "sczbt_config is ${sczbt_config} | print_log ${LOGFILE} "sczbt_config is ${sczbt_config}" | ||
ZONE=$(nawk -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config} | ZONE=$(nawk -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config}) | ||
print_log ${LOGFILE} "Zone from ${sczbt_config} | print_log ${LOGFILE} "Zone from ${sczbt_config} is ${ZONE}" | ||
case $ | case ${cmd_option} in | ||
pre) | pre) | ||
snapshot_destroy ${ZPOOLS} ${SNAPSHOT_NAME} | snapshot_destroy ${ZPOOLS} ${SNAPSHOT_NAME} | ||
# snapshot_pre ${DB} ${DBUSER} ${ZONE} | |||
snapshot_create ${ZPOOLS} ${SNAPSHOT_NAME} | snapshot_create ${ZPOOLS} ${SNAPSHOT_NAME} | ||
# snapshot_pst ${DB} ${DBUSER} ${ZONE} | |||
;; | ;; | ||
pst) | pst) | ||
snapshot_destroy ${ | #snapshot_destroy ${ZPOOLS} ${SNAPSHOT_NAME} | ||
;; | ;; | ||
*) | *) | ||
Line 419: | Line 290: | ||
;; | ;; | ||
esac | esac | ||
print_log ${LOGFILE} "End backup of ${CLIENT_NAME}" | |||
</source> | </source> | ||
!!!THIS CODE IS UNTESTED DO NOT USE THIS!!! | !!!THIS CODE IS UNTESTED DO NOT USE THIS!!! |
Revision as of 13:32, 14 October 2014
Kategorie:ZFS Kategorie:Solaris
Backup of ZFS snapshots on Solaris Cluster with Legato/EMC Networker
This describes how to setup a backup of the Solaris Cluster resource group named sample-rg.
The structure of my RGs is always:
RG: <name>-rg ZFS-HASP: <name>-hasp-zfs-res Logical Host: <name>-lh-res Logical Host Name: <name>-lh ZPOOL: <name>_pool
I used the bash as shell.
Define variables used in the following command lines
# NAME=sample
# RGname=${NAME}-rg
# NetworkerGroup=$(echo ${NAME} | tr 'a-z' 'A-Z' )
# ZPOOL=${NAME}_pool
# ZPOOL_BASEDIR=/local/${RGname}
Define a resource for Networker
What we need now is a resource definition in our Networker directory like this:
# zfs create ${ZPOOL}/nsr
# mkdir ${ZPOOL_BASEDIR}/nsr/{bin,log,res}
# cat > ${ZPOOL_BASEDIR}/nsr/res/${NetworkerGroup}.res <<EOF
type: savepnpc;
precmd: "${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pre >${ZPOOL_BASEDIR}/nsr/log/networker_precmd.log 2>&1";
pstcmd: "${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pst >${ZPOOL_BASEDIR}/nsr/log/networker_pstcmd.log 2>&1";
timeout: "08:00am";
abort precmd with group: Yes;
EOF
And now create a link on every cluster node to this file
# ln -s ${ZPOOL_BASEDIR}/nsr/res/${NetworkerGroup}.res /nsr/res/${NetworkerGroup}.res
The pre-/pstcmd-script
!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!
!!!THIS JUST AN EXAMPLE!!!
Still not working...
#!/bin/bash
# print_option OPTION ARG...
# Scan ARG... for OPTION and print the word immediately following each
# occurrence (e.g. `print_option -c -c client -s srv` prints "client").
# Prints nothing when OPTION is absent.
function print_option () {
  wanted=$1; shift
  while [ $# -gt 0 ]
  do
    if [ "$1" = "${wanted}" ]
    then
      # Emit the option's value, then skip past flag and value.
      echo $2
      shift
      shift
    else
      shift
    fi
  done
}
# print_log LOGFILE [MESSAGE...]
# Append a timestamped entry "YYYYMMDD HH:MM:SS (mode): text" to LOGFILE.
# With MESSAGE arguments, log them as one line; with no arguments, read
# stdin and log every incoming line individually (used as a pipe sink).
# Globals: cmd_option (read) - the pre/pst mode tag included in each entry.
function print_log () {
LOGFILE=$1 ; shift
if [ $# -gt 0 ]
then
    printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "$*" >> "${LOGFILE}"
else
    # IFS= and read -r preserve leading whitespace and literal backslashes
    # (plain `read data` would mangle both in logged output).
    while IFS= read -r data
    do
        printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "${data}" >> "${LOGFILE}"
    done
fi
}
# snapshot_pre DB DBUSER [ZONE]
# Put the Oracle database into hot-backup mode before the ZFS snapshot:
# writes a small sqlplus wrapper script into the zone root (or the global
# zone when no ZONE is given), runs it as DBUSER via zlogin/su, removes it.
# Globals: ZLOGIN_CMD, ORACLE_SID, LOGFILE (read).
function snapshot_pre {
DB=$1
DBUSER=$2
if [ $# -eq 3 -a "_$3_" != "__" ]
then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
fi
# Only proceed if we can actually execute commands in the target environment.
if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
then
    SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"
    # Create script inside zone.
    # Fix: was `{SCRIPT_NAME}` (missing `$`), so the script was written to a
    # literal path and the chmod/exec/rm below operated on a missing file.
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
create pfile from spfile;
alter system archive log current;
alter database backup controlfile to trace;
alter database begin backup;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}
    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
fi
}
# snapshot_pst DB DBUSER [ZONE]
# Take the Oracle database out of hot-backup mode after the ZFS snapshot.
# Mirror image of snapshot_pre: writes a sqlplus wrapper into the zone root
# (or global zone), runs it as DBUSER via zlogin/su, then removes it.
# Globals: ZLOGIN_CMD, ORACLE_SID, LOGFILE (read).
function snapshot_pst {
DB=$1
DBUSER=$2
if [ $# -eq 3 -a "_$3_" != "__" ]
then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
fi
# Only proceed if we can actually execute commands in the target environment.
if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
then
    # Fix: temp script name said "pre" in the pst function.
    SCRIPT_NAME="tmp/.nsr-pst-snap-script.$$"
    # Create script inside zone.
    # Fix: was `{SCRIPT_NAME}` (missing `$`), so the script was written to a
    # literal path and the chmod/exec/rm below operated on a missing file.
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
alter database end backup;
alter system archive log current;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}
    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
fi
}
# snapshot_create ZPOOL SNAPSHOT_NAME
# Create a recursive ZFS snapshot ZPOOL@SNAPSHOT_NAME and, for every dataset
# snapshot it produced, a read-only clone <dataset>/nsr_backup for Networker
# to save from.
# Globals: ZFS_CMD, LOGFILE (read).
function snapshot_create {
ZPOOL=$1
SNAPSHOT_NAME=$2
print_log ${LOGFILE} "Create ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
${ZFS_CMD} snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}
# Fix: anchor the match to "@NAME" at end of line; the unanchored
# `grep ${SNAPSHOT_NAME}` also matched unrelated snapshots whose names
# merely contain the string (e.g. pool@nsr-old).
for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep "@${SNAPSHOT_NAME}\$")
do
    # ${zfs_snapshot/@*/} strips the "@snapname" suffix, leaving the dataset.
    ${ZFS_CMD} clone -o readonly=on ${zfs_snapshot} ${zfs_snapshot/@*/}/nsr_backup
    ${ZFS_CMD} mount ${zfs_snapshot/@*/}/nsr_backup 2>/dev/null
    # df succeeds only if the clone is mounted; its output lands in the log.
    if ( df -h ${zfs_snapshot/@*/}/nsr_backup )
    then
        ${ZFS_CMD} list -Ho creation,name ${zfs_snapshot/@*/}/nsr_backup | print_log ${LOGFILE}
    fi
done
}
# snapshot_destroy ZPOOL SNAPSHOT_NAME
# Tear down everything snapshot_create made: unmount and destroy each
# <dataset>/nsr_backup clone (only if it really originates from our
# snapshot), then recursively destroy ZPOOL@SNAPSHOT_NAME.
# No-op when the snapshot does not exist.
# Globals: ZFS_CMD, LOGFILE (read).
function snapshot_destroy {
ZPOOL=$1
SNAPSHOT_NAME=$2
if (${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME})
then
    # Fix: anchor the match to "@NAME" at end of line; the unanchored
    # `grep ${SNAPSHOT_NAME}` could select unrelated snapshots whose names
    # merely contain the string, and this loop destroys what it matches.
    for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep "@${SNAPSHOT_NAME}\$")
    do
        # df succeeds only while the clone is mounted.
        if ( df -h ${zfs_snapshot/@*/}/nsr_backup )
        then
            print_log ${LOGFILE} "Unmount ZFS clone ${zfs_snapshot/@*/}/nsr_backup"
            ${ZFS_CMD} unmount ${zfs_snapshot/@*/}/nsr_backup
        fi
        # If this is a clone of ${zfs_snapshot}, then destroy it
        if [ "_$(${ZFS_CMD} list -Ho origin ${zfs_snapshot/@*/}/nsr_backup)_" == "_${zfs_snapshot}_" ]
        then
            print_log ${LOGFILE} "Destroy ZFS clone ${zfs_snapshot/@*/}/nsr_backup"
            ${ZFS_CMD} destroy ${zfs_snapshot/@*/}/nsr_backup
        fi
    done
    print_log ${LOGFILE} "Destroy ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
    ${ZFS_CMD} destroy -r ${ZPOOL}@${SNAPSHOT_NAME}
fi
}
# Print the accepted invocation modes on stdout and abort the script.
function usage {
  printf 'Usage: %s (pre|pst)\n' "$0"
  exit 1
}
# --- Configuration & environment discovery ---------------------------------
# Mode is passed by the Networker savepnpc res file: "pre" or "pst".
cmd_option=$1
# Exported so print_log, which runs in pipeline subshells, can tag entries.
export cmd_option
ORACLE_SID=SAMPLE
ORACLE_USER=oracle
SNAPSHOT_NAME="nsr"
ZFS_CMD="/usr/sbin/zfs"
ZLOGIN_CMD="/usr/bin/zlogin"
# Locate the Networker parent process so its command line can be parsed
# for client/server/group names.
case ${cmd_option} in
pre)
# Get commandline from parent pid
# pre /usr/sbin/savepnpc -c <networker-client> -s <networker-server> -g <NSR_GROUP> -LL
pid=$(ptree $$ | nawk '/savepnpc/{print $1}')
;;
pst)
# Get commandline from parent pid
# pst /usr/bin/pstclntsave -s <networker-server> -g <NSR_GROUP> -c <networker-client>
pid=$(ptree $$ | nawk '/pstclntsave/{print $1}')
;;
esac
# First line of Solaris pargs output carries the full command line of ${pid}.
commandline="$(pargs -e ${pid} | head -1)"
# Called from backupserver use -c
CLIENT_NAME=$(print_option -c ${commandline})
# If called from cmdline use -m
CLIENT_NAME=${CLIENT_NAME:-$(print_option -m ${commandline})}
# Last resort pre/post
CLIENT_NAME=${CLIENT_NAME:-${cmd_option}}
SERVER_NAME=$(print_option -s ${commandline})
GROUP_NAME=$(print_option -g ${commandline})
LOGFILE=/nsr/logs/${CLIENT_NAME}.log
print_log ${LOGFILE} "Called from ${commandline}"
# From here on, all stdout/stderr is funneled through a named pipe into
# print_log so every line written to the log gets a timestamp. NOTE(review):
# order matters below - the reader must be started before stdout/stderr are
# redirected into the pipe, or the exec would block.
named_pipe=/tmp/.named_pipe.$$
# Delete named pipe on exit
trap "rm -f ${named_pipe}" EXIT
# Create named pipe
mknod ${named_pipe} p
# Read from named pipe and send it to print_log
tee <${named_pipe} | print_log ${LOGFILE}&
# Close STDOUT & STDERR
exec 1>&-
exec 2>&-
# Redirect them to named pipe
exec >${named_pipe} 2>&1
print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}"
# Find the SUNW.LogicalHostname resource whose HostnameList contains this
# client name (clrs prints "Resource:" then "HostnameList:" blocks).
LH_RES=$(/usr/cluster/bin/clrs show -t SUNW.LogicalHostname -p HostnameList | nawk -v Hostname="${CLIENT_NAME}" '/^Resource:/{res=$NF} /HostnameList:/ {for(i=2;i<=NF;i++){if($i == Hostname){print res}}}')
print_log ${LOGFILE} "LogicalHostname of ${CLIENT_NAME} is ${LH_RES}"
# Get resource group name from the resource name
RG=$(/usr/cluster/bin/scha_resource_get -O GROUP -R ${LH_RES})
print_log ${LOGFILE} "RessourceGroup of ${LH_RES} is ${RG}"
# Zpools managed by the group's HAStoragePlus resource - these get snapshotted.
ZPOOLS=$(/usr/cluster/bin/clrs show -g ${RG} -p Zpools | nawk '$1=="Zpools:"{$1="";print $0}')
print_log ${LOGFILE} "ZPools used in ${RG}: ${ZPOOLS}"
# The sczbt (zone boot) resource's Start_command carries -P <param dir>
# and -R <resource>, which locate its sczbt_<resource> config file.
Start_command=$(/usr/cluster/bin/clrs show -p Start_command -g ${RG} | /usr/bin/nawk -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/')
print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}"
sczbt_config=$(print_option -P ${Start_command})/sczbt_$(print_option -R ${Start_command})
print_log ${LOGFILE} "sczbt_config is ${sczbt_config}"
# Zone name comes from the Zonename="..." line of the sczbt config file.
ZONE=$(nawk -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config})
print_log ${LOGFILE} "Zone from ${sczbt_config} is ${ZONE}"
case ${cmd_option} in
pre)
# Drop any leftovers from a previous run, then create fresh snapshot+clones.
snapshot_destroy ${ZPOOLS} ${SNAPSHOT_NAME}
# snapshot_pre ${DB} ${DBUSER} ${ZONE}
snapshot_create ${ZPOOLS} ${SNAPSHOT_NAME}
# snapshot_pst ${DB} ${DBUSER} ${ZONE}
;;
pst)
# Cleanup after backup is currently disabled; leftovers are removed by the
# next "pre" run instead.
#snapshot_destroy ${ZPOOLS} ${SNAPSHOT_NAME}
;;
*)
usage
;;
esac
print_log ${LOGFILE} "End backup of ${CLIENT_NAME}"
!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!
!!!THIS JUST AN EXAMPLE!!!
Registering new resource type LGTO.clnt
1. Install Solaris client package LGTOclnt 2. Register new resource type in cluster. On one node do:
# clrt register -f /usr/sbin/LGTO.clnt.rtr LGTO.clnt
Now you have a new resource type LGTO.clnt in your cluster.
Create client resource of type LGTO.clnt
So I use scripts like this:
# RGname=sample-rg
# clrs create \
-t LGTO.clnt \
-g ${RGname} \
-p Resource_dependencies=$(basename ${RGname} -rg)-hasp-zfs-res \
-p clientname=$(basename ${RGname} -rg)-lh \
-p Network_resource=$(basename ${RGname} -rg)-lh-res \
-p owned_paths=${ZPOOL_BASEDIR} \
$(basename ${RGname} -rg)-nsr-res
This expands to:
# clrs create \
-t LGTO.clnt \
-g sample-rg \
-p Resource_dependencies=sample-hasp-zfs-res \
-p clientname=sample-lh \
-p Network_resource=sample-lh-res \
-p owned_paths=/local/sample-rg \
sample-nsr-res
Now we have a client name to which we can connect to: sample-lh