Kategorie:ZFS Kategorie:Backup Kategorie:Solaris
Backup of ZFS snapshots on Solaris Cluster with Legato/EMC Networker
This describes how to set up a backup of the Solaris Cluster resource group named sample-rg.
The structure of my RGs is always:
RG:                <name>-rg
ZFS-HASP:          <name>-hasp-zfs-res
Logical Host:      <name>-lh-res
Logical Host Name: <name>-lh
ZPOOL:             <name>_pool
I used bash as the shell.
Define variables used in the following command lines
# NAME=sample
# RGname=${NAME}-rg
# NetworkerGroup=$(echo ${NAME} | tr 'a-z' 'A-Z' )
# ZPOOL=${NAME}_pool
# ZPOOL_BASEDIR=/local/${RGname}
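Just to illustrate the naming scheme: for NAME=sample the variables expand to
# echo ${RGname} ${NetworkerGroup} ${ZPOOL} ${ZPOOL_BASEDIR}
sample-rg SAMPLE sample_pool /local/sample-rg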
Define a resource for Networker
What we need now is a resource definition in our Networker directory like this:
# mkdir /nsr/{bin,log,res}
# cat > /nsr/res/${NetworkerGroup}.res <<EOF
type: savepnpc;
precmd: "/nsr/bin/nsr_snapshot.sh pre >/nsr/log/networker_precmd.log 2>&1";
pstcmd: "/nsr/bin/nsr_snapshot.sh pst >/nsr/log/networker_pstcmd.log 2>&1";
timeout: "08:00am";
abort precmd with group: Yes;
EOF
The pre-/pstcmd-script
!!!THIS CODE IS UNTESTED, DO NOT USE IT!!!
!!!THIS IS JUST AN EXAMPLE!!!
#!/bin/bash
cmd_option=$1
export cmd_option
SNAPSHOT_NAME="nsr"
BASE_LOG_DIR="/nsr/logs"
NSR_BACKUP_CLONE="nsr_backup"
# Commands
ZFS_CMD="/usr/sbin/zfs"
ZPOOL_CMD="/usr/sbin/zpool"
ZLOGIN_CMD="/usr/bin/zlogin"
ZONECFG_CMD="/usr/sbin/zonecfg"
DF_CMD="/usr/bin/df"
AWK_CMD="/usr/bin/nawk"
MKNOD_CMD="/usr/sbin/mknod"
PARGS_CMD="/usr/bin/pargs"
PTREE_CMD="/usr/bin/ptree"
CLRS_CMD="/usr/cluster/bin/clrs"
CLRG_CMD="/usr/cluster/bin/clrg"
CLRT_CMD="/usr/cluster/bin/clrt"
BASENAME_CMD="/usr/bin/basename"
GETENT_CMD="/usr/bin/getent"
SCHA_RESOURCE_GET_CMD="/usr/cluster/bin/scha_resource_get"
# Subdir in ZFS where to put ZFS-config
ZFS_SETUP_SUBDIR="cluster_config"
ZFS_CONFIG_FILE=ZFS_Setup.sh
# Oracle parameter
ORACLE_SID=SAMPLE
ORACLE_USER=oracle
GLOBAL_LOGFILE=${BASE_LOG_DIR}/$(${BASENAME_CMD} $0 .sh).log
exec >>${GLOBAL_LOGFILE} 2>&1
function print_option () {
option=$1; shift
# now process line
while [ $# -gt 0 ]
do
case $1 in
${option})
echo $2
shift
shift
;;
*)
shift
;;
esac
done
}
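# Example (illustration only, hypothetical savepnpc command line):
#   print_option -c /usr/sbin/savepnpc -c sample-lh -s nsrserver -g SAMPLE -LL
# prints "sample-lh", the word following the first "-c".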
function print_log () {
LOGFILE=$1 ; shift
if [ $# -gt 0 ]
then
printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "$*" >> ${LOGFILE}
else
#printf "%s (%s): " "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" >> ${LOGFILE}
while read data
do
printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "${data}" >> ${LOGFILE}
done
fi
}
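# Usage (illustration only):
#   print_log ${LOGFILE} "some message"   appends one timestamped line
#   some_command | print_log ${LOGFILE}   timestamps every line read from stdin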
function dump_zfs_config {
ZPOOL=$1
OUTPUT_FILE=$2
printf "\n\n# Create ZPool ${ZPOOL} with size $(${ZPOOL_CMD} list -Ho size ${ZPOOL}):\n\n" >> ${OUTPUT_FILE}
${ZPOOL_CMD} status ${ZPOOL} | ${AWK_CMD} '/config:/,/errors:/{if(/NAME/){getline; printf "Zpool structure of %s:\n\nzpool create %s",$1,$1; getline ; device=0; while(!/^$/ && !/errors:/){gsub(/mirror-[0-9]+/,"mirror",$1);gsub(/logs/,"log",$1);gsub(/(\/dev\/(r)*dsk\/)*c[0-9]+t[0-9A-F]+d[0-9]+(s[0-9]+)*/,"<device"device">",$1);if(/device/)device++;printf " %s",$1 ; getline}};printf "\n" ;}' >> ${OUTPUT_FILE}
printf "\n\n# Create ZFS\n\n" >> ${OUTPUT_FILE}
${ZFS_CMD} list -Hrt filesystem -o name,origin ${ZPOOL} | ${AWK_CMD} -v zfs_cmd=${ZFS_CMD} 'NR>1 && $2=="-"{print zfs_cmd,"create -o mountpoint=none",$1}' >> ${OUTPUT_FILE}
printf "\n\n# Set ZFS values\n\n" >> ${OUTPUT_FILE}
${ZFS_CMD} get -s local -Ho name,property,value -pr all ${ZPOOL} | ${AWK_CMD} -v zfs_cmd=${ZFS_CMD} '$2!="readonly"{printf "%s set -p %s=%s %s\n",zfs_cmd,$2,$3,$1}' >> ${OUTPUT_FILE}
}
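# The generated ZFS_Setup.sh looks roughly like this (hypothetical two-way mirror
# called sample_pool; real device names are replaced by <device0>, <device1>, ...):
#   zpool create sample_pool mirror <device0> <device1>
#   /usr/sbin/zfs create -o mountpoint=none sample_pool/data
#   /usr/sbin/zfs set -p mountpoint=/local/sample-rg sample_pool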
function dump_cluster_config {
RG=$1
OUTPUT_DIR=$2
${CLRG_CMD} export -o ${OUTPUT_DIR}/${RG}.clrg_export.xml ${RG}
for RES in $(${CLRS_CMD} list -g ${RG})
do
${CLRS_CMD} export -o ${OUTPUT_DIR}/${RES}.clrs_export.xml ${RES}
done
# Commands to recreate the RG
COMMAND_FILE="${OUTPUT_DIR}/${RG}.ClusterCreateCommands.txt"
printf "Recreate %s:\n%s create -i %s %s\n\n" "${RG}" "${CLRG_CMD}" "${OUTPUT_DIR}/${RG}.clrg_export.xml" "${RG}" > ${COMMAND_FILE}
for RT in SUNW.LogicalHostname SUNW.HAStoragePlus SUNW.gds LGTO.clnt
do
for RT_VERSION in $(${CLRT_CMD} list | ${AWK_CMD} -v rt=${RT} '$1 ~ rt')
do
for RES in $(${CLRS_CMD} list -g ${RG} -t ${RT_VERSION})
do
if [ "_${RT}_" == "_SUNW.LogicalHostname_" ]
then
printf "Add the following entries to all nodes!!!:\n/etc/inet/hosts:\n" >> ${COMMAND_FILE}
${GETENT_CMD} hosts $(${CLRS_CMD} show -p HostnameList ${RES} | ${AWK_CMD} '$1=="HostnameList:"{$1="";print}') >> ${COMMAND_FILE}
printf "\n" >> ${COMMAND_FILE}
fi
printf "Recreate %s:\n%s create -i %s %s\n\n" "${RES}" "${CLRS_CMD}" "${OUTPUT_DIR}/${RES}.clrs_export.xml" "${RES}" >> ${COMMAND_FILE}
done
done
done
}
function snapshot_pre {
DB=$1
DBUSER=$2
if [ $# -eq 3 -a "_$3_" != "__" ]
then
ZONE=$3
ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
ZONE_BASE=$(${ZONECFG_CMD} -z ${ZONE} info zonepath | ${AWK_CMD} '{print $NF;}')
ZONE_ROOT="${ZONE_BASE}/root"
else
ZONE_ROOT=""
ZONE_CMD="su - ${DBUSER} -c"
fi
if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
then
SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"
# Create script inside zone
cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(${AWK_CMD} -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
create pfile from spfile;
alter system archive log current;
alter database backup controlfile to trace;
alter database begin backup;
EOF
EOS
chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}
${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
fi
}
function snapshot_pst {
DB=$1
DBUSER=$2
if [ $# -eq 3 -a "_$3_" != "__" ]
then
ZONE=$3
ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
ZONE_BASE=$(${ZONECFG_CMD} -z ${ZONE} info zonepath | ${AWK_CMD} '{print $NF;}')
ZONE_ROOT="${ZONE_BASE}/root"
else
ZONE_ROOT=""
ZONE_CMD="su - ${DBUSER} -c"
fi
if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
then
SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"
# Create script inside zone
cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(${AWK_CMD} -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
alter database end backup;
alter system archive log current;
EOF
EOS
chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}
${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
fi
}
function snapshot_create {
ZPOOL=$1
SNAPSHOT_NAME=$2
print_log ${LOGFILE} "Create ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
${ZFS_CMD} snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}
for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
do
${ZFS_CMD} clone -o readonly=on ${zfs_snapshot} ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
${ZFS_CMD} mount ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE} 2>/dev/null
if [ "_$(${ZFS_CMD} get -Ho value mounted ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_yes_" ]
then
# echo /usr/sbin/save -s ${SERVER_NAME} -g ${GROUP_NAME} -LL -m ${CLIENT_NAME} $(${ZFS_CMD} get -Ho value mountpoint ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})
${ZFS_CMD} list -Ho creation,name ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE} | print_log ${LOGFILE}
fi
done
}
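# Note on the parameter expansion used above (illustration):
#   zfs_snapshot=sample_pool/data@nsr
#   ${zfs_snapshot/@*/}                      -> sample_pool/data
#   ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}  -> sample_pool/data/nsr_backup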
function snapshot_destroy {
ZPOOL=$1
SNAPSHOT_NAME=$2
if (${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME} > /dev/null)
then
for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
do
if [ "_$(${ZFS_CMD} get -Ho value mounted ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_yes_" ]
then
print_log ${LOGFILE} "Unmount ZFS clone ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}"
${ZFS_CMD} unmount ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
fi
# If this is a clone of ${zfs_snapshot}, then destroy it
if [ "_$(${ZFS_CMD} list -Ho origin ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_${zfs_snapshot}_" ]
then
print_log ${LOGFILE} "Destroy ZFS clone ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}"
${ZFS_CMD} destroy ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
fi
done
print_log ${LOGFILE} "Destroy ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
${ZFS_CMD} destroy -r ${ZPOOL}@${SNAPSHOT_NAME}
fi
}
function usage {
echo "Usage: $0 (pre|pst)"
echo "Usage: $0 init <ZPool-Name>"
echo "Usage: $0 dump <ZPool-Name> <Output-File>"
exit 1
}
case ${cmd_option} in
pre|pst)
case ${cmd_option} in
pre)
# Get commandline from parent pid
# pre /usr/sbin/savepnpc -c <NetworkerClient> -s <NetworkerServer> -g <NetworkerGroup> -LL
print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) Called from $(${PTREE_CMD} $$ | ${AWK_CMD} '/savepnpc/{print $0}')"
pid=$(${PTREE_CMD} $$ | ${AWK_CMD} '/savepnpc/{print $1}')
;;
pst)
# Get commandline from parent pid
# pst /usr/bin/pstclntsave -s <NetworkerServer> -g <NetworkerGroup> -c <NetworkerClient>
print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) Called from $(${PTREE_CMD} $$ | ${AWK_CMD} '/pstclntsave/{print $0}')"
pid=$(${PTREE_CMD} $$ | ${AWK_CMD} '/pstclntsave/{print $1}')
;;
esac
commandline="$(${PARGS_CMD} -c ${pid} | ${AWK_CMD} -F':' '$1 ~ /^argv/{printf $2}END{print;}')"
# Called from backupserver use -c
CLIENT_NAME=$(print_option -c ${commandline})
# If called from cmdline use -m
CLIENT_NAME=${CLIENT_NAME:-$(print_option -m ${commandline})}
# Last resort pre/post
CLIENT_NAME=${CLIENT_NAME:-${cmd_option}}
SERVER_NAME=$(print_option -s ${commandline})
GROUP_NAME=$(print_option -g ${commandline})
LOGFILE=${BASE_LOG_DIR}/${CLIENT_NAME}.log
print_log ${LOGFILE} "Called from ${commandline}"
named_pipe=/tmp/.named_pipe.$$
# Delete named pipe on exit
trap "rm -f ${named_pipe}" EXIT
# Create named pipe
${MKNOD_CMD} ${named_pipe} p
# Read from named pipe and send it to print_log
tee <${named_pipe} | print_log ${LOGFILE}&
# Close STDOUT & STDERR
exec 1>&-
exec 2>&-
# Redirect them to named pipe
exec >${named_pipe} 2>&1
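# From here on everything written to stdout/stderr goes through the named pipe
# and ends up timestamped in ${LOGFILE}.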
print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}"
# Get resource name from hostname
LH_RES=$(${CLRS_CMD} show -t SUNW.LogicalHostname -p HostnameList | ${AWK_CMD} -v Hostname="${CLIENT_NAME}" '/^Resource:/{res=$NF} /HostnameList:/ {for(i=2;i<=NF;i++){if($i == Hostname){print res}}}')
print_log ${LOGFILE} "LogicalHostname of ${CLIENT_NAME} is ${LH_RES}"
# Get resource group name from resource name
RG=$(${SCHA_RESOURCE_GET_CMD} -O GROUP -R ${LH_RES})
print_log ${LOGFILE} "Resource group of ${LH_RES} is ${RG}"
ZPOOLS=$(${CLRS_CMD} show -g ${RG} -p Zpools | ${AWK_CMD} '$1=="Zpools:"{$1="";print $0}')
print_log ${LOGFILE} "ZPools used in ${RG}: ${ZPOOLS}"
Start_command=$(${CLRS_CMD} show -p Start_command -g ${RG} | ${AWK_CMD} -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/')
print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}"
sczbt_config=$(print_option -P ${Start_command})/sczbt_$(print_option -R ${Start_command})
print_log ${LOGFILE} "sczbt_config is ${sczbt_config}"
ZONE=$(${AWK_CMD} -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config})
print_log ${LOGFILE} "Zone from ${sczbt_config} is ${ZONE}"
;;
init)
LOGFILE=${BASE_LOG_DIR}/init.log
if [ $# -ne 2 ]
then
echo "Wrong count of parameters."
echo "Use $0 init <ZPool-Name>"
exit 1
fi
ZPOOL=$2
print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) of zpool ${ZPOOL}"
print_log ${LOGFILE} "Begin init of zpool ${ZPOOL}"
;;
esac
case ${cmd_option} in
dump_cluster)
if [ $# -ne 3 ]
then
echo "Wrong count of parameters."
echo "Use $0 dump_cluster <Ressource_Group> <DIR>"
exit 1
fi
dump_cluster_config $2 $3
;;
dump)
if [ $# -ne 3 ]
then
echo "Wrong count of parameters."
echo "Use $0 dump <ZPool-Name> <File>"
exit 1
fi
dump_zfs_config $2 $3
;;
init)
snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
snapshot_create ${ZPOOL} ${SNAPSHOT_NAME}
print_log ${LOGFILE} "End init of zpool ${ZPOOL}"
;;
pre)
for ZPOOL in ${ZPOOLS}
do
snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
done
# snapshot_pre ${DB} ${DBUSER} ${ZONE}
# Find the directory in which to store the ZFS setup
for ZPOOL in ${ZPOOLS}
do
if [ "_$(${ZFS_CMD} list -Ho name ${ZPOOL}/${ZFS_SETUP_SUBDIR} 2>/dev/null)_" != "__" ]
then
CONFIG_DIR=$(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL}/${ZFS_SETUP_SUBDIR})
else
if [ -d $(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL})/${ZFS_SETUP_SUBDIR} ]
then
CONFIG_DIR=$(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL})/${ZFS_SETUP_SUBDIR}
fi
fi
if [ -d ${CONFIG_DIR} ]
then
printf "# Settings for ZFS\n\n" > ${CONFIG_DIR}/${ZFS_CONFIG_FILE}
ZONE_CONFIG_FILE=zonecfg_${ZONE}.export
${ZONECFG_CMD} -z ${ZONE} export > ${CONFIG_DIR}/${ZONE_CONFIG_FILE}
fi
done
for ZPOOL in ${ZPOOLS}
do
if [ "_${CONFIG_DIR}_" != "__" ]
then
dump_zfs_config ${ZPOOL} ${CONFIG_DIR}/${ZFS_CONFIG_FILE}
dump_cluster_config ${RG} ${CONFIG_DIR}
fi
snapshot_create ${ZPOOL} ${SNAPSHOT_NAME}
done
# snapshot_pst ${DB} ${DBUSER} ${ZONE}
print_log ${LOGFILE} "End backup of ${CLIENT_NAME}"
;;
pst)
#for ZPOOL in ${ZPOOLS}
#do
# snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
#done
print_log ${LOGFILE} "End backup of ${CLIENT_NAME}"
;;
*)
usage
;;
esac
print_log ${GLOBAL_LOGFILE} "End (${cmd_option}) Called from:"
${PTREE_CMD} $$ | print_log ${GLOBAL_LOGFILE}
exit 0
MD5 checksum
# digest -a md5 /nsr/bin/nsr_snapshot.sh
aedff1a8bfa8ee0a012cd7def115e626
!!!THIS CODE IS UNTESTED, DO NOT USE IT!!!
!!!THIS IS JUST AN EXAMPLE!!!
Registering new resource type LGTO.clnt
1. Install the Solaris client package LGTOclnt.
2. Register the new resource type in the cluster. On one node do:
# clrt register -f /usr/sbin/LGTO.clnt.rtr LGTO.clnt
Now you have a new resource type LGTO.clnt in your cluster.
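You can verify the registration with clrt list (the output may carry a version suffix):
# clrt list | grep LGTO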
Create client resource of type LGTO.clnt
So I use a script like this:
# RGname=sample-rg
# clrs create \
-t LGTO.clnt \
-g ${RGname} \
-p Resource_dependencies=$(basename ${RGname} -rg)-hasp-zfs-res \
-p clientname=$(basename ${RGname} -rg)-lh \
-p Network_resource=$(basename ${RGname} -rg)-lh-res \
-p owned_paths=${ZPOOL_BASEDIR} \
$(basename ${RGname} -rg)-nsr-res
This expands to:
# clrs create \
-t LGTO.clnt \
-g sample-rg \
-p Resource_dependencies=sample-hasp-zfs-res \
-p clientname=sample-lh \
-p Network_resource=sample-lh-res \
-p owned_paths=/local/sample-rg \
sample-nsr-res
Now we have a client name we can connect to: sample-lh
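As a quick check (the exact output depends on your cluster) you can ask the cluster for the state of the new resource:
# clrs status sample-nsr-res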