[[Category:ZFS|Backup]]
[[Category:Backup|Networker]]
[[Category:Solaris|Backup]]
=Backup of ZFS snapshots on Solaris Cluster with Legato/EMC Networker=

This describes how to set up a backup of the Solaris Cluster resource group named sample-rg.

The structure of my RGs is always:
<syntaxhighlight lang=bash>
RG:                <name>-rg
ZFS-HASP:          <name>-hasp-zfs-res
Logical Host:      <name>-lh-res
Logical Host Name: <name>-lh
ZPOOL:             <name>_pool
</syntaxhighlight>
I used bash as the shell.

==Define variables used in the following command lines==
<syntaxhighlight lang=bash>
# NAME=sample
# RGname=${NAME}-rg
# NetworkerGroup=$(echo ${NAME} | tr 'a-z' 'A-Z' )
# ZPOOL=${NAME}_pool
# ZPOOL_BASEDIR=/local/${RGname}
</syntaxhighlight>
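Just to illustrate (this is not part of the setup), with NAME=sample these expand to:
<syntaxhighlight lang=bash>
# echo ${RGname} ${NetworkerGroup} ${ZPOOL} ${ZPOOL_BASEDIR}
sample-rg SAMPLE sample_pool /local/sample-rg
</syntaxhighlight>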
==Define a resource for Networker==

What we need now is a resource definition in our Networker directory like this:
<syntaxhighlight lang=bash>
# mkdir /nsr/{bin,log,res}
# cat > /nsr/res/${NetworkerGroup}.res <<EOF
type: savepnpc;
precmd: "/nsr/bin/nsr_snapshot.sh pre >/nsr/log/networker_precmd.log 2>&1";
pstcmd: "/nsr/bin/nsr_snapshot.sh pst >/nsr/log/networker_pstcmd.log 2>&1";
timeout: "08:00am";
abort precmd with group: Yes;
EOF
</syntaxhighlight>
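savepnpc reads /nsr/res/<group>.res, named after the Networker group that starts the backup, runs precmd before and pstcmd after the save sets, and with "abort precmd with group" a failing precmd aborts the whole group. A quick check of the generated file (output shown for NAME=sample):
<syntaxhighlight lang=bash>
# cat /nsr/res/SAMPLE.res
type: savepnpc;
precmd: "/nsr/bin/nsr_snapshot.sh pre >/nsr/log/networker_precmd.log 2>&1";
pstcmd: "/nsr/bin/nsr_snapshot.sh pst >/nsr/log/networker_pstcmd.log 2>&1";
timeout: "08:00am";
abort precmd with group: Yes;
</syntaxhighlight>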
==The pre-/pstcmd-script==

!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!

!!!THIS IS JUST AN EXAMPLE!!!
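The script below is installed as /nsr/bin/nsr_snapshot.sh and dispatches on its first argument: pre and pst are what savepnpc calls, while init, initall, dump, dump_cluster and get_slaves are meant for manual use.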
<syntaxhighlight lang=bash>
#!/bin/bash

cmd_option=$1
export cmd_option

SNAPSHOT_NAME="nsr"
BASE_LOG_DIR="/nsr/logs"
NSR_BACKUP_CLONE="nsr_backup"
# Commands
ZFS_CMD="/usr/sbin/zfs"
ZPOOL_CMD="/usr/sbin/zpool"
ZLOGIN_CMD="/usr/sbin/zlogin"
ZONECFG_CMD="/usr/sbin/zonecfg"
SVCS_CMD="/usr/sbin/svcs"
SVCADM_CMD="/usr/sbin/svcadm"
DF_CMD="/usr/bin/df"
RM_CMD="/usr/bin/rm"
AWK_CMD="/usr/bin/nawk"
MKNOD_CMD="/usr/sbin/mknod"
XARGS_CMD="/usr/bin/xargs"
PARGS_CMD="/usr/bin/pargs"
PTREE_CMD="/usr/bin/ptree"
CLRS_CMD="/usr/cluster/bin/clrs"
CLRG_CMD="/usr/cluster/bin/clrg"
CLRT_CMD="/usr/cluster/bin/clrt"
BASENAME_CMD="/usr/bin/basename"
GETENT_CMD="/usr/bin/getent"
SCHA_RESOURCE_GET_CMD="/usr/cluster/bin/scha_resource_get"
WGET_CMD=/usr/sfw/bin/wget
HOSTNAME_CMD="/usr/bin/uname -n"


# Subdir in ZFS where to put ZFS-config
ZFS_SETUP_SUBDIR="cluster_config"
ZFS_CONFIG_FILE=ZFS_Setup.sh

# Oracle parameter
ORACLE_SID=SAMPLE
ORACLE_USER=oracle

# Sophora parameter
SOPHORA_FMRI="svc:/cms/sophora:default"
SOPHORA_USER=admin
SOPHORA_PASS=password


GLOBAL_LOGFILE=${BASE_LOG_DIR}/$(${BASENAME_CMD} $0 .sh).log

# For all but get_slaves redirect output to log
case ${cmd_option} in
get_slaves)
  ;;
*)
  exec >>${GLOBAL_LOGFILE} 2>&1
  ;;
esac

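# print_option <flag> <words...>: scans the remaining words for "<flag> value"
# and echoes the value, e.g. "print_option -g ${commandline}" yields the group.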
function print_option () {
  option=$1; shift
  # now process line
  while [ $# -gt 0 ]
  do
    case $1 in
    ${option})
        echo $2
        shift
        shift
        ;;
    *)
        shift
        ;;
    esac
  done
}

function sophora_startup () {
  SOPHORA_ZONE=$1 # Zone for zlogin
  SOPHORA_FMRI=$2 # FMRI for svcadm
  print_log ${LOGFILE} "Starting sophora in ${SOPHORA_ZONE}..."
  ${ZLOGIN_CMD} ${SOPHORA_ZONE} ${SVCADM_CMD} enable ${SOPHORA_FMRI}
}

function sophora_shutdown () {
  SOPHORA_ZONE=$1 # Zone for zlogin
  SOPHORA_FMRI=$2 # FMRI for svcadm
  print_log ${LOGFILE} "Shutting down sophora in ${SOPHORA_ZONE}..."
  ${ZLOGIN_CMD} ${SOPHORA_ZONE} ${SVCADM_CMD} disable -t ${SOPHORA_FMRI}
}

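# sophora_get_slaves: queries the Sophora content API in the zone via wget and
# pulls the "hostname" fields out of the returned JSON with nawk; the trailing
# xargs/basename strips the .server.de domain suffix from each name.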
function sophora_get_slaves () {
  SOPHORA_ZONE=$1 # Zone for zlogin
  SOPHORA_PORT=$2 # Sophora port at localhost
  SOPHORA_USER=$3 # Sophora admin user
  SOPHORA_PASS=$4 # Sophora admin password
  ${ZLOGIN_CMD} ${SOPHORA_ZONE} \
    ${WGET_CMD} \
      -qO- \
      --no-proxy \
      --http-user=${SOPHORA_USER} \
      --http-password=${SOPHORA_PASS} \
      "http://localhost:${SOPHORA_PORT}/content-api/servers/?replicationMode=SLAVE" | \
    ${AWK_CMD} '
function get_param(param,name){
  name="\""name"\"";
  count=split(param,tupel,/,/);
  for(i=1;i<=count;i++){
    split(tupel[i],part,/:/);
    if(part[1]==name){
      gsub(/\"/,"",part[2]);return part[2];
    }
  }
}
{
  json=$0;
  gsub(/(\[\{|\}\])/,"",json);
  elements=split(json,array,/\},\{/);
  for(element=1;element<=elements;element++){
    print get_param(array[element],"hostname");
  }
}' | ${XARGS_CMD} -n 1 -i ${BASENAME_CMD} {} .server.de

}

function get_zone_hostname () {
  ${ZLOGIN_CMD} $1 ${HOSTNAME_CMD}
}

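# print_log <logfile> [message...]: with arguments, writes one timestamped
# line; without, it timestamps every line read from stdin (for use in pipes).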
function print_log () {
  LOGFILE=$1 ; shift
  if [ $# -gt 0 ]
  then
    printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "$*" >> ${LOGFILE}
  else
    #printf "%s (%s): " "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" >> ${LOGFILE}
    while read data
    do
      printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "${data}" >> ${LOGFILE}
    done
  fi
}

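# dump_zfs_config <zpool> <file>: appends a rebuild recipe to <file>: the zpool
# layout (devices replaced by <deviceN> placeholders), zfs create commands for
# all filesystems, and every locally set property as a zfs set command.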
function dump_zfs_config {
  ZPOOL=$1
  OUTPUT_FILE=$2
  printf "\n\n# Create ZPool ${ZPOOL} with size $(${ZPOOL_CMD} list -Ho size ${ZPOOL}):\n\n" >> ${OUTPUT_FILE}
  ${ZPOOL_CMD} status ${ZPOOL} | ${AWK_CMD} '/config:/,/errors:/{if(/NAME/){getline; printf "Zpool structure of %s:\n\nzpool create %s",$1,$1; getline ; device=0; while(!/^$/ && !/errors:/){gsub(/mirror-[0-9]+/,"mirror",$1);gsub(/logs/,"log",$1);gsub(/(\/dev\/(r)*dsk\/)*c[0-9]+t[0-9A-F]+d[0-9]+(s[0-9]+)*/,"<device"device">",$1);if(/device/)device++;printf " %s",$1 ; getline}};printf "\n" ;}' >> ${OUTPUT_FILE}
  printf "\n\n# Create ZFS\n\n"   >> ${OUTPUT_FILE}
  ${ZFS_CMD} list -Hrt filesystem -o name,origin ${ZPOOL} | ${AWK_CMD} -v zfs_cmd=${ZFS_CMD} 'NR>1 && $2=="-"{print zfs_cmd,"create -o mountpoint=none",$1}' >> ${OUTPUT_FILE}
  printf "\n\n# Set ZFS values\n\n" >> ${OUTPUT_FILE}
  ${ZFS_CMD} get -s local -Ho name,property,value -pr all ${ZPOOL} | ${AWK_CMD} -v zfs_cmd=${ZFS_CMD} '$2!="readonly"{printf "%s set -p %s=%s %s\n",zfs_cmd,$2,$3,$1}' >> ${OUTPUT_FILE}
}

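# dump_cluster_config <rg> <dir>: exports the resource group and each of its
# resources as XML into <dir> and writes <rg>.ClusterCreateCommands.txt with
# the clrg/clrs create commands (plus /etc/inet/hosts entries) to replay them.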
function dump_cluster_config {
  RG=$1
  OUTPUT_DIR=$2
  ${RM_CMD} -f ${OUTPUT_DIR}/${RG}.clrg_export.xml
  ${CLRG_CMD} export -o ${OUTPUT_DIR}/${RG}.clrg_export.xml ${RG}
  for RES in $(${CLRS_CMD} list -g ${RG})
  do
    ${RM_CMD} -f ${OUTPUT_DIR}/${RES}.clrs_export.xml
    ${CLRS_CMD} export -o ${OUTPUT_DIR}/${RES}.clrs_export.xml ${RES}
  done

  # Commands to recreate the RG
  COMMAND_FILE="${OUTPUT_DIR}/${RG}.ClusterCreateCommands.txt"
  printf "Recreate %s:\n%s create -i %s %s\n\n" "${RG}" "${CLRG_CMD}" "${OUTPUT_DIR}/${RG}.clrg_export.xml" "${RG}" > ${COMMAND_FILE}
  for RT in SUNW.LogicalHostname SUNW.HAStoragePlus SUNW.gds LGTO.clnt
  do
    for RT_VERSION in $(${CLRT_CMD} list | ${AWK_CMD} -v rt=${RT} '$1 ~ rt')
    do
      for RES in $(${CLRS_CMD} list -g ${RG} -t ${RT_VERSION})
      do
        if [ "_${RT}_" == "_SUNW.LogicalHostname_" ]
        then
          printf "Add the following entries to all nodes!!!:\n/etc/inet/hosts:\n" >> ${COMMAND_FILE}
          ${GETENT_CMD} hosts $(${CLRS_CMD} show -p HostnameList ${RES} | ${AWK_CMD} '$1=="HostnameList:"{$1="";print}') >> ${COMMAND_FILE}
          printf "\n" >> ${COMMAND_FILE}
        fi
        printf "Recreate %s:\n%s create -i %s %s\n\n" "${RES}" "${CLRS_CMD}" "${OUTPUT_DIR}/${RES}.clrs_export.xml" "${RES}" >> ${COMMAND_FILE}
      done
    done
  done
}

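# snapshot_pre <db> <dbuser> [zone]: generates a small sqlplus script inside
# the zone (or runs it via su when no zone is given) that saves a pfile and a
# controlfile trace, forces a log switch and puts Oracle into hot backup mode.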
function snapshot_pre {
  DB=$1
  DBUSER=$2
  if [ $# -eq 3 -a "_$3_" != "__" ]
  then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | ${AWK_CMD} '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
  else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
  fi
  if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
  then
    SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"

    # Create script inside zone
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(${AWK_CMD} -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
create pfile from spfile;
alter system archive log current;
alter database backup controlfile to trace;
alter database begin backup;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}

    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
  fi
}


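# snapshot_pst: the counterpart of snapshot_pre; takes the database out of hot
# backup mode (alter database end backup) and forces another log switch.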
function snapshot_pst {
  DB=$1
  DBUSER=$2
  if [ $# -eq 3 -a "_$3_" != "__" ]
  then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | ${AWK_CMD} '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
  else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
  fi

  if ( ${ZONE_CMD} echo >/dev/null 2>&1 )
  then
    SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"

    # Create script inside zone
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(${AWK_CMD} -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
alter database end backup;
alter system archive log current;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}

    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
  fi
}

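# snapshot_create <zpool> <name>: unmonitors the cluster resource owning the
# pool, takes a recursive snapshot, mounts a read-only clone of each snapshot
# as .../<NSR_BACKUP_CLONE> for Networker to save, then re-enables monitoring.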
function snapshot_create {
  ZPOOL=$1
  SNAPSHOT_NAME=$2
  RES="$(${CLRS_CMD} show -p ZPools | ${AWK_CMD} -v pool=${ZPOOL} '/^Resource:/{res=$NF;}$NF ~ pool{print res;}')"

  # Because of problems with unmounting during cluster monitoring, disable monitoring for this step
  print_log ${LOGFILE} "Telling Cluster not to monitor ${RES}"
  if [ "_${RES}_" != "__" ]
  then
    ${CLRS_CMD} unmonitor ${RES}
  fi

  print_log ${LOGFILE} "Create ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
  ${ZFS_CMD} snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}
  for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
  do
    ${ZFS_CMD} clone -o readonly=on ${zfs_snapshot} ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
    ${ZFS_CMD} mount ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE} 2>/dev/null
    if [ "_$(${ZFS_CMD} get -Ho value mounted ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_yes_" ]
    then
#      echo /usr/sbin/save -s ${SERVER_NAME} -g ${GROUP_NAME} -LL -m ${CLIENT_NAME} $(${ZFS_CMD} get -Ho value mountpoint ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})
      ${ZFS_CMD} list -Ho creation,name ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE} |  print_log ${LOGFILE}
    fi
  done

  print_log ${LOGFILE} "Telling Cluster to monitor ${RES} again"
  if [ "_${RES}_" != "__" ]
  then
    sleep 1
    ${CLRS_CMD} monitor ${RES}
  fi
}

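# snapshot_destroy <zpool> <name>: the inverse of snapshot_create; unmounts and
# destroys the backup clones, then the recursive snapshot, again with resource
# monitoring disabled around the operation.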
function snapshot_destroy {
  ZPOOL=$1
  SNAPSHOT_NAME=$2

  RES="$(${CLRS_CMD} show -p ZPools | ${AWK_CMD} -v pool=${ZPOOL} '/^Resource:/{res=$NF;}$NF ~ pool{print res;}')"

  # Because of problems with unmounting during cluster monitoring, disable monitoring for this step
  print_log ${LOGFILE} "Telling Cluster not to monitor ${RES}"
  if [ "_${RES}_" != "__" ]
  then
    ${CLRS_CMD} unmonitor ${RES}
  fi

  if ( ${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME} >/dev/null 2>&1 )
  then
    for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
    do
      if [ "_$(${ZFS_CMD} get -Ho value mounted ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_yes_" ]
      then
        print_log ${LOGFILE} "Unmount ZFS clone ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}"
        ${ZFS_CMD} unmount ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
      fi
      # If this is a clone of ${zfs_snapshot}, then destroy it
      if [ "_$(${ZFS_CMD} list -Ho origin ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE})_" == "_${zfs_snapshot}_" ]
      then
        print_log ${LOGFILE} "Destroy ZFS clone ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}"
        ${ZFS_CMD} destroy ${zfs_snapshot/@*/}/${NSR_BACKUP_CLONE}
      fi
    done
    print_log ${LOGFILE} "Destroy ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
    ${ZFS_CMD} destroy -r ${ZPOOL}@${SNAPSHOT_NAME}
  fi

  print_log ${LOGFILE} "Telling Cluster to monitor ${RES} again"
  if [ "_${RES}_" != "__" ]
  then
    ${CLRS_CMD} monitor ${RES}
  fi
}

function usage {
  echo "Usage: $0 (pre|pst)"
  echo "Usage: $0 init <ZPool-Name>"
  echo "Usage: $0 initall"
  echo "Usage: $0 dump <ZPool-Name> <Output-File>"
  exit 1
}

case ${cmd_option} in
pre|pst)
  case ${cmd_option} in
  pre)
    # Get commandline from parent pid
    # pre /usr/sbin/savepnpc -c <NetworkerClient> -s <NetworkerServer> -g <NetworkerGroup> -LL
    print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) Called from $(${PTREE_CMD} $$ | ${AWK_CMD} '/savepnpc/{print $0}')"
    pid=$(${PTREE_CMD} $$ | ${AWK_CMD} '/savepnpc/{print $1}')
    ;;
  pst)
    # Get commandline from parent pid
    # pst /usr/bin/pstclntsave -s <NetworkerServer> -g <NetworkerGroup> -c <NetworkerClient>
    print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) Called from $(${PTREE_CMD} $$ | ${AWK_CMD} '/pstclntsave/{print $0}')"
    pid=$(${PTREE_CMD} $$ | ${AWK_CMD} '/pstclntsave/{print $1}')
    ${PTREE_CMD} $$ | print_log ${GLOBAL_LOGFILE}
    print_log ${GLOBAL_LOGFILE} "(${cmd_option}) PID=${pid}"
    ;;
  esac

  commandline="$(${PARGS_CMD} -c ${pid} | ${AWK_CMD} -F':' '$1 ~ /^argv/{printf $2}END{print;}')"
  # Called from backupserver use -c
  CLIENT_NAME=$(print_option -c ${commandline})
  # If called from cmdline use -m
  CLIENT_NAME=${CLIENT_NAME:-$(print_option -m ${commandline})}
  # Last resort pre/post
  CLIENT_NAME=${CLIENT_NAME:-${cmd_option}}

  SERVER_NAME=$(print_option -s ${commandline})
  GROUP_NAME=$(print_option -g ${commandline})

  LOGFILE=${BASE_LOG_DIR}/${CLIENT_NAME}.log
  print_log ${LOGFILE} "Called from ${commandline}"

  named_pipe=/tmp/.named_pipe.$$

  # Delete named pipe on exit
  trap "rm -f ${named_pipe}" EXIT
  # Create named pipe
  ${MKNOD_CMD} ${named_pipe} p

  # Read from named pipe and send it to print_log
  tee <${named_pipe} | print_log ${LOGFILE}&
  # Close STDOUT & STDERR
  exec 1>&-
  exec 2>&-
  # Redirect them to named pipe
  exec >${named_pipe} 2>&1

  print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}"

  # Get resource name from hostname
  LH_RES=$(${CLRS_CMD} show -t SUNW.LogicalHostname -p HostnameList | ${AWK_CMD} -v Hostname="${CLIENT_NAME}" '/^Resource:/{res=$NF} /HostnameList:/ {for(i=2;i<=NF;i++){if($i == Hostname){print res}}}')

  print_log ${LOGFILE} "LogicalHostname of ${CLIENT_NAME} is ${LH_RES}"

  # Get resource group name from resource name
  RG=$(${SCHA_RESOURCE_GET_CMD} -O GROUP -R ${LH_RES})
  print_log ${LOGFILE} "ResourceGroup of ${LH_RES} is ${RG}"


  ZPOOLS=$(${CLRS_CMD} show -g ${RG} -p Zpools  | ${AWK_CMD} '$1=="Zpools:"{$1="";print $0}')
  print_log ${LOGFILE} "ZPools used in ${RG}: ${ZPOOLS}"

  Start_command=$(${CLRS_CMD} show -p Start_command -g ${RG} | ${AWK_CMD} -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/')
  print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}"
  sczbt_config=$(print_option -P ${Start_command})/sczbt_$(print_option -R ${Start_command})
  print_log ${LOGFILE} "sczbt_config is ${sczbt_config}"
  ZONE=$(${AWK_CMD} -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config})
  print_log ${LOGFILE} "Zone from ${sczbt_config} is ${ZONE}"
  ;;
init)
  LOGFILE=${BASE_LOG_DIR}/init.log
  if [ $# -ne 2 ]
  then
    echo "Wrong count of parameters."
    echo "Use $0 init <ZPool-Name>"
    exit 1
  fi
  ZPOOL=$2
  print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option}) of zpool ${ZPOOL}"
  print_log ${LOGFILE} "Begin init of zpool ${ZPOOL}"
  ;;
initall)
  LOGFILE=${BASE_LOG_DIR}/initall.log
  print_log ${GLOBAL_LOGFILE} "Begin (${cmd_option})"
  ;;
get_slaves)
  if [ $# -ne 5 ]
  then
    echo "Wrong count of parameters."
    echo "Use $0 get_slaves <Zone-Name> <Sophora-Port> <Sophora-Adminuser> <Sophora-Password>"
    exit 1
  fi
  echo "Slave node(s): $(sophora_get_slaves $2 $3 $4 $5)"
  exit 0
  ;;
esac

case ${cmd_option} in
dump_cluster)
  if [ $# -ne 3 ]
  then
    echo "Wrong count of parameters."
    echo "Use $0 dump_cluster <Ressource_Group> <DIR>"
    exit 1
  fi
  dump_cluster_config $2 $3
  ;;
dump)
  if [ $# -ne 3 ]
  then
    echo "Wrong count of parameters."
    echo "Use $0 dump <ZPool-Name> <File>"
    exit 1
  fi
  dump_zfs_config $2 $3
  ;;
init)
  snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
  snapshot_create  ${ZPOOL} ${SNAPSHOT_NAME}
  print_log ${LOGFILE} "End   init of zpool ${ZPOOL}"
  ;;
initall)
  for ZPOOL in $(${ZPOOL_CMD} list -Ho name)
  do
    if [ "_${ZPOOL}_" == "_rpool_" ]
    then
      continue
    fi
    print_log ${LOGFILE} "Begin init of zpool ${ZPOOL}"
    snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
    snapshot_create  ${ZPOOL} ${SNAPSHOT_NAME}
    print_log ${LOGFILE} "End   init of zpool ${ZPOOL}"
  done
  ;;
pre)
  for ZPOOL in ${ZPOOLS}
  do
    snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
  done

  # Shutdown Sophora?
  startup="No"
  case ${ZONE} in
  arcus-rg)
    # Staging zones
    #sophora_shutdown ${ZONE} ${SOPHORA_FMRI}
    #startup="Yes"
    ;;
  incus-zone|velum-zone)
    SOPHORA_ADMINPORT=1196
    # Master-/slave-zones
    is_slave=0
    zone_hostname=$(get_zone_hostname ${ZONE})
    for slave in $(sophora_get_slaves ${ZONE} ${SOPHORA_ADMINPORT} ${SOPHORA_USER} ${SOPHORA_PASS})
    do
      print_log ${LOGFILE} "_${slave}_ == _${zone_hostname}_?"
      if [ "_${slave}_" == "_${zone_hostname}_" ]
      then
        is_slave=1
      fi
    done

    if [ ${is_slave} -eq 1 ]
    then
      # Slave
      print_log ${LOGFILE} "Slave..."
      sophora_shutdown ${ZONE} ${SOPHORA_FMRI}
      startup="Yes"
    else
      # Master
      print_log ${LOGFILE} "Master... Not shutting down Sophora"
    fi
    ;;
  merkel-zone|brandt-zone|schmidt-zone)
    SOPHORA_ADMINPORT=1396
    # Master-/slave-zones
    is_slave=0
    zone_hostname=$(get_zone_hostname ${ZONE})
    for slave in $(sophora_get_slaves ${ZONE} ${SOPHORA_ADMINPORT} ${SOPHORA_USER} ${SOPHORA_PASS})
    do
      print_log ${LOGFILE} "_${slave}_ == _${zone_hostname}_?"
      if [ "_${slave}_" == "_${zone_hostname}_" ]
      then
        is_slave=1
      fi
    done

    if [ ${is_slave} -eq 1 ]
    then
      # Slave
      print_log ${LOGFILE} "Slave..."
      sophora_shutdown ${ZONE} ${SOPHORA_FMRI}
      startup="Yes"
    else
      # Master
      print_log ${LOGFILE} "Master... Not shutting down Sophora"
    fi
    ;;
  *)
    ;;
  esac

  # Find the dir to write down zfs-setup
  for ZPOOL in ${ZPOOLS}
  do
    if [ "_$(${ZFS_CMD} list -Ho name ${ZPOOL}/${ZFS_SETUP_SUBDIR} 2>/dev/null)_" != "__" ]
    then
      CONFIG_DIR=$(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL}/${ZFS_SETUP_SUBDIR})
    else
      if [ -d $(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL})/${ZFS_SETUP_SUBDIR} ]
      then
        CONFIG_DIR=$(${ZFS_CMD} get -Ho value mountpoint ${ZPOOL})/${ZFS_SETUP_SUBDIR}
      fi
    fi
    if [ -d ${CONFIG_DIR} ]
    then
      printf "# Settings for ZFS\n\n" > ${CONFIG_DIR}/${ZFS_CONFIG_FILE}

      ZONE_CONFIG_FILE=zonecfg_${ZONE}.export
      [ "_${ZONE}_" != "__" ] && ${ZONECFG_CMD} -z ${ZONE} export > ${CONFIG_DIR}/${ZONE_CONFIG_FILE}
    fi
  done

  # Save configs and create snapshots
  for ZPOOL in ${ZPOOLS}
  do
    if [ "_${CONFIG_DIR}_" != "__" ]
    then
      # Save zfs config
      dump_zfs_config ${ZPOOL} ${CONFIG_DIR}/${ZFS_CONFIG_FILE}
      # Save Clusterconfig
      dump_cluster_config ${RG} ${CONFIG_DIR}
    fi
    snapshot_create  ${ZPOOL} ${SNAPSHOT_NAME}
  done

  # Startup Sophora?
  if [ "_${startup}_" == "_Yes_" ]
  then
    sophora_startup ${ZONE} ${SOPHORA_FMRI}
  fi
  print_log ${LOGFILE} "End   backup of ${CLIENT_NAME}"
  ;;
pst)
  for ZPOOL in ${ZPOOLS}
  do
    snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
  done
  print_log ${LOGFILE} "End   backup of ${CLIENT_NAME}"
  ;;
*)
  usage
  ;;
esac
print_log ${GLOBAL_LOGFILE} "End   (${cmd_option}) Called from:"
${PTREE_CMD} $$ | print_log ${GLOBAL_LOGFILE}
exit 0
</syntaxhighlight>
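To smoke-test the snapshot handling outside of a Networker run, the init subcommand can be pointed at a single pool by hand (the pool name is just the example from above):
<syntaxhighlight lang=bash>
# /nsr/bin/nsr_snapshot.sh init sample_pool
# zfs list -t snapshot -r sample_pool
</syntaxhighlight>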
MD5-Checksum
<syntaxhighlight lang=bash>
#  digest -a md5 /nsr/bin/nsr_snapshot.sh
01be6677ddf4342b625b1aa59d805628
</syntaxhighlight>
!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!

!!!THIS IS JUST AN EXAMPLE!!!

==Restore/Recover==
===Set some variables===
<syntaxhighlight lang=bash>
NSR_CLIENT="sample-cl"
NSR_SERVER="nsr-server"
ZPOOL="sample_pool"

RG="${NSR_CLIENT%-cl}-rg"
ZONE="${NSR_CLIENT%-cl}-zone"
</syntaxhighlight>
===Look for a valid backup===
<syntaxhighlight lang=bash>
# /usr/sbin/mminfo -s ${NSR_SERVER} -o t -N /local/${RG}/cluster_config/nsr_backup
</syntaxhighlight>
===Restore ZFS configuration===
<syntaxhighlight lang=bash>
# /usr/sbin/recover -s ${NSR_SERVER} -c ${NSR_CLIENT} -d /tmp -a /local/${RG}/cluster_config/nsr_backup/ZFS_Setup.sh
</syntaxhighlight>
Look into the file /tmp/ZFS_Setup.sh, which should look like this:
<syntaxhighlight lang=bash>
# Create ZPool sample_pool with size 1.02T:
Zpool structure of sample_pool:
zpool create sample_pool mirror <device0> <device1>

# Create ZFS
/usr/sbin/zfs create -o mountpoint=none sample_pool/app
/usr/sbin/zfs create -o mountpoint=none sample_pool/cluster_config
/usr/sbin/zfs create -o mountpoint=none sample_pool/data1
/usr/sbin/zfs create -o mountpoint=none sample_pool/data2
/usr/sbin/zfs create -o mountpoint=none sample_pool/home
/usr/sbin/zfs create -o mountpoint=none sample_pool/log
/usr/sbin/zfs create -o mountpoint=none sample_pool/usr_local
/usr/sbin/zfs create -o mountpoint=none sample_pool/zone

# Set ZFS values
/usr/sbin/zfs set -p reservation=104857600 sample_pool
/usr/sbin/zfs set -p mountpoint=none sample_pool
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/app sample_pool/app
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/cluster_config sample_pool/cluster_config
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/data1 sample_pool/data1
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/data2 sample_pool/data2
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/home sample_pool/home
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/log sample_pool/log
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/usr_local sample_pool/usr_local
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/zone sample_pool/zone
/usr/sbin/zfs set -p zpdata:zn=sample-zone sample_pool/zone
/usr/sbin/zfs set -p zpdata:rbe=S10_U9 sample_pool/zone
/usr/sbin/zfs set -p mountpoint=/local/sample-rg/zone-zfsBE_20121105 sample_pool/zone-zfsBE_20121105
/usr/sbin/zfs set -p zoned=off sample_pool/zone-zfsBE_20121105
/usr/sbin/zfs set -p canmount=on sample_pool/zone-zfsBE_20121105
/usr/sbin/zfs set -p zpdata:zn=sample-zone sample_pool/zone-zfsBE_20121105
/usr/sbin/zfs set -p zpdata:rbe=S10_U9 sample_pool/zone-zfsBE_20121105
</syntaxhighlight>
Mount the needed ZFS filesystems.
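Recreating the pool from the dump is a manual step: substitute real devices for the <deviceN> placeholders, create the pool, then replay the zfs lines. A sketch (the device names are made up):
<syntaxhighlight lang=bash>
# zpool create sample_pool mirror c0t2d0 c0t3d0
# grep '^/usr/sbin/zfs' /tmp/ZFS_Setup.sh | sh
# zfs mount -a
</syntaxhighlight>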

===Restore zone configuration===
<syntaxhighlight lang=bash>
# /usr/sbin/recover -s ${NSR_SERVER} -c ${NSR_CLIENT} -d /tmp -a /local/${RG}/cluster_config/nsr_backup/zonecfg_${ZONE}.export
# zonecfg -z ${ZONE} -f /tmp/zonecfg_${ZONE}.export
# zonecfg -z ${ZONE} info
</syntaxhighlight>
===Restore cluster configuration===
<syntaxhighlight lang=bash>
# /usr/sbin/recover -s ${NSR_SERVER} -c ${NSR_CLIENT} -d /tmp -a /local/${RG}/cluster_config/nsr_backup/*_export.xml
# /usr/sbin/recover -s ${NSR_SERVER} -c ${NSR_CLIENT} -d /tmp -a /local/${RG}/cluster_config/nsr_backup/*.ClusterCreateCommands.txt
# /usr/bin/perl -pi -e "s#/local/${RG}/cluster_config/nsr_backup/#/tmp/#g" /tmp/${RG}.ClusterCreateCommands.txt
</syntaxhighlight>
Follow the instructions in /tmp/${RG}.ClusterCreateCommands.txt:
<syntaxhighlight lang=bash>
Recreate sample-rg:
/usr/cluster/bin/clrg create -i /tmp/sample-rg.clrg_export.xml sample-rg

Add the following entries to all nodes!!!:
/etc/inet/hosts:
10.29.7.96     sample-cl

Recreate sample-lh-res:
/usr/cluster/bin/clrs create -i /tmp/sample-lh-res.clrs_export.xml sample-lh-res

Recreate sample-hasp-zfs-res:
/usr/cluster/bin/clrs create -i /tmp/sample-hasp-zfs-res.clrs_export.xml sample-hasp-zfs-res

Recreate sample-emctl-res:
/usr/cluster/bin/clrs create -i /tmp/sample-emctl-res.clrs_export.xml sample-emctl-res

Recreate sample-oracle-res:
/usr/cluster/bin/clrs create -i /tmp/sample-oracle-res.clrs_export.xml sample-oracle-res

Recreate sample-zone-res:
/usr/cluster/bin/clrs create -i /tmp/sample-zone-res.clrs_export.xml sample-zone-res

Recreate sample-nsr-res:
/usr/cluster/bin/clrs create -i /tmp/sample-nsr-res.clrs_export.xml sample-nsr-res
</syntaxhighlight>
==Registering new resource type LGTO.clnt==

1. Install the Solaris client package LGTOclnt.
2. Register the new resource type in the cluster. On one node do:
<syntaxhighlight lang=bash>
# clrt register -f /usr/sbin/LGTO.clnt.rtr LGTO.clnt
</syntaxhighlight>
Now you have a new resource type LGTO.clnt in your cluster.
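You can verify the registration (illustrative output):
<syntaxhighlight lang=bash>
# clrt list | grep LGTO
LGTO.clnt
</syntaxhighlight>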

==Create client resource of type LGTO.clnt==

So I use a script like this:
<syntaxhighlight lang=bash>
# RGname=sample-rg
# clrs create \
  -t LGTO.clnt \
  -g ${RGname} \
  -p Resource_dependencies=$(basename ${RGname} -rg)-hasp-zfs-res \
  -p clientname=$(basename ${RGname} -rg)-lh \
  -p Network_resource=$(basename ${RGname} -rg)-lh-res \
  -p owned_paths=${ZPOOL_BASEDIR} \
  $(basename ${RGname} -rg)-nsr-res
</syntaxhighlight>
This expands to:
<syntaxhighlight lang=bash>
# clrs create \
  -t LGTO.clnt \
  -g sample-rg \
  -p Resource_dependencies=sample-hasp-zfs-res \
  -p clientname=sample-lh \
  -p Network_resource=sample-lh-res \
  -p owned_paths=/local/sample-rg \
  sample-nsr-res
</syntaxhighlight>
Now we have a client name we can connect to: sample-lh