ZFS Networker


Backup of ZFS snapshots on Solaris Cluster with Legato/EMC Networker

This describes how to set up a backup of the Solaris Cluster resource group named sample-rg.

The structure of my RGs is always:

RG:                <name>-rg
ZFS-HASP:          <name>-hasp-zfs-res
Logical Host:      <name>-lh-res
Logical Host Name: <name>-lh
ZPOOL:             <name>_pool

I use bash as the shell.

Define variables used in the following command lines

# NAME=sample
# RGname=${NAME}-rg
# NetworkerGroup=$(echo ${NAME} | tr 'a-z' 'A-Z' )
# ZPOOL=${NAME}_pool
# ZPOOL_BASEDIR=/local/${RGname}
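
For NAME=sample these evaluate to:

# echo ${RGname} ${NetworkerGroup} ${ZPOOL} ${ZPOOL_BASEDIR}
sample-rg SAMPLE sample_pool /local/sample-rg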

Define a resource for Networker

What we need now is a resource definition in our Networker directory like this:

# zfs create ${ZPOOL}/nsr
# mkdir ${ZPOOL_BASEDIR}/nsr/{bin,log,res}

# cat > ${ZPOOL_BASEDIR}/nsr/res/${NetworkerGroup}.res <<EOF
type: savepnpc;
precmd: "${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pre >${ZPOOL_BASEDIR}/nsr/log/networker_precmd.log 2>&1";
pstcmd: "${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pst >${ZPOOL_BASEDIR}/nsr/log/networker_pstcmd.log 2>&1";
timeout: "08:00am";
abort precmd with group: Yes;
EOF

And now create a link to this file on every cluster node:

# ln -s ${ZPOOL_BASEDIR}/nsr/res/${NetworkerGroup}.res /nsr/res/${NetworkerGroup}.res
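
A quick check on each cluster node that the link is in place:

# ls -l /nsr/res/${NetworkerGroup}.res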

The pre-/pstcmd-script

!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!

!!!THIS IS JUST AN EXAMPLE!!!

#!/bin/bash


function print_option () {
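  # Scans an argument list such as "-c client -s server -g group" and
  # prints the value that follows the requested option.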
  option=$1; shift
  # now process line
  while [ $# -gt 0 ]
  do
    case $1 in
    ${option})
        echo $2
        shift
        shift
        ;;
    *)
        shift
        ;;
    esac
  done
}

function print_log () {
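  # With message arguments: prefix them with a timestamp and append them to the logfile.
  # Without arguments: read stdin and timestamp every line (used as a pipe sink).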
  LOGFILE=$1 ; shift
  if [ $# -gt 0 ]
  then
    printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "$*" >> ${LOGFILE}
  else
    printf "%s (%s): " "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" >> ${LOGFILE}
    #cat >> ${LOGFILE}
    while read data
    do
      printf "%s (%s): %s\n" "$(date '+%Y%m%d %H:%M:%S')" "${cmd_option}" "${data}" >> ${LOGFILE}
    done
  fi
}

function snapshot_pre {
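  # Put the Oracle instance into hot backup mode before the snapshot,
  # either directly via su or inside the given zone via zlogin.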
  DB=$1
  DBUSER=$2
  if [ $# -eq 3 -a "_$3_" != "__" ]
  then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
  else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
  fi
  if( ${ZONE_CMD} echo >/dev/null 2>&1 )
  then
    SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"

    # Create script inside zone
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
create pfile from spfile;
alter system archive log current;
alter database backup controlfile to trace;
alter database begin backup;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}

    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
  fi
}


function snapshot_pst {
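  # Take the Oracle instance out of hot backup mode again after the snapshot.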
  DB=$1
  DBUSER=$2
  if [ $# -eq 3 -a "_$3_" != "__" ]
  then
    ZONE=$3
    ZONE_CMD="${ZLOGIN_CMD} -l ${DBUSER} ${ZONE}"
    ZONE_BASE=$(/usr/sbin/zonecfg -z ${ZONE} info zonepath | nawk '{print $NF;}')
    ZONE_ROOT="${ZONE_BASE}/root"
  else
    ZONE_ROOT=""
    ZONE_CMD="su - ${DBUSER} -c"
  fi

  if( ${ZONE_CMD} echo >/dev/null 2>&1 )
  then
    SCRIPT_NAME="tmp/.nsr-pre-snap-script.$$"

    # Create script inside zone
    cat >${ZONE_ROOT}/${SCRIPT_NAME} <<EOS
#!/bin/bash
DBDIR=\$(/usr/bin/nawk -F':' -v ORACLE_SID=${ORACLE_SID} '\$1==ORACLE_SID {print \$2;}' /var/opt/oracle/oratab)
\${DBDIR}/bin/sqlplus sys/${DBUSER} as sysdba << EOF
alter database end backup;
alter system archive log current;
EOF
EOS
    chmod 755 ${ZONE_ROOT}/${SCRIPT_NAME}

    ${ZONE_CMD} /${SCRIPT_NAME} 2>&1 | print_log ${LOGFILE}
    rm -f ${ZONE_ROOT}/${SCRIPT_NAME}
  fi
}

function snapshot_create {
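  # Take a recursive snapshot of the pool and clone each dataset's snapshot
  # read-only to <dataset>/nsr_backup so Networker can save a stable copy.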
  ZPOOL=$1
  SNAPSHOT_NAME=$2
  print_log ${LOGFILE} "Create ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
  ${ZFS_CMD} snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}
  for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
  do
    ${ZFS_CMD} clone -o readonly=on ${zfs_snapshot} ${zfs_snapshot/@*/}/nsr_backup
    ${ZFS_CMD} mount ${zfs_snapshot/@*/}/nsr_backup 2>/dev/null
    if ( df -h ${zfs_snapshot/@*/}/nsr_backup )
    then
#      echo /usr/sbin/save -s ${SERVER_NAME} -g ${GROUP_NAME} -LL -m ${CLIENT_NAME} $(${ZFS_CMD} get -Ho value mountpoint ${zfs_snapshot/@*/}/nsr_backup)
      ${ZFS_CMD} list -Ho creation,name ${zfs_snapshot/@*/}/nsr_backup |  print_log ${LOGFILE}
    fi
  done
}

function snapshot_destroy {
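  # Unmount and destroy the nsr_backup clones (only if they really originate
  # from our snapshot), then destroy the recursive snapshot itself.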
  ZPOOL=$1
  SNAPSHOT_NAME=$2
  if (${ZFS_CMD} list -t snapshot ${ZPOOL}@${SNAPSHOT_NAME})
  then
    for zfs_snapshot in $(${ZFS_CMD} list -Ho name -t snapshot -r ${ZPOOL} | grep ${SNAPSHOT_NAME})
    do
      if ( df -h ${zfs_snapshot/@*/}/nsr_backup )
      then
        print_log ${LOGFILE} "Unmount ZFS clone ${zfs_snapshot/@*/}/nsr_backup"
        ${ZFS_CMD} unmount ${zfs_snapshot/@*/}/nsr_backup
      fi
      # If this is a clone of ${zfs_snapshot}, then destroy it
      if [ "_$(${ZFS_CMD} list -Ho origin ${zfs_snapshot/@*/}/nsr_backup)_" == "_${zfs_snapshot}_" ]
      then
        print_log ${LOGFILE} "Destroy ZFS clone ${zfs_snapshot/@*/}/nsr_backup"
        ${ZFS_CMD} destroy ${zfs_snapshot/@*/}/nsr_backup
      fi
    done
    print_log ${LOGFILE} "Destroy ZFS snapshot -r ${ZPOOL}@${SNAPSHOT_NAME}"
    ${ZFS_CMD} destroy -r ${ZPOOL}@${SNAPSHOT_NAME}
  fi
}

function usage {
  echo "Usage: $0 (pre|pst)"
  exit 1
}

cmd_option=$1
export cmd_option

ORACLE_SID=SAMPLE
ORACLE_USER=oracle

SNAPSHOT_NAME="nsr"
ZFS_CMD="/usr/sbin/zfs"
ZLOGIN_CMD="/usr/bin/zlogin"

case ${cmd_option} in
pre)
  # Get commandline from parent pid
  # pre /usr/sbin/savepnpc -c <NetworkerClient> -s <NetworkerServer> -g <NetworkerGroup> -LL
  pid=$(ptree $$ | nawk '/savepnpc/{print $1}')
  ;;
pst)
  # Get commandline from parent pid
  # pst /usr/bin/pstclntsave -s <NetworkerServer> -g <NetworkerGroup> -c <NetworkerClient>
  pid=$(ptree $$ | nawk '/pstclntsave/{print $1}')
  ;;
esac
commandline="$(pargs -c ${pid} | nawk -F':' '$1 ~ /^argv/{printf $2}END{print;}')"
# Called from backupserver use -c
CLIENT_NAME=$(print_option -c ${commandline})
# If called from cmdline use -m
CLIENT_NAME=${CLIENT_NAME:-$(print_option -m ${commandline})}
# Last resort pre/post
CLIENT_NAME=${CLIENT_NAME:-${cmd_option}}

SERVER_NAME=$(print_option -s ${commandline})
GROUP_NAME=$(print_option -g ${commandline})

LOGFILE=/nsr/logs/${CLIENT_NAME}.log
print_log ${LOGFILE} "Called from ${commandline}"

named_pipe=/tmp/.named_pipe.$$

# Delete named pipe on exit
trap "rm -f ${named_pipe}" EXIT
# Create named pipe
mknod ${named_pipe} p

# Read from named pipe and send it to print_log
tee <${named_pipe} | print_log ${LOGFILE}&
# Close STDOUT & STDERR
exec 1>&-
exec 2>&-
# Redirect them to named pipe
exec >${named_pipe} 2>&1

print_log ${LOGFILE} "Begin backup of ${CLIENT_NAME}"

# Get resource name from hostname
LH_RES=$(/usr/cluster/bin/clrs show -t SUNW.LogicalHostname -p HostnameList | nawk -v Hostname="${CLIENT_NAME}" '/^Resource:/{res=$NF} /HostnameList:/ {for(i=2;i<=NF;i++){if($i == Hostname){print res}}}')

print_log ${LOGFILE} "LogicalHostname of ${CLIENT_NAME} is ${LH_RES}"

# Get resource group name from resource name
RG=$(/usr/cluster/bin/scha_resource_get -O GROUP -R ${LH_RES})
print_log ${LOGFILE} "ResourceGroup of ${LH_RES} is ${RG}"


ZPOOLS=$(/usr/cluster/bin/clrs show -g ${RG} -p Zpools  | nawk '$1=="Zpools:"{$1="";print $0}')
print_log ${LOGFILE} "ZPools used in ${RG}: ${ZPOOLS}"

Start_command=$(/usr/cluster/bin/clrs show -p Start_command -g ${RG} | /usr/bin/nawk -F ':' '$1 ~ /Start_command/ && $2 ~ /sczbt/')
print_log ${LOGFILE} "sczbt Start_command is: ${Start_command}"
sczbt_config=$(print_option -P ${Start_command})/sczbt_$(print_option -R ${Start_command})
print_log ${LOGFILE} "sczbt_config is ${sczbt_config}"
ZONE=$(nawk -F '=' '$1=="Zonename"{gsub(/"/,"",$2);print $2}' ${sczbt_config})
print_log ${LOGFILE} "Zone from ${sczbt_config} is ${ZONE}"

case ${cmd_option} in
pre)
  for ZPOOL in ${ZPOOLS}
  do
    snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
  done
#  snapshot_pre     ${DB}     ${DBUSER}        ${ZONE}
  for ZPOOL in ${ZPOOLS}
  do
    snapshot_create  ${ZPOOL} ${SNAPSHOT_NAME}
  done
#  snapshot_pst     ${DB}     ${DBUSER}        ${ZONE}
  ;;
pst)
  #for ZPOOL in ${ZPOOLS}
  #do
  #  snapshot_destroy ${ZPOOL} ${SNAPSHOT_NAME}
  #done
  ;;
*)
  usage
  ;;
esac
print_log ${LOGFILE} "End   backup of ${CLIENT_NAME}"

MD5-Checksum

#  digest -a md5 /opt/nsr/bin/nsr_snapshot.sh
62e591f1961ca5ecc9b344fbf269ea57

!!!THIS CODE IS UNTESTED DO NOT USE THIS!!!

!!!THIS IS JUST AN EXAMPLE!!!
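
To dry-run the script by hand on the node that currently hosts the resource group (a sketch: when no Networker options can be parsed from the parent process, CLIENT_NAME falls back to the pre/pst argument, so the log ends up in /nsr/logs/pre.log):

# ${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pre
# tail /nsr/logs/pre.log
# ${ZPOOL_BASEDIR}/nsr/bin/prepst_command.sh pst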

Registering new resource type LGTO.clnt

1. Install the Solaris client package LGTOclnt.
2. Register the new resource type in the cluster. On one node do:

# clrt register -f /usr/sbin/LGTO.clnt.rtr LGTO.clnt

Now you have a new resource type LGTO.clnt in your cluster.
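
You can verify the registration on any node with:

# clrt list | grep LGTO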

Create client resource of type LGTO.clnt

So I use a command like this:

# RGname=sample-rg
# clrs create \
  -t LGTO.clnt \
  -g ${RGname} \
  -p Resource_dependencies=$(basename ${RGname} -rg)-hasp-zfs-res \
  -p clientname=$(basename ${RGname} -rg)-lh \
  -p Network_resource=$(basename ${RGname} -rg)-lh-res \
  -p owned_paths=${ZPOOL_BASEDIR} \
  $(basename ${RGname} -rg)-nsr-res

This expands to:

# clrs create \
  -t LGTO.clnt \
  -g sample-rg \
  -p Resource_dependencies=sample-hasp-zfs-res \
  -p clientname=sample-lh \
  -p Network_resource=sample-lh-res \
  -p owned_paths=/local/sample-rg \
  sample-nsr-res

Now we have a client name to which we can connect: sample-lh
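
From the backup server you can test the new client, for example with a probe run of the group (a hedged example; savegrp options may vary between Networker versions):

# ping sample-lh
# savegrp -pv -c sample-lh SAMPLE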