Solaris OracleClusterware

Get Solaris release information

# pkg info kernel | \
  nawk -F '.' '
/Build Release:/{
  solaris=$NF;
}
/Branch:/{
  subrel=$3;
  update=$4;
}
END{
  printf "Solaris %d.%d Update %d\n",solaris,subrel,update;
}'

Needed Solaris packages

Install pkg dependencies

# pkg install developer/assembler
# pkg install developer/build/make
# pkg install x11/diagnostic/x11-info-clients

Check pkg dependencies

# pkg list \
    developer/assembler \
    developer/build/make \
    x11/diagnostic/x11-info-clients

User / group settings

Groups

# groupadd -g 186 oinstall
# groupadd -g 187 asmadmin
# groupadd -g 188 asmdba
# groupadd -g 200 dba

User

# useradd \
    -u 102 \
    -g oinstall \
    -G asmdba,dba \
    -c "Oracle DB" \
    -m -d /export/home/oracle \
    oracle
# useradd \
    -u 406 \
    -g oinstall \
    -G asmdba,asmadmin,dba \
    -c "Oracle Grid" \
    -m -d /export/home/grid \
    grid
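
Quick sanity check of the result (optional): oracle should be in oinstall, asmdba and dba; grid additionally in asmadmin.

# id -a oracle
# id -a grid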

Generate ssh public keys

# su - grid
$ ssh-keygen -t rsa -b 2048
Generating public/private rsa key pair.
Enter file in which to save the key (/export/home/grid/.ssh/id_rsa): <Enter>
Created directory '/export/home/grid/.ssh'.
Enter passphrase (empty for no passphrase): <Enter>
Enter same passphrase again: <Enter>
Your identification has been saved in /export/home/grid/.ssh/id_rsa.
Your public key has been saved in /export/home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
..:..:.. grid@grid01
$ cat .ssh/id_rsa.pub > .ssh/authorized_keys
$ chmod 600 .ssh/authorized_keys
$ vi .ssh/authorized_keys

Add the public keys of the other nodes.

After that, do this as grid on all other nodes:

$ scp grid01:.ssh/authorized_keys .ssh/authorized_keys

Now do a cross login from every node to every other node (even to itself) to add all hosts to known_hosts. The installer needs this.
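
A minimal sketch of that cross login, assuming two nodes named grid01 and grid02 (adjust to your host names); run it as grid on every node and answer yes to each host key prompt:

$ for node in grid01 grid02 ; do ssh ${node} hostname ; done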

Projects

# projadd  -p 186 -G oinstall \
    -K process.max-file-descriptor="(privileged,65536,deny)" \
    -K process.max-sem-nsems="(privileged,2048,deny)" \
    -K project.max-sem-ids="(privileged,2048,deny)" \
    -K project.max-shm-ids="(privileged,200,deny)" \
    -K project.max-shm-memory="(privileged,274877906944,deny)" \
    group.oinstall

Check project settings

# su - oracle
$ for name in process.{max-file-descriptor,max-sem-nsems} ; do  prctl -t privileged -i process -n ${name} $$ ; done
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION                       RECIPIENT
process.max-file-descriptor
        privileged      65.5K       -   deny                                 -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION                       RECIPIENT
process.max-sem-nsems
        privileged      2.05K       -   deny                                 -

$ for name in project.{max-sem-ids,max-shm-ids,max-shm-memory} ; do prctl -t privileged -n ${name} $$ ; done
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION                       RECIPIENT
project.max-sem-ids
        privileged      2.05K       -   deny                                 -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION                       RECIPIENT
project.max-shm-ids
        privileged        200       -   deny                                 -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION                       RECIPIENT
project.max-shm-memory
        usage               0B
        privileged       256GB      -   deny                                 -

Directories

# zfs create -o mountpoint=none rpool/grid
# zfs create -o mountpoint=/opt/gridhome rpool/grid/gridhome
# zfs create -o mountpoint=/opt/gridbase rpool/grid/gridbase
# zfs create -o mountpoint=/opt/oraInventory rpool/grid/oraInventory
# chown -R grid:oinstall /opt/{grid{home,base},oraInventory}
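
Optional check that the datasets, mountpoints and ownership came out as intended:

# zfs list -r -o name,mountpoint rpool/grid
# ls -ld /opt/gridhome /opt/gridbase /opt/oraInventory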

Storage tasks

Discover LUNs

# luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u | \
    xargs luxadm display | \
    nawk '
/DEVICE PROPERTIES for disk:/{
  disk=$NF;
}
/DEVICE PROPERTIES for:/{
  disk="";
}
/Vendor:/{
  vendor=$NF;
}
/Serial Num:/{
  serial=$NF;
}
/Unformatted capacity:/{
  capacity=$(NF-1)""$NF;
}
disk != "" && /^$/{
  printf "%s vendor=%s serial=%s capacity=%s\n",disk,vendor,serial,capacity;
}' | \
    sort -u

Label Disks

Single Disk

# printf 'type 0 no no\nlabel 1 yes\npartition\n0 usr wm 8192 $\nlabel 1 yes\nquit\nquit\n' | \
    format -e /dev/rdsk/<disk>

All FC disks

On x86 you first have to run format -> fdisk and answer y for every disk :-\

DON'T DO THE NEXT STEP IF YOU DO NOT KNOW WHAT YOU ARE DOING!

format_command_file.txt:

type 0 no no
label 1 yes
partition
0 usr wm 8192 $
label 1 yes
quit
quit
# luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u | \
    xargs luxadm display | \
    nawk '
/DEVICE PROPERTIES for disk:/{
  disk=$NF;
}
/DEVICE PROPERTIES for:/{
  disk="";
}
disk && /^$/{
  printf "%s\n",disk;
}' | \
  sort -u | \
  xargs -n 1 format -e -f ~/format_command_file.txt
# chown -RL grid:asmadmin /dev/rdsk/c0t6000*
# chmod 660 /dev/rdsk/c0t6000*
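
Optional check that grid:asmadmin really owns the raw devices (the c0t6000 prefix is just what the LUNs look like in this setup):

# ls -lL /dev/rdsk/c0t6000*s0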


Set swap to physical RAM

# export RAM=256G
# swap -d /dev/zvol/dsk/rpool/swap
# zfs destroy rpool/swap
# zfs create \
   -V ${RAM} \
   -b 8k \
   -o primarycache=metadata \
   -o checksum=on \
   -o dedup=off \
   -o encryption=off \
   -o compression=off \
   rpool/swap
# swap -a /dev/zvol/dsk/rpool/swap
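
Check the result: swap -l lists the swap devices, swap -s shows the summary.

# swap -l
# swap -s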


Network

Check port ranges

# for protocol in tcp udp ; do ipadm show-prop ${protocol} -p smallest_anon_port,largest_anon_port ; done
PROTO PROPERTY              PERM CURRENT      PERSISTENT   DEFAULT      POSSIBLE
tcp   smallest_anon_port    rw   9000         9000         32768        1024-65500
tcp   largest_anon_port     rw   65500        65500        65535        9000-65535
PROTO PROPERTY              PERM CURRENT      PERSISTENT   DEFAULT      POSSIBLE
udp   smallest_anon_port    rw   9000         9000         32768        1024-65500
udp   largest_anon_port     rw   65500        65500        65535        9000-65535
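
If the current values still show the defaults (32768/65535), set the Oracle-recommended range 9000-65500 persistently, for example:

# for protocol in tcp udp ; do ipadm set-prop -p smallest_anon_port=9000 ${protocol} ; ipadm set-prop -p largest_anon_port=65500 ${protocol} ; done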

Setup private cluster interconnects

Example with a small /29 net: eight addresses, of which six are usable (minus network and broadcast). Obviously this limits the maximum number of nodes to six.

First node:

# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.1/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.9/29 net5/ci2

Second node:

# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.2/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.10/29 net5/ci2
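
A quick reachability test over both interconnects, run from the first node (addresses as configured above):

# ping 10.65.0.2
# ping 10.65.0.10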

Set slew always for ntp

After configuring ntp set slew always to avoid time warps!

# svccfg -s svc:/network/ntp:default setprop config/slew_always = true
# svcadm refresh svc:/network/ntp:default
# svccfg -s svc:/network/ntp:default listprop config/slew_always
config/slew_always boolean    true
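
To see whether ntp is actually synchronizing (optional; the peer marked with * is the currently selected one):

# ntpq -p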

Patching

Upgrade OPatch

Do as root:

export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OPATCH_PATCH_ZIP=~oracle/orainst/p6880880_112000_Solaris86-64.zip

zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
eval mv ${ORACLE_HOME}/{$(opatch version | nawk '/OPatch Version:/{print $1","$1"_"$NF;}')}
unzip -d ${ORACLE_HOME} ${OPATCH_PATCH_ZIP}
chown -R grid:oinstall ${ORACLE_HOME}/OPatch
zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
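
Afterwards verify that the new OPatch version is picked up (PATH already contains ${ORACLE_HOME}/OPatch):

opatch version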

Apply PSU

On first node as user grid:

export ORACLE_HOME=/opt/gridhome/11.2.0.4
OCM_RSP=~grid/ocm_gridcluster1.rsp
${ORACLE_HOME}/OPatch/ocm/bin/emocmrsp -output ${OCM_RSP}

scp ${OCM_RSP} <other node1>:
scp ${OCM_RSP} <other node2>:
...

On all nodes do as root:

export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/bin
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OCM_RSP=~grid/ocm_gridcluster1.rsp
PSU_DIR=~oracle/orainst/psu
PSU_ZIP=~oracle/orainst/p22378167_112040_Solaris86-64.zip

PSU=~oracle/orainst/psu/22378167

su - grid -c "mkdir -p ${PSU_DIR}"
su - grid -c "unzip -d ${PSU_DIR} ${PSU_ZIP}"

su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_before_${PSU##*/}"

zfs snapshot -r rpool/grid@before_psu_${PSU##*/}
cd ~grid
for patch in $(find ${PSU} -name bundle.xml | xargs -n 1 dirname) ; do
  opatch auto ${patch} -oh ${ORACLE_HOME} -ocmrf ${OCM_RSP}
done

$ORACLE_HOME/crs/install/rootcrs.pl -unlock                                            # <-- on all nodes

# For every other patch do:
su - grid -c "cd ${patchdir} ; opatch prereq CheckConflictAgainstOHWithDetail -ph ./"  # <-- only on first node
su - grid -c "cd ${patchdir} ; opatch apply"                                           # <-- only on first node

$ORACLE_HOME/crs/install/rootcrs.pl -patch                                             # <-- on all nodes

zfs snapshot -r rpool/grid@after_psu_${PSU##*/}
${ORACLE_HOME}/bin/emctl start dbconsole
su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_after_${PSU##*/}"
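
When the PSU is applied on all nodes, an optional health check of the stack as root:

crsctl check cluster -all
crsctl query crs activeversion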

Configure local listener to another port

As grid user:

$ srvctl modify listener -l LISTENER -o ${ORACLE_HOME} -p "TCP:50650"
$ srvctl config listener
Name: LISTENER
Network: 1, Owner: grid
Home: <CRS home>
End points: TCP:50650
$ srvctl stop listener -l LISTENER ; srvctl start listener -l LISTENER

$ sqh
SQL>show parameter list

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
listener_networks                    string
local_listener                       string       (DESCRIPTION=(ADDRESS_LIST=(A
                                                 DDRESS=(PROTOCOL=TCP)(HOST=172
                                                 .1.20.1)(PORT=1521))))
remote_listener                      string
SQL> alter system set local_listener ="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=172.1.20.1)(PORT=50650))))" SID='+ASM1' ;

System altered.
SQL> ^D
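
Optionally confirm as grid that the listener now serves the new port and that +ASM1 has re-registered:

$ lsnrctl status LISTENER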

ASM

Create ASM diskgroups

LUNs.txt contains all disks with:

  1. one line per disk.
  2. the disk device path in the first field.
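
If you do not have such a file yet, a minimal sketch that reuses the LUN discovery pipeline from above and writes one device path per line:

# luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u > LUNs.txt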

Example for chdg

# nawk -v type='DATA' '
BEGIN {
  printf "<chdg name=\"%s\" power=\"3\">\n",type;
}
/002d0/,/011d0/ {
  if(/C913/){storage="HSA1";};
  if(/C916/){storage="HSA2";};
  if(/C061/){storage="HSA3";};
  if(/C062/){storage="HSA4";};
  if(/002d0/){
    # first disk
    count=1;
    printf "  <add>\n";
    printf "    <fg name=\"%s_%s\">\n",storage,type;
  };
  gsub(/s2$/,"s0",$1);
  printf "      <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
  if(/011d0/){
    # last disk
    print "    </fg>";
    print "  </add>";
  }
}
END {
  printf "</chdg>\n";
}
' LUNs.txt

Example for mkdg

# nawk -v type='FRA' '
BEGIN {
  printf "<dg name=\"%s\" redundancy=\"normal\">\n",type;
}
/012d0/,/015d0/ {
  if(/C913/){storage="HSA1";};
  if(/C916/){storage="HSA2";};
  if(/C061/){storage="HSA3";};
  if(/C062/){storage="HSA4";};
  if(/012d0/){
    # first disk
    count=1;
    printf "  <fg name=\"%s_%s\">\n",storage,type;
  };
  gsub(/s2$/,"s0",$1);
  printf "      <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
  if(/015d0/){
    # last disk
    print "  </fg>";
  }
}
END {
  printf "<a name=\"compatible.asm\" value=\"11.2\"/>\n";
  printf "<a name=\"compatible.rdbms\" value=\"11.2\"/>\n";
  printf "<a name=\"compatible.advm\" value=\"11.2\"/>\n";
  printf "</dg>\n";
}
' LUNs.txt

data_config.xml:

<chdg name="data" power="3">
  <add>
    <fg name="HSA1_DATA">
      <dsk name="HSA1_DATA01"  string="/dev/rdsk/c0t60002AC000000000C913010650004002d0s0"/>
      <dsk name="HSA1_DATA02"  string="/dev/rdsk/c0t60002AC000000000C913010650004003d0s0"/>
      <dsk name="HSA1_DATA03"  string="/dev/rdsk/c0t60002AC000000000C913010650004004d0s0"/>
      <dsk name="HSA1_DATA04"  string="/dev/rdsk/c0t60002AC000000000C913010650004005d0s0"/>
      <dsk name="HSA1_DATA05"  string="/dev/rdsk/c0t60002AC000000000C913010650004006d0s0"/>
      <dsk name="HSA1_DATA06"  string="/dev/rdsk/c0t60002AC000000000C913010650004007d0s0"/>
      <dsk name="HSA1_DATA07"  string="/dev/rdsk/c0t60002AC000000000C913010650004008d0s0"/>
      <dsk name="HSA1_DATA08"  string="/dev/rdsk/c0t60002AC000000000C913010650004009d0s0"/>
      <dsk name="HSA1_DATA09"  string="/dev/rdsk/c0t60002AC000000000C913010650004010d0s0"/>
      <dsk name="HSA1_DATA10"  string="/dev/rdsk/c0t60002AC000000000C913010650004011d0s0"/>
    </fg>
  </add>
  <add>
    <fg name="HSA2_DATA">
      <dsk name="HSA2_DATA01"  string="/dev/rdsk/c0t60002AC000000000C916010650004002d0s0"/>
      <dsk name="HSA2_DATA02"  string="/dev/rdsk/c0t60002AC000000000C916010650004003d0s0"/>
      <dsk name="HSA2_DATA03"  string="/dev/rdsk/c0t60002AC000000000C916010650004004d0s0"/>
      <dsk name="HSA2_DATA04"  string="/dev/rdsk/c0t60002AC000000000C916010650004005d0s0"/>
      <dsk name="HSA2_DATA05"  string="/dev/rdsk/c0t60002AC000000000C916010650004006d0s0"/>
      <dsk name="HSA2_DATA06"  string="/dev/rdsk/c0t60002AC000000000C916010650004007d0s0"/>
      <dsk name="HSA2_DATA07"  string="/dev/rdsk/c0t60002AC000000000C916010650004008d0s0"/>
      <dsk name="HSA2_DATA08"  string="/dev/rdsk/c0t60002AC000000000C916010650004009d0s0"/>
      <dsk name="HSA2_DATA09"  string="/dev/rdsk/c0t60002AC000000000C916010650004010d0s0"/>
      <dsk name="HSA2_DATA10"  string="/dev/rdsk/c0t60002AC000000000C916010650004011d0s0"/>
    </fg>
  </add>
  <a name="compatible.asm" value="11.2"/>
  <a name="compatible.rdbms" value="11.2"/>
  <a name="compatible.advm" value="11.2"/>
</chdg>

asmh:

ASMCMD [+] > chdg data_config.xml
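
The <dg> XML from the mkdg example is used the same way, assuming it was saved as fra_config.xml:

ASMCMD [+] > mkdg fra_config.xml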