Solaris OracleClusterware

From Lolly's Wiki
Revision as of 15:35, 25 November 2021 by Lollypop (talk | contribs) (Text replacement - "</source" to "</syntaxhighlight")
Jump to navigationJump to search

Clusterware

Get Solaris release information

<syntaxhighlight lang=bash>

  # Parse the installed kernel package version into "Solaris X.Y Update Z".
  1. pkg info kernel | \
 nawk -F '.' '

# FS is a literal "." so version strings split into their components.
/Build Release:/{

 # last "."-separated component, e.g. "Build Release: 5.11" -> 11
 solaris=$NF;

} /Branch:/{

 # NOTE(review): assumes the Branch line carries the minor release in $3
 # and the update number in $4 -- verify against the pkg output in use.
 subrel=$3;
 update=$4;

} END{

 printf "Solaris %d.%d Update %d\n",solaris,subrel,update;

}' </syntaxhighlight>

Needed Solaris packages

Install pkg dependencies

<syntaxhighlight lang=bash>

  # Build tools and X11 diagnostics required by the Oracle installer.
  1. pkg install developer/assembler
  2. pkg install developer/build/make
  3. pkg install x11/diagnostic/x11-info-clients

</syntaxhighlight>

Check pkg dependencies

<syntaxhighlight lang=bash>

  # Verify that the required packages are installed.
  1. pkg list \
   developer/assembler \
   developer/build/make \
   x11/diagnostic/x11-info-clients

</syntaxhighlight>

User / group settings

Groups

<syntaxhighlight lang=bash>

  # Oracle inventory / ASM / DBA groups with fixed GIDs, so the IDs
  # match on every cluster node.
  1. groupadd -g 186 oinstall
  2. groupadd -g 187 asmadmin
  3. groupadd -g 188 asmdba
  4. groupadd -g 200 dba

</syntaxhighlight>

User

<syntaxhighlight lang=bash>

  # Fixed UIDs so the oracle and grid accounts match on all nodes.
  1. useradd \
   -u 102 \
   -g oinstall \
   -G asmdba,dba \
   -c "Oracle DB" \
   -m -d /export/home/oracle \
   oracle
  # grid additionally belongs to asmadmin for ASM administration.
  1. useradd \
   -u 406 \
   -g oinstall \
   -G asmdba,asmadmin,dba \
   -c "Oracle Grid" \
   -m -d /export/home/grid \
   grid

</syntaxhighlight>

Generate ssh public keys

<syntaxhighlight lang=bash>

  1. su - grid

$ ssh-keygen -t rsa -b 2048
Generating public/private rsa key pair.
Enter file in which to save the key (/export/home/grid/.ssh/id_rsa): <Enter>
Created directory '/export/home/grid/.ssh'.
Enter passphrase (empty for no passphrase): <Enter>
Enter same passphrase again: <Enter>
Your identification has been saved in /export/home/grid/.ssh/id_rsa.
Your public key has been saved in /export/home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
..:..:.. grid@grid01
$ cat .ssh/id_rsa.pub > .ssh/authorized_keys
$ chmod 600 .ssh/authorized_keys
$ vi .ssh/authorized_keys
</syntaxhighlight>
Add the public key of other nodes.

After that do this on all other nodes added as grid: <syntaxhighlight lang=bash>
$ scp grid01:.ssh/authorized_keys .ssh/authorized_keys
</syntaxhighlight>

Now do a cross login from every node to every other node (even to its self) to add all to the known_hosts. The installer needs this.

Projects

<syntaxhighlight lang=bash>

  # Resource controls for the Oracle project.
  # NOTE: projadd does not accept the same attribute name twice; both
  # max-file-descriptor thresholds must be one comma-separated value list.
  1. projadd -p 186 -G oinstall \
   -K process.max-file-descriptor="(basic,1024,deny),(privileged,65536,deny)" \
   -K process.max-sem-nsems="(privileged,2048,deny)" \
   -K project.max-sem-ids="(privileged,2048,deny)" \
   -K project.max-shm-ids="(privileged,200,deny)" \
   -K project.max-shm-memory="(privileged,274877906944,deny)" \
   group.oinstall

</syntaxhighlight>

Check project settings

<syntaxhighlight lang=bash>

  1. su - oracle

$ for name in process.{max-file-descriptor,max-sem-nsems} ; do prctl -t privileged -i process -n ${name} $$ ; done process: 14822: -bash NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT process.max-file-descriptor

       privileged      65.5K       -   deny                                 -

process: 14822: -bash NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT process.max-sem-nsems

       privileged      2.05K       -   deny                                 -

$ for name in project.{max-sem-ids,max-shm-ids,max-shm-memory} ; do prctl -t privileged -n ${name} $$ ; done process: 14822: -bash NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT project.max-sem-ids

       privileged      2.05K       -   deny                                 -

process: 14822: -bash NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT project.max-shm-ids

       privileged        200       -   deny                                 -

process: 14822: -bash NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT project.max-shm-memory

       usage               0B
       privileged       256GB      -   deny                                 -

</syntaxhighlight>

Directories

<syntaxhighlight lang=bash>

  # One ZFS dataset per Oracle directory; the rpool/grid parent itself
  # stays unmounted (mountpoint=none) and only groups the children.
  1. zfs create -o mountpoint=none rpool/grid
  2. zfs create -o mountpoint=/opt/gridhome rpool/grid/gridhome
  3. zfs create -o mountpoint=/opt/gridbase rpool/grid/gridbase
  4. zfs create -o mountpoint=/opt/oraInventory rpool/grid/oraInventory
  5. chown -R grid:oinstall /opt/{grid{home,base},oraInventory}

</syntaxhighlight>

Storage tasks

Discover LUNs

<syntaxhighlight lang=bash>

  # Walk every FC port, dump its device map and print one summary line
  # (path, vendor, serial, capacity) per unique disk.
  1. luxadm -e port | \
   nawk '{print $1}' | \
   xargs -n 1 luxadm -e dump_map | \
   nawk '/Disk device/{print $5}' | \
   sort -u | \
   xargs luxadm display | \
   nawk '

# Remember the device path of the disk currently being described.
/DEVICE PROPERTIES for disk:/{

 disk=$NF;

# "for:" (without "disk") marks a non-disk device -- stop tracking it.
} /DEVICE PROPERTIES for:/{

 disk="";

} /Vendor:/{

 vendor=$NF;

} /Serial Num:/{

 serial=$NF;

} /Unformatted capacity:/{

 # $(NF-1) is the size value, $NF presumably its unit -- concatenated.
 capacity=$(NF-1)""$NF;

# A blank line ends one property block: emit the collected fields.
} disk != "" && /^$/{

 printf "%s vendor=%s serial=%s capacity=%s\n",disk,vendor,serial,capacity;

}' | \

   sort -u

</syntaxhighlight>

Label Disks

Single Disk

<syntaxhighlight lang=bash>

  # Scripted format(1M) session: relabel the disk and create slice 0
  # (usr, wm) from sector 8192 to the end of the disk ($).
  1. printf 'type 0 no no\nlabel 1 yes\npartition\n0 usr wm 8192 $\nlabel 1 yes\nquit\nquit\n' | \
   format -e /dev/rdsk/<disk>

</syntaxhighlight>

All FC disks

For x86 you have to call format -> fdisk -> y for all disks first :-\

DON'T DO THE NEXT STEP IF YOU DO NOT KNOW WHAT YOU DO!

format_command_file.txt: <syntaxhighlight lang=bash>
type 0 no no
label 1 yes
partition
0 usr wm 8192 $
label 1 yes
quit
quit
</syntaxhighlight>

<syntaxhighlight lang=bash>

  # Collect every unique FC disk path and run the scripted format
  # session (format_command_file.txt) against each one.
  1. luxadm -e port | \
   nawk '{print $1}' | \
   xargs -n 1 luxadm -e dump_map | \
   nawk '/Disk device/{print $5}' | \
   sort -u | \
   xargs luxadm display | \
   nawk '

# Track the current disk path; reset it for non-disk devices.
/DEVICE PROPERTIES for disk:/{

 disk=$NF;

} /DEVICE PROPERTIES for:/{

 disk="";

# On the blank line ending a property block, print the disk path once.
} disk && /^$/{

 printf "%s\n",disk;

}' | \

 sort -u | \
 xargs -n 1 format -e -f ~/format_command_file.txt

</syntaxhighlight>

<syntaxhighlight lang=bash>

  # Grant the grid user / asmadmin group access to the ASM candidate LUNs.
  # NOTE(review): -L with -R follows symlinks -- presumably so the
  # underlying device nodes get the new owner; verify on the target release.
  1. chown -RL grid:asmadmin /dev/rdsk/c0t6000*
  2. chmod 660 /dev/rdsk/c0t6000*

</syntaxhighlight>


Set swap to physical RAM

<syntaxhighlight lang=bash>

  # Resize swap to the size of physical RAM: drop the old swap zvol and
  # recreate it, then re-add it as a swap device.
  # NOTE: the ZFS property is "checksum" -- "chksum" is rejected by zfs create.
  1. export RAM=256G
  2. swap -d /dev/zvol/dsk/rpool/swap
  3. zfs destroy rpool/swap
  4. zfs create \
  -V ${RAM} \
  -b 8k \
  -o primarycache=metadata \
  -o checksum=on \
  -o dedup=off \
  -o encryption=off \
  -o compression=off \
  rpool/swap
  1. swap -a /dev/zvol/dsk/rpool/swap

</syntaxhighlight>


Network

Check port ranges

<syntaxhighlight lang=bash>

  # Show the anonymous (ephemeral) port range for both tcp and udp.
  1. for protocol in tcp udp ; do ipadm show-prop ${protocol} -p smallest_anon_port,largest_anon_port ; done

PROTO PROPERTY           PERM CURRENT PERSISTENT DEFAULT POSSIBLE
tcp   smallest_anon_port rw   9000    9000       32768   1024-65500
tcp   largest_anon_port  rw   65500   65500      65535   9000-65535
udp   smallest_anon_port rw   9000    9000       32768   1024-65500
udp   largest_anon_port  rw   65500   65500      65535   9000-65535
</syntaxhighlight>

Setup private cluster interconnects

Example with a small net with six (eight with net and broadcast) usable IPs. This limits the maximum number of nodes to six... which is obvious...

First node: <syntaxhighlight lang=bash>

  # Two redundant interconnects on separate /29 subnets (net1 and net5).
  1. ipadm create-ip net1
  2. ipadm create-addr -T static -a 10.65.0.1/29 net1/ci1
  3. ipadm create-ip net5
  4. ipadm create-addr -T static -a 10.65.0.9/29 net5/ci2

</syntaxhighlight> Second node: <syntaxhighlight lang=bash>

  # Same interconnect subnets, next host addresses for the second node.
  1. ipadm create-ip net1
  2. ipadm create-addr -T static -a 10.65.0.2/29 net1/ci1
  3. ipadm create-ip net5
  4. ipadm create-addr -T static -a 10.65.0.10/29 net5/ci2

</syntaxhighlight>

Set slew always for ntp

After configuring ntp set slew always to avoid time warps! <syntaxhighlight lang=bash>

  # Slewing adjusts the clock gradually instead of stepping it, which
  # avoids the sudden time jumps a cluster cannot tolerate.
  1. svccfg -s svc:/network/ntp:default setprop config/slew_always = true
  2. svcadm refresh svc:/network/ntp:default
  3. svccfg -s svc:/network/ntp:default listprop config/slew_always

config/slew_always boolean true
</syntaxhighlight>

Patching

Upgrade OPatch

Do as root: <syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OPATCH_PATCH_ZIP=~oracle/orainst/p6880880_112000_Solaris86-64.zip

zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
eval mv ${ORACLE_HOME}/{$(opatch version | nawk '/OPatch Version:/{print $1","$1"_"$NF;}')}
unzip -d ${ORACLE_HOME} ${OPATCH_PATCH_ZIP}
chown -R grid:oinstall ${ORACLE_HOME}/OPatch
zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
</syntaxhighlight>

Apply PSU

On first node as user grid: <syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
OCM_RSP=~grid/ocm_gridcluster1.rsp
${ORACLE_HOME}/OPatch/ocm/bin/emocmrsp -output ${OCM_RSP}

scp ${OCM_RSP} <other node1>:
scp ${OCM_RSP} <other node2>:
...
</syntaxhighlight>

On all nodes do as root: <syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/bin
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OCM_RSP=~grid/ocm_gridcluster1.rsp
PSU_DIR=~oracle/orainst/psu
PSU_ZIP=~oracle/orainst/p22378167_112040_Solaris86-64.zip

PSU=~oracle/orainst/psu/22378167

su - grid -c "mkdir -p ${PSU_DIR}"
su - grid -c "unzip -d ${PSU_DIR} ${PSU_ZIP}"

su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_before_${PSU##*/}"

zfs snapshot -r rpool/grid@before_psu_${PSU##*/}
cd ~grid
for patch in $(find ${PSU} -name bundle.xml | xargs -n 1 dirname) ; do

 opatch auto ${patch} -oh ${ORACLE_HOME} -ocmrf ${OCM_RSP}

done

$ORACLE_HOME/crs/install/rootcrs.pl -unlock # <-- on all nodes

  1. For every other patch do:

su - grid -c "cd ${patchdir} ; opatch prereq CheckConflictAgainstOHWithDetail -ph ./" # <-- only on first node
su - grid -c "cd ${patchdir} ; opatch apply" # <-- only on first node

$ORACLE_HOME/crs/install/rootcrs.pl -patch # <-- on all nodes

zfs snapshot -r rpool/grid@after_psu_${PSU##*/}
${ORACLE_HOME}/bin/emctl start dbconsole
su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_after_${PSU##*/}"
</syntaxhighlight>

Configure local listener to another port

As grid user: <syntaxhighlight lang=bash>
$ srvctl modify listener -l LISTENER -o ${ORACLE_HOME} -p "TCP:50650"
$ srvctl config listener
Name: LISTENER
Network: 1, Owner: grid
Home: <CRS home>
End points: TCP:50650
$ srvctl stop listener -l LISTENER ; srvctl start listener -l LISTENER

$ sqh SQL>show parameter list

NAME TYPE VALUE


----------- ------------------------------

listener_networks string local_listener string (DESCRIPTION=(ADDRESS_LIST=(A

                                                DDRESS=(PROTOCOL=TCP)(HOST=172
                                                .1.20.1)(PORT=1521))))

remote_listener string
SQL> alter system set local_listener ="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=172.1.20.1)(PORT=50650))))" SID='+ASM1' ;

System altered.
SQL> ^D
</syntaxhighlight>

ASM

Create ASM diskgroups

LUNs.txt contains all disks with:

  1. one line per disk.
  2. each disk in the first field.

Example for chdg

<syntaxhighlight lang=awk>

  1. nawk -v type='DATA' '

# Generate an asmcmd chdg (change diskgroup) XML document from LUNs.txt
# (one disk per line, device path in field 1).
BEGIN {

 printf "<chdg name=\"%s\" power=\"3\">\n",type;

# Range pattern: lines from a LUN matching 002d0 through one matching
# 011d0; the range re-arms for each storage array, yielding one <add>
# block per array.
} /002d0/,/011d0/ {

 # Map the array serial fragment to its failure-group prefix.
 if(/C903/){storage="HSA1";};
 if(/C906/){storage="HSA2";};
 if(/C061/){storage="HSA3";};
 if(/C062/){storage="HSA4";};
 if(/002d0/){
   # first disk
   count=1;
   printf "  <add>\n";
   printf "    <fg name=\"%s_%s\">\n",storage,type;
 };
 # use slice 0 (created by format) instead of the whole-disk slice 2
 gsub(/s2$/,"s0",$1);
 printf "      <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
 if(/011d0/){
   # last disk
   print "    </fg>";
   print "  </add>";
 }

} END {

 printf "<a name=\"compatible.asm\" value=\"11.2\"/>\n";
 printf "<a name=\"compatible.rdbms\" value=\"11.2\"/>\n";
 printf "<a name=\"compatible.advm\" value=\"11.2\"/>\n";
 printf "</chdg>\n";

} ' LUNs.txt </syntaxhighlight>

Example for mkdg

<syntaxhighlight lang=awk>

  1. nawk -v type='FRA' '

# Generate an asmcmd mkdg (make diskgroup) XML document from LUNs.txt
# (one disk per line, device path in field 1).
BEGIN {

 printf "<dg name=\"%s\" redundancy=\"normal\">\n",type;

# Range pattern: LUNs 012d0 through 015d0; re-arms per storage array,
# producing one failure group per array.
} /012d0/,/015d0/ {

 # Map the array serial fragment to its failure-group prefix.
 if(/C903/){storage="HSA1";};
 if(/C906/){storage="HSA2";};
 if(/C061/){storage="HSA3";};
 if(/C062/){storage="HSA4";};
 if(/012d0/){
   # first disk
   count=1;
   printf "  <fg name=\"%s_%s\">\n",storage,type;
 };
 # use slice 0 (created by format) instead of the whole-disk slice 2
 gsub(/s2$/,"s0",$1);
 printf "      <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
 if(/015d0/){
   # last disk
   print "  </fg>";
 }

} END {

 printf "<a name=\"compatible.asm\" value=\"11.2\"/>\n";
 printf "<a name=\"compatible.rdbms\" value=\"11.2\"/>\n";
 printf "<a name=\"compatible.advm\" value=\"11.2\"/>\n";
 printf "</dg>\n";

} ' LUNs.txt </syntaxhighlight>

data_config.xml: <syntaxhighlight lang=xml>
<chdg name="data" power="3">

 <add>
   <fg name="HSA1_DATA">
     <dsk name="HSA1_DATA01"  string="/dev/rdsk/c0t60002AC000000000C903010650004002d0s0"/>
     <dsk name="HSA1_DATA02"  string="/dev/rdsk/c0t60002AC000000000C903010650004003d0s0"/>
     <dsk name="HSA1_DATA03"  string="/dev/rdsk/c0t60002AC000000000C903010650004004d0s0"/>
     <dsk name="HSA1_DATA04"  string="/dev/rdsk/c0t60002AC000000000C903010650004005d0s0"/>
     <dsk name="HSA1_DATA05"  string="/dev/rdsk/c0t60002AC000000000C903010650004006d0s0"/>
     <dsk name="HSA1_DATA06"  string="/dev/rdsk/c0t60002AC000000000C903010650004007d0s0"/>
     <dsk name="HSA1_DATA07"  string="/dev/rdsk/c0t60002AC000000000C903010650004008d0s0"/>
     <dsk name="HSA1_DATA08"  string="/dev/rdsk/c0t60002AC000000000C903010650004009d0s0"/>
     <dsk name="HSA1_DATA09"  string="/dev/rdsk/c0t60002AC000000000C903010650004010d0s0"/>
     <dsk name="HSA1_DATA10"  string="/dev/rdsk/c0t60002AC000000000C903010650004011d0s0"/>
   </fg>
 </add>
 <add>
   <fg name="HSA2_DATA">
     <dsk name="HSA2_DATA01"  string="/dev/rdsk/c0t60002AC000000000C906010650004002d0s0"/>
     <dsk name="HSA2_DATA02"  string="/dev/rdsk/c0t60002AC000000000C906010650004003d0s0"/>
     <dsk name="HSA2_DATA03"  string="/dev/rdsk/c0t60002AC000000000C906010650004004d0s0"/>
     <dsk name="HSA2_DATA04"  string="/dev/rdsk/c0t60002AC000000000C906010650004005d0s0"/>
     <dsk name="HSA2_DATA05"  string="/dev/rdsk/c0t60002AC000000000C906010650004006d0s0"/>
     <dsk name="HSA2_DATA06"  string="/dev/rdsk/c0t60002AC000000000C906010650004007d0s0"/>
     <dsk name="HSA2_DATA07"  string="/dev/rdsk/c0t60002AC000000000C906010650004008d0s0"/>
     <dsk name="HSA2_DATA08"  string="/dev/rdsk/c0t60002AC000000000C906010650004009d0s0"/>
     <dsk name="HSA2_DATA09"  string="/dev/rdsk/c0t60002AC000000000C906010650004010d0s0"/>
     <dsk name="HSA2_DATA10"  string="/dev/rdsk/c0t60002AC000000000C906010650004011d0s0"/>
   </fg>
 </add>
 <a name="compatible.asm" value="11.2"/>
 <a name="compatible.rdbms" value="11.2"/>
 <a name="compatible.advm" value="11.2"/>

</chdg> </syntaxhighlight>

asmh: <syntaxhighlight lang=oracle11>
ASMCMD [+] > chdg data_config.xml
</syntaxhighlight>