Solaris OracleClusterware
Get Solaris release information
# pkg info kernel | \
nawk -F '.' '
/Build Release:/{
solaris=$NF;
}
/Branch:/{
subrel=$3;
update=$4;
}
END{
printf "Solaris %d.%d Update %d\n",solaris,subrel,update;
}'
Needed Solaris packages
Install pkg dependencies
# pkg install developer/assembler
# pkg install developer/build/make
# pkg install x11/diagnostic/x11-info-clients
Check pkg dependencies
# pkg list \
developer/assembler \
developer/build/make \
x11/diagnostic/x11-info-clients
User / group settings
Groups
# groupadd -g 186 oinstall
# groupadd -g 187 asmadmin
# groupadd -g 188 asmdba
# groupadd -g 200 dba
User
# useradd \
-u 102 \
-g oinstall \
-G asmdba,dba \
-c "Oracle DB" \
-m -d /export/home/oracle \
oracle
# useradd \
-u 406 \
-g oinstall \
-G asmdba,asmadmin,dba \
-c "Oracle Grid" \
-m -d /export/home/grid \
grid
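To verify the memberships (id should report the primary and supplementary groups created above):
# id -a oracle
# id -a grid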
Generate ssh public keys
# su - grid
$ ssh-keygen -t rsa -b 2048
Generating public/private rsa key pair.
Enter file in which to save the key (/export/home/grid/.ssh/id_rsa): <Enter>
Created directory '/export/home/grid/.ssh'.
Enter passphrase (empty for no passphrase): <Enter>
Enter same passphrase again: <Enter>
Your identification has been saved in /export/home/grid/.ssh/id_rsa.
Your public key has been saved in /export/home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
..:..:.. grid@grid01
$ cat .ssh/id_rsa.pub > .ssh/authorized_keys
$ chmod 600 .ssh/authorized_keys
$ vi .ssh/authorized_keys
Add the public keys of the other nodes.
After that, run this as grid on every other node:
$ scp grid01:.ssh/authorized_keys .ssh/authorized_keys
Now do a cross login from every node to every other node (even to itself) so all host keys end up in known_hosts; the installer needs this. A minimal sketch follows.
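A minimal sketch of the cross login, run as grid on each node; grid01 appears above, grid02 is an assumed second node name (substitute your own node list). Answer yes to every host key prompt:
$ for node in grid01 grid02 ; do ssh ${node} hostname ; done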
Projects
# projadd -p 186 -G oinstall \
-K process.max-file-descriptor="(privileged,65536,deny)" \
-K process.max-sem-nsems="(privileged,2048,deny)" \
-K project.max-sem-ids="(privileged,2048,deny)" \
-K project.max-shm-ids="(privileged,200,deny)" \
-K project.max-shm-memory="(privileged,274877906944,deny)" \
group.oinstall
Check project settings
# su - oracle
$ for name in process.{max-file-descriptor,max-sem-nsems} ; do prctl -t privileged -i process -n ${name} $$ ; done
process: 14822: -bash
NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT
process.max-file-descriptor
privileged 65.5K - deny -
process: 14822: -bash
NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT
process.max-sem-nsems
privileged 2.05K - deny -
$ for name in project.{max-sem-ids,max-shm-ids,max-shm-memory} ; do prctl -t privileged -n ${name} $$ ; done
process: 14822: -bash
NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT
project.max-sem-ids
privileged 2.05K - deny -
process: 14822: -bash
NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT
project.max-shm-ids
privileged 200 - deny -
process: 14822: -bash
NAME PRIVILEGE VALUE FLAG ACTION RECIPIENT
project.max-shm-memory
usage 0B
privileged 256GB - deny -
Directories
# zfs create -o mountpoint=none rpool/grid
# zfs create -o mountpoint=/opt/gridhome rpool/grid/gridhome
# zfs create -o mountpoint=/opt/gridbase rpool/grid/gridbase
# zfs create -o mountpoint=/opt/oraInventory rpool/grid/oraInventory
# chown -R grid:oinstall /opt/{grid{home,base},oraInventory}
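A quick check that the datasets mounted where expected and that the ownership change took:
# zfs list -r rpool/grid
# ls -ld /opt/gridhome /opt/gridbase /opt/oraInventory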
Storage tasks
Discover LUNs
# luxadm -e port | \
nawk '{print $1}' | \
xargs -n 1 luxadm -e dump_map | \
nawk '/Disk device/{print $5}' | \
sort -u | \
xargs luxadm display | \
nawk '
/DEVICE PROPERTIES for disk:/{
disk=$NF;
}
/DEVICE PROPERTIES for:/{
disk="";
}
/Vendor:/{
vendor=$NF;
}
/Serial Num:/{
serial=$NF;
}
/Unformatted capacity:/{
capacity=$(NF-1)""$NF;
}
disk != "" && /^$/{
printf "%s vendor=%s serial=%s capacity=%s\n",disk,vendor,serial,capacity;
}' | \
sort -u
Label Disks
Single Disk
# printf 'type 0 no no\nlabel 1 yes\npartition\n0 usr wm 8192 $\nlabel 1 yes\nquit\nquit\n' | \
format -e /dev/rdsk/<disk>
All FC disks
On x86 you first have to run format -> fdisk -> y for every disk to write an fdisk partition table :-\ (a scripted sketch follows).
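A hedged sketch of scripting that step: fdisk -B writes a default Solaris partition spanning the whole disk without prompting. It assumes the device names from the Discover LUNs pipeline above were saved to a hypothetical file ~/fc_disks.txt, and rewrites the sN slice suffix to p0, the x86 whole-disk device. Like the labeling step below, this destroys existing partitioning, so be careful:
# nawk '{ sub(/s[0-9]+$/,"p0"); print; }' ~/fc_disks.txt | \
xargs -n 1 fdisk -B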
DON'T DO THE NEXT STEP IF YOU DO NOT KNOW WHAT YOU ARE DOING! It relabels every FC disk it finds.
format_command_file.txt:
type 0 no no
label 1 yes
partition
0 usr wm 8192 $
label 1 yes
quit
quit
# luxadm -e port | \
nawk '{print $1}' | \
xargs -n 1 luxadm -e dump_map | \
nawk '/Disk device/{print $5}' | \
sort -u | \
xargs luxadm display | \
nawk '
/DEVICE PROPERTIES for disk:/{
disk=$NF;
}
/DEVICE PROPERTIES for:/{
disk="";
}
disk && /^$/{
printf "%s\n",disk;
}' | \
sort -u | \
xargs -n 1 format -e -f ~/format_command_file.txt
# chown -RL grid:asmadmin /dev/rdsk/c0t6000*
# chmod 660 /dev/rdsk/c0t6000*
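Check that owner and mode actually landed on the device nodes; the /dev/rdsk entries are symlinks, so -L follows them to the real devices:
# ls -lL /dev/rdsk/c0t6000*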
Set swap size to physical RAM
# export RAM=256G
# swap -d /dev/zvol/dsk/rpool/swap
# zfs destroy rpool/swap
# zfs create \
-V ${RAM} \
-b 8k \
-o primarycache=metadata \
-o checksum=on \
-o dedup=off \
-o encryption=off \
-o compression=off \
rpool/swap
# swap -a /dev/zvol/dsk/rpool/swap
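Afterwards swap -l should show the zvol as the only swap device, sized to the RAM value set above (compare with the physical memory reported by prtconf):
# swap -l
# prtconf | grep 'Memory size'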
Network
Check port ranges
# for protocol in tcp udp ; do ipadm show-prop ${protocol} -p smallest_anon_port,largest_anon_port ; done
PROTO PROPERTY PERM CURRENT PERSISTENT DEFAULT POSSIBLE
tcp smallest_anon_port rw 9000 9000 32768 1024-65500
tcp largest_anon_port rw 65500 65500 65535 9000-65535
PROTO PROPERTY PERM CURRENT PERSISTENT DEFAULT POSSIBLE
udp smallest_anon_port rw 9000 9000 32768 1024-65500
udp largest_anon_port rw 65500 65500 65535 9000-65535
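If the CURRENT column still shows the defaults, set the anonymous port range Oracle expects (9000 to 65500) for both protocols; a sketch:
# for protocol in tcp udp ; do ipadm set-prop -p smallest_anon_port=9000 ${protocol} ; ipadm set-prop -p largest_anon_port=65500 ${protocol} ; done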
Setup private cluster interconnects
Example with a small net: a /29 has eight addresses, six of them usable after subtracting the network and broadcast addresses. This limits the maximum number of nodes to six.
First node:
# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.1/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.9/29 net5/ci2
Second node:
# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.2/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.10/29 net5/ci2
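A quick check on each node: list the interconnect addresses and ping the peer. Example from the first node, using the second node's addresses configured above:
# ipadm show-addr | egrep 'ci[12]'
# ping 10.65.0.2 && ping 10.65.0.10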
Set slew always for ntp
After configuring ntp, set slew_always to avoid time warps!
# svccfg -s svc:/network/ntp:default setprop config/slew_always = true
# svcadm refresh svc:/network/ntp:default
# svccfg -s svc:/network/ntp:default listprop config/slew_always
config/slew_always boolean true
Upgrade OPatch
# OPATCH_PATCH_ZIP=~oracle/orainst/p6880880_112000_Solaris86-64.zip
# export ORACLE_HOME=/opt/gridhome/11.2.0.4
# export PATH=${PATH}:${ORACLE_HOME}/OPatch
# zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
# eval mv ${ORACLE_HOME}/{$(opatch version | nawk '/OPatch Version:/{print $1","$1"_"$NF;}')}
# unzip -d ${ORACLE_HOME} ${OPATCH_PATCH_ZIP}
# chown -R grid:oinstall ${ORACLE_HOME}/OPatch
# zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
Apply PSU
On first node as user grid:
$ export ORACLE_HOME=/opt/gridhome/11.2.0.4
$ OCM_RSP=~grid/ocm_gridcluster1.rsp
$ ${ORACLE_HOME}/OPatch/ocm/bin/emocmrsp -output ${OCM_RSP}
$ scp ${OCM_RSP} <other node1>:
$ scp ${OCM_RSP} <other node2>:
...
On all nodes:
# PSU_DIR=~oracle/orainst/psu
# PSU_ZIP=~oracle/orainst/p22378167_112040_Solaris86-64.zip
# OCM_RSP=~grid/ocm_gridcluster1.rsp
# PSU=~oracle/orainst/psu/22378167
# export ORACLE_HOME=/opt/gridhome/11.2.0.4
# export PATH=${PATH}:${ORACLE_HOME}/bin
# export PATH=${PATH}:${ORACLE_HOME}/OPatch
# su - grid -c "mkdir -p ${PSU_DIR}"
# su - grid -c "unzip -d ${PSU_DIR} ${PSU_ZIP}"
# zfs snapshot -r rpool/grid@before_psu_${PSU##*/}
# cd ~grid
# for patch in $(find ${PSU} -name bundle.xml | xargs -n 1 dirname) ; do
opatch auto ${patch} -oh ${ORACLE_HOME} -ocmrf ${OCM_RSP}
done
# zfs snapshot -r rpool/grid@after_psu_${PSU##*/}
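A hedged check afterwards: opatch lsinventory lists the patches applied to the grid home; run it as grid so the inventory permissions stay intact:
# su - grid -c "/opt/gridhome/11.2.0.4/OPatch/opatch lsinventory -oh /opt/gridhome/11.2.0.4"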