Solaris OracleClusterware
=Get Solaris release information=
<syntaxhighlight lang=bash>
# pkg info kernel | \
    nawk -F '.' '
    /Build Release:/{
        solaris=$NF;
    } /Branch:/{
        subrel=$3; update=$4;
    } END{
        printf "Solaris %d.%d Update %d\n",solaris,subrel,update;
    }'
</syntaxhighlight>
=Needed Solaris packages=
==Install pkg dependencies==
<syntaxhighlight lang=bash>
# pkg install developer/assembler
# pkg install developer/build/make
# pkg install x11/diagnostic/x11-info-clients
</syntaxhighlight>
==Check pkg dependencies==
<syntaxhighlight lang=bash>
# pkg list \
    developer/assembler \
    developer/build/make \
    x11/diagnostic/x11-info-clients
</syntaxhighlight>
=User / group settings=
==Groups==
<syntaxhighlight lang=bash>
# groupadd -g 186 oinstall
# groupadd -g 187 asmadmin
# groupadd -g 188 asmdba
# groupadd -g 200 dba
</syntaxhighlight>
==User==
<syntaxhighlight lang=bash>
# useradd \
    -u 102 \
    -g oinstall \
    -G asmdba,dba \
    -c "Oracle DB" \
    -m -d /export/home/oracle \
    oracle
# useradd \
    -u 406 \
    -g oinstall \
    -G asmdba,asmadmin,dba \
    -c "Oracle Grid" \
    -m -d /export/home/grid \
    grid
</syntaxhighlight>
===Generate ssh public keys===
<syntaxhighlight lang=bash>
# su - grid
$ ssh-keygen -t rsa -b 2048
Generating public/private rsa key pair.
Enter file in which to save the key (/export/home/grid/.ssh/id_rsa): <Enter>
Created directory '/export/home/grid/.ssh'.
Enter passphrase (empty for no passphrase): <Enter>
Enter same passphrase again: <Enter>
Your identification has been saved in /export/home/grid/.ssh/id_rsa.
Your public key has been saved in /export/home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
..:..:.. grid@grid01
$ cat .ssh/id_rsa.pub > .ssh/authorized_keys
$ chmod 600 .ssh/authorized_keys
$ vi .ssh/authorized_keys
</syntaxhighlight>
Add the public key of other nodes.
After that, do the following on all other nodes, logged in as grid:
<syntaxhighlight lang=bash>
$ scp grid01:.ssh/authorized_keys .ssh/authorized_keys
</syntaxhighlight>
Now log in from every node to every other node (including itself) so that all hosts end up in known_hosts. The installer needs this.
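A minimal sketch of that cross-login as a loop, to be run as grid on every node; the node names grid01 and grid02 are assumptions (only grid01 appears above), so adjust the list to your cluster:
<syntaxhighlight lang=bash>
$ for node in grid01 grid02 ; do    # hypothetical node list - adjust to your cluster
      ssh ${node} hostname          # answer "yes" at the host key prompt on first contact
  done
</syntaxhighlight>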
==Projects==
<syntaxhighlight lang=bash>
# projadd -p 186 -G oinstall \
    -K process.max-file-descriptor="(basic,1024,deny)" \
    -K process.max-file-descriptor="(privileged,65536,deny)" \
    -K process.max-sem-nsems="(privileged,2048,deny)" \
    -K project.max-sem-ids="(privileged,2048,deny)" \
    -K project.max-shm-ids="(privileged,200,deny)" \
    -K project.max-shm-memory="(privileged,274877906944,deny)" \
    group.oinstall
</syntaxhighlight>
===Check project settings===
<syntaxhighlight lang=bash>
# su - oracle
$ for name in process.{max-file-descriptor,max-sem-nsems} ; do prctl -t privileged -i process -n ${name} $$ ; done
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION            RECIPIENT
process.max-file-descriptor
        privileged      65.5K       -   deny                      -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION            RECIPIENT
process.max-sem-nsems
        privileged      2.05K       -   deny                      -
$ for name in project.{max-sem-ids,max-shm-ids,max-shm-memory} ; do prctl -t privileged -n ${name} $$ ; done
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION            RECIPIENT
project.max-sem-ids
        privileged      2.05K       -   deny                      -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION            RECIPIENT
project.max-shm-ids
        privileged        200       -   deny                      -
process: 14822: -bash
NAME    PRIVILEGE       VALUE    FLAG   ACTION            RECIPIENT
project.max-shm-memory
        usage               0B
        privileged      256GB       -   deny                      -
</syntaxhighlight>
=Directories=
<syntaxhighlight lang=bash>
# zfs create -o mountpoint=none rpool/grid
# zfs create -o mountpoint=/opt/gridhome rpool/grid/gridhome
# zfs create -o mountpoint=/opt/gridbase rpool/grid/gridbase
# zfs create -o mountpoint=/opt/oraInventory rpool/grid/oraInventory
# chown -R grid:oinstall /opt/{grid{home,base},oraInventory}
</syntaxhighlight>
=Storage tasks=
==Discover LUNs==
<syntaxhighlight lang=bash>
# luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u | \
    xargs luxadm display | \
    nawk '
    /DEVICE PROPERTIES for disk:/{
        disk=$NF;
    } /DEVICE PROPERTIES for:/{
        disk="";
    } /Vendor:/{
        vendor=$NF;
    } /Serial Num:/{
        serial=$NF;
    } /Unformatted capacity:/{
        capacity=$(NF-1)""$NF;
    } disk != "" && /^$/{
        printf "%s vendor=%s serial=%s capacity=%s\n",disk,vendor,serial,capacity;
    }' | \
    sort -u
</syntaxhighlight>
==Label Disks==
===Single Disk===
<syntaxhighlight lang=bash>
# printf 'type 0 no no\nlabel 1 yes\npartition\n0 usr wm 8192 $\nlabel 1 yes\nquit\nquit\n' | \
    format -e /dev/rdsk/<disk>
</syntaxhighlight>
===All FC disks===
On x86 you first have to run format -> fdisk -> y for every disk :-\
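A possible way to script that x86-only fdisk step instead of answering it interactively, sketched under two assumptions: the paths printed by the dump_map pipeline end in s2 (as in the chdg example below), and a default fdisk layout with one Solaris partition spanning the whole disk is acceptable:
<syntaxhighlight lang=bash>
# Sketch: write a default full-disk Solaris fdisk partition on every FC LUN (x86 only).
# Reuses the luxadm pipeline from "Discover LUNs"; only run it against disks meant for ASM.
luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u | \
    sed -e 's|s2$|p0|' | \
    xargs -n 1 fdisk -B
</syntaxhighlight>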
DON'T DO THE NEXT STEP IF YOU DO NOT KNOW WHAT YOU ARE DOING!
format_command_file.txt:
<syntaxhighlight lang=bash>
type 0 no no
label 1 yes
partition
0 usr wm 8192 $
label 1 yes
quit
quit
</syntaxhighlight>
<syntaxhighlight lang=bash>
# luxadm -e port | \
    nawk '{print $1}' | \
    xargs -n 1 luxadm -e dump_map | \
    nawk '/Disk device/{print $5}' | \
    sort -u | \
    xargs luxadm display | \
    nawk '
    /DEVICE PROPERTIES for disk:/{
        disk=$NF;
    } /DEVICE PROPERTIES for:/{
        disk="";
    } disk && /^$/{
        printf "%s\n",disk;
    }' | \
    sort -u | \
    xargs -n 1 format -e -f ~/format_command_file.txt
</syntaxhighlight>
<syntaxhighlight lang=bash>
# chown -RL grid:asmadmin /dev/rdsk/c0t6000*
# chmod 660 /dev/rdsk/c0t6000*
</syntaxhighlight>
=Set swap to physical RAM=
<syntaxhighlight lang=bash>
# export RAM=256G
# swap -d /dev/zvol/dsk/rpool/swap
# zfs destroy rpool/swap
# zfs create \
    -V ${RAM} \
    -b 8k \
    -o primarycache=metadata \
    -o checksum=on \
    -o dedup=off \
    -o encryption=off \
    -o compression=off \
    rpool/swap
# swap -a /dev/zvol/dsk/rpool/swap
</syntaxhighlight>
=Network=
==Check port ranges==
<syntaxhighlight lang=bash>
# for protocol in tcp udp ; do ipadm show-prop ${protocol} -p smallest_anon_port,largest_anon_port ; done
PROTO PROPERTY              PERM CURRENT      PERSISTENT   DEFAULT      POSSIBLE
tcp   smallest_anon_port    rw   9000         9000         32768        1024-65500
tcp   largest_anon_port     rw   65500        65500        65535        9000-65535
udp   smallest_anon_port    rw   9000         9000         32768        1024-65500
udp   largest_anon_port     rw   65500        65500        65535        9000-65535
</syntaxhighlight>
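If the current values differ from the 9000-65500 range shown above, they can be adjusted with ipadm; a short sketch (the range is taken from the output above, and ipadm set-prop is persistent by default):
<syntaxhighlight lang=bash>
# ipadm set-prop -p smallest_anon_port=9000 tcp
# ipadm set-prop -p largest_anon_port=65500 tcp
# ipadm set-prop -p smallest_anon_port=9000 udp
# ipadm set-prop -p largest_anon_port=65500 udp
</syntaxhighlight>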
==Setup private cluster interconnects==
Example with a small /29 network: eight addresses minus network and broadcast leaves six usable IPs, which obviously limits the cluster to a maximum of six nodes.
First node:
<syntaxhighlight lang=bash>
# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.1/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.9/29 net5/ci2
</syntaxhighlight>
Second node:
<syntaxhighlight lang=bash>
# ipadm create-ip net1
# ipadm create-addr -T static -a 10.65.0.2/29 net1/ci1
# ipadm create-ip net5
# ipadm create-addr -T static -a 10.65.0.10/29 net5/ci2
</syntaxhighlight>
==Set slew always for ntp==
After configuring ntp, set slew_always to avoid time warps!
<syntaxhighlight lang=bash>
# svccfg -s svc:/network/ntp:default setprop config/slew_always = true
# svcadm refresh svc:/network/ntp:default
# svccfg -s svc:/network/ntp:default listprop config/slew_always
config/slew_always  boolean  true
</syntaxhighlight>
=Patching=
==Upgrade OPatch==
Do as root:
<syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OPATCH_PATCH_ZIP=~oracle/orainst/p6880880_112000_Solaris86-64.zip

zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
eval mv ${ORACLE_HOME}/{$(opatch version | nawk '/OPatch Version:/{print $1","$1"_"$NF;}')}
unzip -d ${ORACLE_HOME} ${OPATCH_PATCH_ZIP}
chown -R grid:oinstall ${ORACLE_HOME}/OPatch
zfs snapshot -r rpool/grid@$(opatch version | nawk '/OPatch Version:/{print $1"_"$NF;}')
</syntaxhighlight>
==Apply PSU==
On first node as user grid:
<syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
OCM_RSP=~grid/ocm_gridcluster1.rsp
${ORACLE_HOME}/OPatch/ocm/bin/emocmrsp -output ${OCM_RSP}
scp ${OCM_RSP} <other node1>:
scp ${OCM_RSP} <other node2>:
...
</syntaxhighlight>
On all nodes do as root:
<syntaxhighlight lang=bash>
export ORACLE_HOME=/opt/gridhome/11.2.0.4
export PATH=${PATH}:${ORACLE_HOME}/bin
export PATH=${PATH}:${ORACLE_HOME}/OPatch

OCM_RSP=~grid/ocm_gridcluster1.rsp
PSU_DIR=~oracle/orainst/psu
PSU_ZIP=~oracle/orainst/p22378167_112040_Solaris86-64.zip
PSU=~oracle/orainst/psu/22378167

su - grid -c "mkdir -p ${PSU_DIR}"
su - grid -c "unzip -d ${PSU_DIR} ${PSU_ZIP}"
su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_before_${PSU##*/}"

zfs snapshot -r rpool/grid@before_psu_${PSU##*/}
cd ~grid
for patch in $(find ${PSU} -name bundle.xml | xargs -n 1 dirname) ; do
    opatch auto ${patch} -oh ${ORACLE_HOME} -ocmrf ${OCM_RSP}
done

$ORACLE_HOME/crs/install/rootcrs.pl -unlock    # <-- on all nodes
# For every other patch do:
su - grid -c "cd ${patchdir} ; opatch prereq CheckConflictAgainstOHWithDetail -ph ./"    # <-- only on first node
su - grid -c "cd ${patchdir} ; opatch apply"                                             # <-- only on first node
$ORACLE_HOME/crs/install/rootcrs.pl -patch     # <-- on all nodes

zfs snapshot -r rpool/grid@after_psu_${PSU##*/}
${ORACLE_HOME}/bin/emctl start dbconsole
su - grid -c "opatch lsinventory -detail -oh ${ORACLE_HOME} > ~grid/lsinventory_after_${PSU##*/}"
</syntaxhighlight>
==Configure local listener to another port==
As grid user:
<syntaxhighlight lang=bash>
$ srvctl modify listener -l LISTENER -o ${ORACLE_HOME} -p "TCP:50650"
$ srvctl config listener
Name: LISTENER
Network: 1, Owner: grid
Home: <CRS home>
End points: TCP:50650
$ srvctl stop listener -l LISTENER ; srvctl start listener -l LISTENER
$ sqh
SQL> show parameter list

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
listener_networks                    string
local_listener                       string      (DESCRIPTION=(ADDRESS_LIST=(A
                                                 DDRESS=(PROTOCOL=TCP)(HOST=172
                                                 .1.20.1)(PORT=1521))))
remote_listener                      string

SQL> alter system set local_listener ="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=172.1.20.1)(PORT=50650))))" SID='+ASM1' ;

System altered.
SQL> ^D
</syntaxhighlight>
=ASM=
==Create ASM diskgroups==
LUNs.txt contains all disks with:
* one line per disk.
* each disk in the first field.
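A hypothetical example of what LUNs.txt can look like when it is built from the output of the Discover LUNs pipeline above (the device names follow the pattern of the data_config.xml example below; vendor, serial and capacity are placeholders and are not used by the scripts):
<syntaxhighlight lang=text>
/dev/rdsk/c0t60002AC000000000C903010650004002d0s2 vendor=<vendor> serial=<serial> capacity=<capacity>
/dev/rdsk/c0t60002AC000000000C903010650004003d0s2 vendor=<vendor> serial=<serial> capacity=<capacity>
/dev/rdsk/c0t60002AC000000000C906010650004002d0s2 vendor=<vendor> serial=<serial> capacity=<capacity>
...
</syntaxhighlight>
The range patterns like /002d0/,/011d0/ in the scripts below select LUNs by the trailing part of the device name, and the C903/C906/C061/C062 substrings map each LUN to a failure group per storage box.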
===Example for chdg===
<syntaxhighlight lang=awk>
# nawk -v type='DATA' '
BEGIN {
    printf "<chdg name=\"%s\" power=\"3\">\n",type;
} /002d0/,/011d0/ {
    if(/C903/){storage="HSA1";};
    if(/C906/){storage="HSA2";};
    if(/C061/){storage="HSA3";};
    if(/C062/){storage="HSA4";};
    if(/002d0/){ # first disk
        count=1;
        printf "  <add>\n";
        printf "    <fg name=\"%s_%s\">\n",storage,type;
    };
    gsub(/s2$/,"s0",$1);
    printf "      <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
    if(/011d0/){ # last disk
        print "    </fg>";
        print "  </add>";
    }
} END {
    printf "<a name=\"compatible.asm\" value=\"11.2\"/>\n";
    printf "<a name=\"compatible.rdbms\" value=\"11.2\"/>\n";
    printf "<a name=\"compatible.advm\" value=\"11.2\"/>\n";
    printf "</chdg>\n";
}
' LUNs.txt
</syntaxhighlight>
===Example for mkdg===
<syntaxhighlight lang=awk>
# nawk -v type='FRA' '
BEGIN {
    printf "<dg name=\"%s\" redundancy=\"normal\">\n",type;
} /012d0/,/015d0/ {
    if(/C903/){storage="HSA1";};
    if(/C906/){storage="HSA2";};
    if(/C061/){storage="HSA3";};
    if(/C062/){storage="HSA4";};
    if(/012d0/){ # first disk
        count=1;
        printf "  <fg name=\"%s_%s\">\n",storage,type;
    };
    gsub(/s2$/,"s0",$1);
    printf "    <dsk name=\"%s_%s%02d\" string=\"%s\"/>\n",storage,type,count++,$1;
    if(/015d0/){ # last disk
        print "  </fg>";
    }
} END {
    printf "<a name=\"compatible.asm\" value=\"11.2\"/>\n";
    printf "<a name=\"compatible.rdbms\" value=\"11.2\"/>\n";
    printf "<a name=\"compatible.advm\" value=\"11.2\"/>\n";
    printf "</dg>\n";
}
' LUNs.txt
</syntaxhighlight>
data_config.xml:
<syntaxhighlight lang=xml>
<chdg name="data" power="3">
  <add>
    <fg name="HSA1_DATA">
      <dsk name="HSA1_DATA01" string="/dev/rdsk/c0t60002AC000000000C903010650004002d0s0"/>
      <dsk name="HSA1_DATA02" string="/dev/rdsk/c0t60002AC000000000C903010650004003d0s0"/>
      <dsk name="HSA1_DATA03" string="/dev/rdsk/c0t60002AC000000000C903010650004004d0s0"/>
      <dsk name="HSA1_DATA04" string="/dev/rdsk/c0t60002AC000000000C903010650004005d0s0"/>
      <dsk name="HSA1_DATA05" string="/dev/rdsk/c0t60002AC000000000C903010650004006d0s0"/>
      <dsk name="HSA1_DATA06" string="/dev/rdsk/c0t60002AC000000000C903010650004007d0s0"/>
      <dsk name="HSA1_DATA07" string="/dev/rdsk/c0t60002AC000000000C903010650004008d0s0"/>
      <dsk name="HSA1_DATA08" string="/dev/rdsk/c0t60002AC000000000C903010650004009d0s0"/>
      <dsk name="HSA1_DATA09" string="/dev/rdsk/c0t60002AC000000000C903010650004010d0s0"/>
      <dsk name="HSA1_DATA10" string="/dev/rdsk/c0t60002AC000000000C903010650004011d0s0"/>
    </fg>
  </add>
  <add>
    <fg name="HSA2_DATA">
      <dsk name="HSA2_DATA01" string="/dev/rdsk/c0t60002AC000000000C906010650004002d0s0"/>
      <dsk name="HSA2_DATA02" string="/dev/rdsk/c0t60002AC000000000C906010650004003d0s0"/>
      <dsk name="HSA2_DATA03" string="/dev/rdsk/c0t60002AC000000000C906010650004004d0s0"/>
      <dsk name="HSA2_DATA04" string="/dev/rdsk/c0t60002AC000000000C906010650004005d0s0"/>
      <dsk name="HSA2_DATA05" string="/dev/rdsk/c0t60002AC000000000C906010650004006d0s0"/>
      <dsk name="HSA2_DATA06" string="/dev/rdsk/c0t60002AC000000000C906010650004007d0s0"/>
      <dsk name="HSA2_DATA07" string="/dev/rdsk/c0t60002AC000000000C906010650004008d0s0"/>
      <dsk name="HSA2_DATA08" string="/dev/rdsk/c0t60002AC000000000C906010650004009d0s0"/>
      <dsk name="HSA2_DATA09" string="/dev/rdsk/c0t60002AC000000000C906010650004010d0s0"/>
      <dsk name="HSA2_DATA10" string="/dev/rdsk/c0t60002AC000000000C906010650004011d0s0"/>
    </fg>
  </add>
  <a name="compatible.asm" value="11.2"/>
  <a name="compatible.rdbms" value="11.2"/>
  <a name="compatible.advm" value="11.2"/>
</chdg>
</syntaxhighlight>
asmh:
<syntaxhighlight lang=oracle11>
ASMCMD [+] > chdg data_config.xml
</syntaxhighlight>
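To verify the result, the diskgroups can be listed from the same ASMCMD session; a brief example (lsdg is a standard ASMCMD command, the exact output depends on your setup):
<syntaxhighlight lang=oracle11>
ASMCMD [+] > lsdg
</syntaxhighlight>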