SunCluster Delete Ressource Group
==Completely removing a Resource Group==
Derivation of the data that is used later in the one-liners.
Do not do this! Once again, I take no responsibility here! Everything is wrong! Don't do it!
==Set the resource group in question==
<syntaxhighlight lang=bash>
# RG=my-rg
</syntaxhighlight>
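A quick sanity check that the name is spelled correctly before running the one-liners below (the group has to show up in the cluster's list of resource groups):
<syntaxhighlight lang=bash>
# clrg list | grep "^${RG}$"
my-rg
</syntaxhighlight>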
==Show the resources==
<syntaxhighlight lang=bash>
# clrs list -g ${RG}
my-nsr-res
my-oracle-res
my-lh-res
my-zone-res
my-hasp-zfs-res
</syntaxhighlight>
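If it is not obvious which of these is the HAStoragePlus resource holding the zpools, the verbose listing also prints the resource type (sketch, output omitted):
<syntaxhighlight lang=bash>
# clrs list -v -g ${RG}
</syntaxhighlight>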
==Take the resource group and its resources offline==
<syntaxhighlight lang=bash>
# clrg offline ${RG}
# clrs list -g ${RG} | xargs clrs disable
</syntaxhighlight>
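Before anything gets deleted it is worth confirming that the group really is offline and all of its resources are disabled:
<syntaxhighlight lang=bash>
# clrg status ${RG}
# clrs status -g ${RG}
</syntaxhighlight>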
==Show the ZPools==
<syntaxhighlight lang=bash>
# clrs show -p ZPools -g ${RG}
...
Resources
Resource: my-hasp-zfs-res
--- Standard and extension properties ---
Zpools:        my_pool my-redo1_pool my-redo2_pool
Class:         extension
Description:   The list of zpools
Per-node:      False
Type:          stringarray
...
</syntaxhighlight>
==Show only the ZPool names==
<syntaxhighlight lang=bash>
# clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}'
my_pool my-redo1_pool my-redo2_pool
</syntaxhighlight>
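The same nawk filter is repeated in every one-liner below. As a small convenience (a sketch only; the ZPOOLS variable is not used anywhere else on this page), the list can be captured once in a shell variable:
<syntaxhighlight lang=bash>
# ZPOOLS=$(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}')
# echo ${ZPOOLS}
my_pool my-redo1_pool my-redo2_pool
</syntaxhighlight>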
==Show the DID devices==
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs cldev list -vn $(hostname)
DID Device          Full Device Path
----------          ----------------
d53                 node06:/dev/rdsk/c0t600A0B80006E103C00000B9B50B2F83Ed0
d38                 node06:/dev/rdsk/c0t600A0B80006E10020000D54150B2FF26d0
d57                 node06:/dev/rdsk/c0t600A0B80006E103C00000B9E50B2F9FFd0
d50                 node06:/dev/rdsk/c0t600A0B80006E10020000D54450B300C8d0
d46                 node06:/dev/rdsk/c0t600A0B80006E103C00000BA250B3098Ad0
d28                 node06:/dev/rdsk/c0t600A0B80006E10020000D54850B310C2d0
d55                 node06:/dev/rdsk/c0t600A0B80006E134400000B5350B2FB08d0
d56                 node06:/dev/rdsk/c0t600A0B80006E10E40000D6F450B2FBB1d0
d40                 node06:/dev/rdsk/c0t600A0B80006E134400000B5950B30D8Bd0
d45                 node06:/dev/rdsk/c0t600A0B80006E10E40000D6FA50B30E62d0
</syntaxhighlight>
or just the DIDs:
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs scdidadm -lo instance
53
38
57
50
46
28
55
56
40
45
</syntaxhighlight>
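Whether these devices are still being monitored (that is exactly what the next step switches off) can be queried per DID device; a sketch, assuming cldev status accepts DID device names as operands:
<syntaxhighlight lang=bash>
# cldev status d40 d45
</syntaxhighlight>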
==Disable device monitoring==
This is important so that the devices can later be removed from the cluster completely!
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs scdidadm -lo name | xargs cldev unmonitor
</syntaxhighlight>
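The one-liner is fairly dense. The same steps written out as a multi-line sketch (no shell prompt; like the one-liner, it assumes the pools are currently exported and importable on this node):
<syntaxhighlight lang=bash>
# Briefly import each zpool of the resource group, read its disk list from
# 'zpool status', and export it again.  The nawk filter keeps only the
# cXtY...dZ disk names and strips the slice suffix; the rest maps the raw
# device paths to DID names and switches monitoring off for them.
for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}') ; do
    zpool import ${zpool} 2>/dev/null
    zpool status ${zpool}
    zpool export ${zpool}
done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}' \
     | while read disk ; do echo /dev/rdsk/${disk} ; done \
     | xargs scdidadm -lo name \
     | xargs cldev unmonitor
</syntaxhighlight>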
==Delete the resource group==
<syntaxhighlight lang=bash>
# RG=bla-rg
# clrs disable -g ${RG} +
# clrs delete -g ${RG} +
# clrg delete ${RG}
</syntaxhighlight>
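Afterwards neither the group nor its resources should show up any more:
<syntaxhighlight lang=bash>
# clrg list
# clrs list
</syntaxhighlight>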
==Now unmap the LUNs on the storage==
And delete them there if necessary...
==Remove LUNs that no longer exist from Solaris==
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do ssh ${node} cfgadm -alo show_SCSI_LUN | nawk '$NF=="unusable"{gsub(/,[0-9]+$/,"",$1);print $1}' | sort -u | xargs -n 1 ssh ${node} cfgadm -c unconfigure -o unusable_SCSI_LUN ; ssh ${node} devfsadm -C -v -c disk ; done
</syntaxhighlight>
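To confirm that no unusable LUN paths are left over on any node, the same cfgadm filter can be run again; it should not print anything any more:
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do ssh ${node} cfgadm -alo show_SCSI_LUN | nawk '$NF=="unusable"{print $1}' ; done
</syntaxhighlight>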
==Clean up the DIDs==
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do cldev refresh -n ${node} ; cldev clear -n ${node} ; done
</syntaxhighlight>
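The freed DID instances should no longer appear in the device list afterwards, which can be checked for example with:
<syntaxhighlight lang=bash>
# cldev list -v
</syntaxhighlight>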
==If needed, clean up the zone configurations==
<syntaxhighlight lang=bash>
# ZONE=my-zone
# for node in $(clnode list) ; do ssh ${node} zonecfg -z ${ZONE} delete -F ; done
</syntaxhighlight>
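To make sure the configuration is really gone on every node, the configured zones can be listed per node (zoneadm list -cv shows configured and installed zones):
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do ssh ${node} zoneadm list -cv ; done
</syntaxhighlight>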