SunCluster Delete Ressource Group
Complete removal of a resource group, including a derivation of the data that is used later in the one-liners.

Do not do this! Once again, I take no responsibility here! It is all wrong! Don't do it!
==Set the resource group in question==
<syntaxhighlight lang=bash>
# RG=my-rg
</syntaxhighlight>
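A quick sanity check that the variable points at an existing group (a minimal sketch, not from the original page; `my-rg` is just the placeholder name from above):
<syntaxhighlight lang=bash>
# Show the current state of the resource group on all nodes
clrg status ${RG}
</syntaxhighlight>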
==Show the resources==
<syntaxhighlight lang=bash>
# clrs list -g ${RG}
my-nsr-res
my-oracle-res
my-lh-res
my-zone-res
my-hasp-zfs-res
</syntaxhighlight>
==Take the resource group and its resources offline==
<syntaxhighlight lang=bash>
# clrg offline ${RG}
# clrs list -g ${RG} | xargs clrs disable
</syntaxhighlight>
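Before going on, it is worth verifying that the group really is offline and all of its resources are disabled (a hedged check, not part of the original page):
<syntaxhighlight lang=bash>
# The group should be Offline on every node ...
clrg status ${RG}
# ... and every resource in it should report disabled
clrs status -g ${RG}
</syntaxhighlight>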
==Show the ZPools==
<syntaxhighlight lang=bash>
# clrs show -p ZPools -g ${RG}
...
=== Resources ===

Resource:                                       my-hasp-zfs-res

  --- Standard and extension properties ---

  Zpools:                                       my_pool my-redo1_pool my-redo2_pool
    Class:                                      extension
    Description:                                The list of zpools
    Per-node:                                   False
    Type:                                       stringarray
...
</syntaxhighlight>
==Show only the ZPool names==
<syntaxhighlight lang=bash>
# clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}'
my_pool my-redo1_pool my-redo2_pool
</syntaxhighlight>
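The nawk filter blanks out the ``Zpools:`` label and prints the rest of the line. If the pool list is needed several times, it can also be captured once in a shell variable (a small convenience sketch; the name `ZPOOLS` is introduced here and is not from the original page):
<syntaxhighlight lang=bash>
# Capture the pool names once instead of re-running clrs show each time
ZPOOLS=$(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}')
echo ${ZPOOLS}
</syntaxhighlight>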
==Show the DID devices==
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs cldev list -vn $(hostname)
DID Device          Full Device Path
----------          ----------------
d53                 node06:/dev/rdsk/c0t600A0B80006E103C00000B9B50B2F83Ed0
d38                 node06:/dev/rdsk/c0t600A0B80006E10020000D54150B2FF26d0
d57                 node06:/dev/rdsk/c0t600A0B80006E103C00000B9E50B2F9FFd0
d50                 node06:/dev/rdsk/c0t600A0B80006E10020000D54450B300C8d0
d46                 node06:/dev/rdsk/c0t600A0B80006E103C00000BA250B3098Ad0
d28                 node06:/dev/rdsk/c0t600A0B80006E10020000D54850B310C2d0
d55                 node06:/dev/rdsk/c0t600A0B80006E134400000B5350B2FB08d0
d56                 node06:/dev/rdsk/c0t600A0B80006E10E40000D6F450B2FBB1d0
d40                 node06:/dev/rdsk/c0t600A0B80006E134400000B5950B30D8Bd0
d45                 node06:/dev/rdsk/c0t600A0B80006E10E40000D6FA50B30E62d0
</syntaxhighlight>
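The nested one-liner is easier to follow when broken into steps. This is the same pipeline, just reformatted with comments; the logic is unchanged:
<syntaxhighlight lang=bash>
# For every pool of the resource group, import it briefly so zpool status
# can list its disks, then export it again to leave it as we found it.
for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}')
do
  zpool import ${zpool} 2>/dev/null
  zpool status ${zpool}
  zpool export ${zpool}
done |
nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}' |   # keep the cXtY...dZ names, strip the slice
while read disk ; do echo /dev/rdsk/${disk} ; done |
xargs cldev list -vn $(hostname)                   # map the raw devices to DID instances
</syntaxhighlight>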
or just the DIDs:
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs scdidadm -lo instance
53
38
57
50
46
28
55
56
40
45
</syntaxhighlight>
==Disable device monitoring==
This is important in order to get the devices completely out of the cluster later!
<syntaxhighlight lang=bash>
# for disk in $(for zpool in $(clrs show -p ZPools -g ${RG} | nawk '$1=="Zpools:"{$1="";print $0;}' ) ; do zpool import ${zpool} 2>/dev/null ; zpool status ${zpool} ; zpool export ${zpool} ; done | nawk '/c[0-9]+t/{gsub(/s.*$/,"",$1);print $1}') ; do echo /dev/rdsk/${disk}; done | xargs scdidadm -lo name | xargs cldev unmonitor
</syntaxhighlight>
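Whether the unmonitoring took effect can be checked afterwards (a hedged check, not from the original page):
<syntaxhighlight lang=bash>
# The DID devices in question should now be listed as unmonitored
cldev status -n $(hostname)
</syntaxhighlight>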
==Delete the resource group==
<syntaxhighlight lang=bash>
# RG=bla-rg
# clrs disable -g ${RG} +
# clrs delete -g ${RG} +
# clrg delete ${RG}
</syntaxhighlight>
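A final check that neither the group nor any of its resources survived (again just a sketch):
<syntaxhighlight lang=bash>
# The deleted group and its resources should no longer show up
clrg list
clrs list
</syntaxhighlight>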
Now unmap the LUNs on the storage, and delete them if appropriate...
==Remove LUNs that no longer exist from Solaris==
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do ssh ${node} cfgadm -alo show_SCSI_LUN | nawk '$NF=="unusable"{gsub(/,[0-9]+$/,"",$1);print $1}' | sort -u | xargs -n 1 ssh ${node} cfgadm -c unconfigure -o unusable_SCSI_LUN ; ssh ${node} devfsadm -C -v -c disk ; done
</syntaxhighlight>
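Again, the same loop reformatted with comments in case the one-liner is hard to parse; the logic is unchanged:
<syntaxhighlight lang=bash>
for node in $(clnode list)
do
  # Find the attachment points whose LUNs cfgadm reports as unusable,
  # strip the trailing ,<LUN> part, and unconfigure each of them once
  ssh ${node} cfgadm -alo show_SCSI_LUN |
    nawk '$NF=="unusable"{gsub(/,[0-9]+$/,"",$1);print $1}' |
    sort -u |
    xargs -n 1 ssh ${node} cfgadm -c unconfigure -o unusable_SCSI_LUN
  # Then clean up the stale device nodes under /dev
  ssh ${node} devfsadm -C -v -c disk
done
</syntaxhighlight>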
==Clean up the DIDs==
<syntaxhighlight lang=bash>
# for node in $(clnode list) ; do cldev refresh -n ${node} ; cldev clear -n ${node} ; done
</syntaxhighlight>
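To confirm that the stale DIDs are really gone (a hedged check):
<syntaxhighlight lang=bash>
# The DID instances of the removed LUNs should no longer be listed
cldev list -v
</syntaxhighlight>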
==Clean up zone configurations if needed==
<syntaxhighlight lang=bash>
# ZONE=my-zone
# for node in $(clnode list) ; do ssh ${node} zonecfg -z ${ZONE} delete -F ; done
</syntaxhighlight>
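And a last check that the zone configuration is gone from every node (a minimal sketch; `my-zone` is just the placeholder from above):
<syntaxhighlight lang=bash>
# The zone should no longer appear in the configured-zones list on any node
for node in $(clnode list) ; do ssh ${node} zoneadm list -cv ; done
</syntaxhighlight>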