From 1eb76c3273aecd6a99cb71813c3951d4162cac32 Mon Sep 17 00:00:00 2001
From: lpinne
Date: Tue, 2 Jul 2024 12:13:30 +0200
Subject: [PATCH] SLES4SAP-hana-angi-perfopt-15.adoc SLES4SAP-hana-angi-scaleout-perfopt-15.adoc: Abstract. Misc. details, alert-fencing

---
 adoc/SLES4SAP-hana-angi-perfopt-15.adoc       |  49 +++--
 ...LES4SAP-hana-angi-scaleout-perfopt-15.adoc | 205 ++++++++++++------
 2 files changed, 167 insertions(+), 87 deletions(-)

diff --git a/adoc/SLES4SAP-hana-angi-perfopt-15.adoc b/adoc/SLES4SAP-hana-angi-perfopt-15.adoc
index 107ce4f0..983278e4 100644
--- a/adoc/SLES4SAP-hana-angi-perfopt-15.adoc
+++ b/adoc/SLES4SAP-hana-angi-perfopt-15.adoc
@@ -75,25 +75,31 @@ Ask your public cloud provider or your SUSE contact for more information.

 See <> for details.

+NOTE: In this guide, the software package SAPHanaSR-angi is used. This package replaces
+the two packages SAPHanaSR and SAPHanaSR-ScaleOut. Thus, new deployments should be
+done with SAPHanaSR-angi only. For upgrading existing clusters to SAPHanaSR-angi,
+please read the blog article
+https://www.suse.com/c/how-to-upgrade-to-saphanasr-angi/ .
+
 ==== Scale-up versus scale-out

 // TODO PRIO2: add stonith resource to the graphic

-The first set of scenarios includes the architecture and development of _scale-up_ solutions.
+The first set of scenarios includes the topology of _scale-up_ solutions.

 .{HANA} System Replication Scale-Up in the Cluster
 image::hana_sr_in_cluster.svg[scaledwidth=70.0%]

-For these scenarios, {SUSE} has developed the scale-up
-resource agent package `{SAPHanaSR}`. System replication helps to
-replicate the database data from one computer to another computer to compensate for database failures (single-box replication).
+These scenarios are covered by the package SAPHanaSR-angi. System replication
+helps to replicate the database data from one computer to another computer to
+compensate for database failures (single-box replication).

 //.{HANA} System Replication Scale-Up in the Cluster
 //image::hana_sr_in_cluster.svg[scaledwidth=100.0%]

-The second set of scenarios includes the architecture and development of
-_scale-out_ solutions (multi-box replication). For these scenarios, {SUSE}
-has developed the scale-out resource agent package `{SAPHanaSR}-ScaleOut`.
+The second set of scenarios includes the topology of _scale-out_ solutions
+(multi-box replication). These scenarios are also covered by the package
+SAPHanaSR-angi.

 .{HANA} System Replication Scale-Out in the Cluster
 image::SAPHanaSR-ScaleOut-Cluster.svg[scaledwidth=70.0%]
@@ -1859,9 +1865,8 @@ clone cln_SAPHanaTop_{sapsid}_HDB{sapino} rsc_SAPHanaTop_{sapsid}_HDB{sapino} \
  meta clone-node-max=1 interleave=true
 ----

-Additional information about all parameters can be found with the command:
-
-`man ocf_suse_SAPHanaTopology`
+Additional information about all parameters can be found in the manual page
+ocf_suse_SAPHanaTopology(7).

 Again, add the configuration to the cluster.
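After the configuration has been loaded, the result can be verified as sketched below.
The resource ID uses `HA1` and `10` as placeholder SID and instance number; they are
not values defined by this guide.

----
# crm configure show cln_SAPHanaTop_HA1_HDB10
# crm_mon -1r
----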
@@ -1893,14 +1898,13 @@ primitive rsc_SAPHanaFil_{sapsid}_HDB{sapino} ocf:suse:SAPHanaFilesystem \
  op start interval=0 timeout=10 \
  op stop interval=0 timeout=20 \
  op monitor interval=12 timeout=120 \
- params SID={sapsid} InstanceNumber={sapino} ACTION_ON_FAIL="fence"
+ params SID={sapsid} InstanceNumber={sapino} ON_FAIL_ACTION="fence"

 clone cln_SAPHanaFil_{sapsid}_HDB{sapino} rsc_SAPHanaFil_{sapsid}_HDB{sapino} \
  meta clone-node-max=1 interleave=true
 ----

-Additional information about all parameters can be found with the command:
-
-`man ocf_suse_SAPHanaFilesystem`
+Additional information about all parameters can be found in the manual page
+ocf_suse_SAPHanaFilesystem(7).

 Again, add the configuration to the cluster.

@@ -1910,7 +1914,7 @@ Again, add the configuration to the cluster.
 ----

 The most important parameters here are SID and InstanceNumber, which are
-quite self explaining in the SAP context. ACTION_ON_FAIL defines how the RA
+quite self-explanatory in the SAP context. ON_FAIL_ACTION defines how the RA
 should react on monitor failures. Beside these parameters, typical tuneables
 are the timeout values or the operations (start, monitor, stop).

@@ -1960,12 +1964,19 @@ primary will be registered after the time difference is passed. If "only" the
 SAP HANA RDBMS has crashed, the former primary will be registered immediately.
 After this registration to the new primary, all data will be overwritten by the
 system replication.
+
+|ON_FAIL_ACTION
+| Defines how the RA escalates monitor failures on a HANA primary node.
+If srHook=SOK, a monitor failure can trigger node fencing.
+For srHook=SFAIL, the restart proceeds as usual. ON_FAIL_ACTION=*fence*
+may speed up the takeover, depending on how long HANA needs to stop.
+See also SAPHanaSR-alert-fencing(8). This option is currently *unsupported*.
 |=======================================================================

-Additional information about all parameters of the SAPHanaController RA can be
-found with the following command:
+// TODO PRIO1: change above, once it is supported

-`man ocf_suse_SAPHanaController`
+Additional information about all parameters of the SAPHanaController RA can be
+found in the manual page ocf_suse_SAPHanaController(7).

 [subs="attributes,quotes"]
 ----
@@ -3340,7 +3351,7 @@ primitive rsc_SAPHanaTop_{sapsid}_HDB{sapino} ocf:suse:SAPHanaTopology \

 primitive rsc_SAPHanaFil_{sapsid}_HDB{sapino} ocf:suse:SAPHanaFilesystem \
  op start interval=0 timeout=10 \
  op stop interval=0 timeout=20 \
- op monitor interval=120 timeout=120 ACTION_ON_FAIL="fence" \
+ op monitor interval=120 timeout=120 ON_FAIL_ACTION="fence" \
  params SID={sapsid} InstanceNumber={sapino}

 primitive rsc_SAPHanaCon_{sapsid}_HDB{sapino} ocf:suse:SAPHana \

diff --git a/adoc/SLES4SAP-hana-angi-scaleout-perfopt-15.adoc b/adoc/SLES4SAP-hana-angi-scaleout-perfopt-15.adoc
index f6f30712..39c9ee7d 100644
--- a/adoc/SLES4SAP-hana-angi-scaleout-perfopt-15.adoc
+++ b/adoc/SLES4SAP-hana-angi-scaleout-perfopt-15.adoc
@@ -41,7 +41,6 @@ document at hand describes the synchronous replication from memory into memory
 of the second system. This is the only method that allows the cluster to make
 a decision based on coded algorithms.
-
 === Abstract

 This guide describes planning, setup, and basic testing of {sles4sap} {prodNr}
@@ -66,9 +65,15 @@ From the infrastructure perspective the following variants are covered:

 - On-premises deployment on physical and virtual machines
 - Public cloud deployment (usually needs additional documentation on cloud specific details)

-Deployment automation simplifies roll-out. There are several options available, particularly on public cloud platfoms. Ask your public cloud provider or your
+Deployment automation simplifies roll-out. There are several options available,
+particularly on public cloud platforms. Ask your public cloud provider or your
 {SUSE} contact for details.

+NOTE: In this guide, the software package SAPHanaSR-angi is used. This package replaces
+the two packages SAPHanaSR and SAPHanaSR-ScaleOut. Thus, new deployments should be
+done with SAPHanaSR-angi only. For upgrading existing clusters to SAPHanaSR-angi,
+please read the blog article
+https://www.suse.com/c/how-to-upgrade-to-saphanasr-angi/ .

 === Additional documentation and resources

@@ -204,8 +209,8 @@ plus the majority maker:

 - optional: 2nd additional IP address for active/read-enabled setup

 NOTE: The minimum lab requirements mentioned here are no SAP sizing information.
-These data are provided only to rebuild the described cluster in a lab for test purposes.
-Even for such tests the requirements can increase depending on your test
+These data are provided only to rebuild the described cluster in a lab for test
+purposes. Even for such tests, the requirements can increase depending on your test
 scenario. For productive systems ask your hardware vendor or use the official
 SAP sizing tools and services.

@@ -290,9 +295,9 @@ With the current version of resource agents, {saphana} system replication for
 scale-out is supported in the following scenarios or use cases:

 Performance optimized, single container (A \=> B)::
-In the performance optimized scenario an {saphana} RDBMS on site "A" is synchronizing with an
-{saphana} RDBMS on a second site "B". As the {saphana} RDBMS on the second site
-is configured to preload the tables the takeover time is typically very short.
+In the performance optimized scenario, an {saphana} RDBMS on site "A" is synchronizing
+with an {saphana} RDBMS on a second site "B". As the {saphana} RDBMS on the second
+site is configured to preload the tables, the takeover time is typically very short.
 See also the requirements section below for details.

 Performance optimized, multi-tenancy also named MDC (%A \=> %B)::
@@ -311,7 +316,8 @@ feature AUTOMATED_REGISTER=true is not possible with pure Multi-Tier replication.
 See also the requirements section below.

 Multi-Target Replication (A \<= B \-> C)::
-This scenario and setup is described in this document. A Multi-Target system replication has an additional target, which is connected to
+This scenario and setup are described in this document. A Multi-Target system
+replication has an additional target, which is connected to
 either the secondary (chain topology) or to the primary (star topology).
 Multi-Target replication is possible since {saphana} 2.0 SPS04.
 See also the requirements section below.

@@ -319,7 +325,8 @@ See also the requirements section below.

 === The concept of the multi-target scenario

-A multi-target scenario consists of 3 sites. Site 1 and site 2 are in HA cluster while site 3 is outside the HA cluster.
+A multi-target scenario consists of 3 sites. Site 1 and site 2 are in the HA cluster,
+while site 3 is outside the HA cluster.
 In case of failure of the primary {saphana} on site 1 the cluster first tries to
 start the takeover process. This allows to use the already loaded data at the
@@ -342,10 +349,10 @@ also automatically register a former failed primary to get the new secondary.
 Find configuration details in manual page ocf_suse_SAPHanaController(7).

 The resource agent for HANA in a Linux cluster does not trigger a takeover to
-the secondary site when a software failure causes one or more HANA processes
-to be restarted. The same is valid when a hardware error causes the index server to restart locally.
-Therefor the SAPHanaSR-angi package contains the HA/DR provider hook script
-susChkSrv.py. For details see manual page susChkSrv.py(7).
+the secondary site when a software failure causes one or more HANA processes to
+be restarted. The same is valid when a hardware error causes the index server to
+restart locally. Therefore, the SAPHanaSR-angi package contains the HA/DR provider
+hook script susChkSrv.py. For details, see the manual page susChkSrv.py(7).

 Site 3 is connected as an additional system replication target to either {saphana}
 site inside the cluster. That two HANA sites need to be configured for automatically
@@ -475,17 +482,19 @@ image::SAPHanaSR-ScaleOut-MultiTarget-Plan-Phase2.svg[scaledwidth="100%"]
 installation of the operating system.

 In this document, first {sles4sap} is installed and configured. Then the {saphana}
-database including the system replication is set up. Next, the
-automation with the cluster is set up and configured. Finally, the multi-target setup of the 3rd site is set up and configured.
+database, including the system replication, is set up. Next, the automation with the
+cluster is set up and configured. Finally, the multi-target setup of the 3rd site
+is set up and configured.

 // TODO PRIO3: SAP notes reference - each note mentioned here should also be added to the appendix

 === Installing {sles4sap}

-Multiple installation guides are already existing, with different reasons to set up the server in a certain way.
-Below it is outlined where this information can be found.
-In addition, you will find important details you should consider to get a system which is well prepared to deliver {saphana}.
+Multiple installation guides already exist, with different reasons to set
+up the server in a certain way. Below it is outlined where this information can
+be found. In addition, you will find important details you should consider to get
+a system that is well prepared to deliver {saphana}.

 ==== Installing the base operating system

@@ -520,7 +529,7 @@ To do so, for example, use Zypper:

 .Uninstall the {sapHanaSR} agent for scale-up
 ====
-As user_root_ , type:
+As Linux user _root_, type:

 ----
 # zypper remove SAPHanaSR
 ----
@@ -1488,7 +1497,7 @@ active_master = hanaso0:3{Inst}01

 Refer to {saphana} documentation for details.

 // TODO PRIO2: link to SAP docu
-// TODO PRIO1: re-registering neccessary for cativating name server changes?
+// TODO PRIO1: re-registering necessary for activating name server changes?
 // TODO PRIO2: detailled command example for above change

@@ -1529,8 +1538,8 @@ The section `[trace]` might be adapted.

 The ready-to-use HA/DR hook script is shipped with the SAPHanaSR-angi package in
 directory /usr/share/SAPHanaSR-angi/.
 The hook script must be available on all cluster nodes, including the majority
-maker. Find more details in manual pages susChkSrv.py(7) and
-SAPHanaSR-manageProvider(8).
+maker. Find more details in the manual pages susChkSrv.py(7), SAPHanaSR-manageProvider(8)
+and SAPHanaSR-alert-fencing(8).

 .Adding susChkSrv.py via global.ini
 ===================================
@@ -1759,7 +1768,7 @@ SAPHanaSR-angi package which is also part of {sles4sap}.

 . Install the cluster packages
 . Basic cluster configuration
-. Configure cluster properties and resources
+. Configure cluster properties, resources, and alerts
 . Final steps

 === Installing the cluster packages
@@ -1788,8 +1797,8 @@ on *all* nodes

 === Configuring the basic cluster

-After having installed the cluster packages, the next step is to set up the basic cluster framework. For convenience, use
-YaST or the _ha-cluster-init_ script.
+After installing the cluster packages, the next step is to set up the basic
+cluster framework. For convenience, use YaST or the _ha-cluster-init_ script.

 [IMPORTANT]
 It is strongly recommended to add a second corosync ring, implement unicast (UCAST)
@@ -1803,24 +1812,25 @@ communication and adjust the timeout values to your environment.

 * STONITH method

 ==== Seting up watchdog for "Storage-based Fencing"

-It is recommended to use SBD as central STONITH device, as done in the example at hand. Each node constantly monitors
-connectivity to the storage device, and terminates itself in case the partition becomes unreachable.
-Whenever SBD is used, a
-correctly working watchdog is crucial. Modern systems support a hardware watchdog that needs to
-be "tickled" or "fed" by a software component. The software component (usually a daemon) regularly
-writes a service pulse to the watchdog. If the daemon stops feeding the watchdog, the hardware will
-enforce a system restart. This protects against failures of the SBD process itself, such as dying, or
-getting stuck on an I/O error.
+It is recommended to use SBD as the central STONITH device, as done in the example
+at hand. Each node constantly monitors connectivity to the storage device, and
+terminates itself in case the partition becomes unreachable. Whenever SBD is used,
+a correctly working watchdog is crucial. Modern systems support a hardware watchdog
+that needs to be "tickled" by a software component. The software component (usually
+a daemon) regularly writes a service pulse to the watchdog. If the daemon stops
+"tickling" the watchdog, the hardware will enforce a system restart. This protects
+against failures of the SBD process itself, such as dying or getting stuck on an
+I/O error.

 .Set up for Watchdog
 ====
 IMPORTANT: Access to the Watchdog Timer:
-No other software must access the watchdog timer. Some hardware vendors ship systems management
-software that uses the watchdog for system resets (for example, HP ASR daemon). Disable such
-software, if watchdog is used by SBD.
+No other software must access the watchdog timer. Some hardware vendors ship systems
+management software that uses the watchdog for system resets (for example, the HP ASR daemon).
+Disable such software if the watchdog is used by SBD.

-Determine the right watchdog module. Alternatively, you can find a list of installed drivers with your
-kernel version.
+Determine the right watchdog module. Alternatively, you can find a list of installed
+drivers with your kernel version.

 ----
 # ls -l /lib/modules/$(uname -r)/kernel/drivers/watchdog
 ----

 Check if any watchdog module is already loaded.

 ----
 # lsmod | egrep "(wd|dog|i6|iT|ibm)"
 ----

-If you get a result, the system has already a loaded watchdog. If the watchdog does not match
-your watchdog device, you need to unload the module.
+If you get a result, the system already has a loaded watchdog module. If the watchdog does
+not match your watchdog device, you need to unload the module.

 To safely unload the module, check first if an application is using the watchdog
 device.

@@ -1842,8 +1852,8 @@ To safely unload the module, check first if an application is using the watchdog

 # rmmod
 ----

-Enable your watchdog module and make it persistent. For the example below, _softdog_ has been used which has some
-restrictions and should not be used as first option.
+Enable your watchdog module and make it persistent. For the example below, _softdog_
+has been used, which has some restrictions and should not be used as the first option.

 ----
 # echo softdog > /etc/modules-load.d/watchdog.conf
@@ -1922,13 +1932,14 @@ As requested by _ha-cluster-init_, change the password of the user _hacluster_ o

 NOTE: Do not forget to change the password of the user _hacluster_.

 ==== Cluster configuration for all other cluster nodes

-The other nodes of the cluster could be integrated by starting the
-command _ha-cluster-join_. This command asks for the IP address or name of
-the *first* cluster node. Than all needed configuration files are copied over.
-As a result the cluster is started on *all* nodes. Do not forget the majority maker.
+The other nodes of the cluster can be integrated by starting the command
+_ha-cluster-join_. This command asks for the IP address or name of the *first*
+cluster node. Then all needed configuration files are copied over. As a result,
+the cluster is started on *all* nodes. Do not forget the majority maker.

 If you are using SBD as STONITH method, you need to activate the _softdog_ kernel
-module matching your systems. In the example at hand the _softdog_ kernel module is used.
+module matching your system. In the example at hand, the _softdog_ kernel module
+is used.

 [subs="specialchars,attributes"]
 ----
@@ -1985,8 +1996,8 @@ stonith-sbd (stonith:external/sbd): Started hanamm

 === Configuring cluster properties and resources

-This section describes how to configure bootstrap, STONITH, resources, and constraints
-using the _crm_ configure shell command as described in section
+This section describes how to configure bootstrap, STONITH, resources, and
+constraints using the _crm_ configure shell command as described in section
 _Configuring and Managing Cluster Resources (Command Line)_ of the {sleha}
 Administration Guide (see
 https://documentation.suse.com/sle-ha/15-SP4/html/SLE-HA-all/cha-ha-manual-config.html).

@@ -2105,8 +2116,9 @@ best way to avoid unexpected cluster reactions is to

 ==== SAPHanaTopology

-Next, define the group of resources needed, before the {saphana} instances can be
-started. Prepare the changes in a text file, for example _crm-saphanatop.txt_,
+SAPHanaTopology is the resource agent (RA) that analyzes the SAP HANA topology
+and writes its findings into the CIB.
+Prepare the RA configuration in a text file, for example _crm-saphanatop.txt_,
 and load these with the _crm_ command.

 If necessary, change the *SID* and *instance number* (bold) to appropriate
@@ -2165,6 +2177,8 @@ The most important parameters here are _SID_ ({SID}) and _InstanceNumber_ ({Inst}
 which are self explaining in an SAP context. Beside these parameters, the
 timeout values or the operations (start, monitor, stop) are typical values to be
 adjusted for your environment.
+Additional information about all parameters can be found in the manual page
+ocf_suse_SAPHanaTopology(7).

 ==== SAPHanaFilesystem

@@ -2192,7 +2206,7 @@ primitive rsc_SAPHanaFil_{refSID}_HDB{refInst} ocf:suse:SAPHanaFilesystem \
  op monitor interval="120" timeout="120" \
  op start interval="0" timeout="10" \
  op stop interval="0" timeout="20" \
- params SID="**{refSID}**" InstanceNumber="**{refInst}**" ACTION_ON_FAIL="fence"
+ params SID="**{refSID}**" InstanceNumber="**{refInst}**" ON_FAIL_ACTION="fence"

 clone cln_SAPHanaFil_{refSID}_HDB{refInst} rsc_SAPHanaFil_{refSID}_HDB{refInst} \
  meta clone-node-max="1" interleave="true"

@@ -2207,15 +2221,15 @@ primitive rsc_SAPHanaFil\_**{SID}**_HDB**{Inst}** ocf:suse:SAPHanaFilesystem \
  op monitor interval="120" timeout="120" \
  op start interval="0" timeout="10" \
  op stop interval="0" timeout="20" \
- params SID="**{SID}**" InstanceNumber="**{Inst}**" ACTION_ON_FAIL="fence"
+ params SID="**{SID}**" InstanceNumber="**{Inst}**" ON_FAIL_ACTION="fence"

 clone cln_SAPHanaFil_**{SID}**\_HDB**{Inst}** rsc_SAPHanaFil_**{SID}**_HDB**{Inst}** \
  meta clone-node-max="1" interleave="true"
 ----
 //=========================

-For additional information about all parameters, use the command
-`man ocf_suse_SAPHanaFilesystem`.
+Additional information about all parameters can be found in the manual page
+ocf_suse_SAPHanaFilesystem(7).

 Again, add the configuration to the cluster.

@@ -2226,7 +2240,7 @@ Again, add the configuration to the cluster.
 ----
 ==========

 The most important parameters here are _SID_ ({SID}) and _InstanceNumber_ ({Inst}),
-which are self explaining in an SAP context. ACTION_ON_FAIL defines how the RA
+which are self-explanatory in an SAP context. ON_FAIL_ACTION defines how the RA
 should react on monitor failures. See also manual page SAPHanaSR-alert-fencing(8).
 Beside these parameters, the timeout values or the operations (start, monitor,
 stop) are typical values to be adjusted for your environment.

@@ -2234,17 +2248,19 @@ stop) are typical values to be adjusted for your environment.

 ==== SAPHanaController

-Next, define the group of resources needed, before the {saphana} instances can be
-started. Edit the changes in a text file, for example `crm-saphanacon.txt` and
-load these with the command `crm`.
+SAPHanaController is the resource agent (RA) that controls the HANA database.
+Prepare the RA configuration in a text file, for example _crm-saphanacon.txt_,
+and load it with the _crm_ command.

+.Configure SAPHanaController
+==========
+Create the file crm-saphanacon.txt:
+
 [subs="specialchars,attributes"]
 ----
 # vi crm-saphanacon.txt
 ----

-.Configure SAPHanaController
-==========
 Enter the following to crm-saphanacon.txt

 [subs="specialchars,attributes"]
@@ -2331,12 +2347,67 @@ of the former primary crashed, the former primary will be registered after the
 time difference is passed. If "only" the {saphana} RDBMS has crashed, then the
 former primary will be registered immediately. After this registration to the new
 primary, all data will be overwritten by the system replication.
+
+|ON_FAIL_ACTION
+| Defines how the RA escalates monitor failures on a HANA primary node.
+If srHook=SOK, a monitor failure can trigger node fencing.
+For srHook=SFAIL, the restart proceeds as usual. ON_FAIL_ACTION=*fence*
+may speed up the takeover, depending on how long HANA needs to stop.
+See also SAPHanaSR-alert-fencing(8). This option is currently *unsupported*.
 |===

-Additional information about all parameters can be found with the command
-`man ocf_suse_SAPHana_Controller`.
+// TODO PRIO1: change above, once it is supported
+
+Additional information about all parameters can be found in the manual page
+ocf_suse_SAPHanaController(7).
+
+// TODO PRIO1: uncomment SAPHanaSR-alert-fencing, once it is supported
+////
+// begin SAPHanaSR-alert-fencing
+
+==== SAPHanaSR-alert-fencing alert agent
+
+SAPHanaSR-alert-fencing reacts to Linux cluster fencing alerts. When the Linux
+cluster has performed a node fencing, it calls SAPHanaSR-alert-fencing on each
+active cluster node. The agent checks whether the local node belongs to the same
+{HANA} site as the fenced node. If so, it asks the cluster to fence the local
+node as well.
+
+.Configure the fencing alert agent
+==========
+
+[subs="specialchars,attributes"]
+----
+# vi crm-alert-fencing.txt
+----
+
+Enter the following to _crm-alert-fencing.txt_:
+
+[subs="specialchars,attributes,quotes"]
+----
+alert fencing-1 "/usr/bin/SAPHanaSR-alert-fencing" \
+    select fencing \
+    attributes alert_fencing_delay=300
+----
+
+Load the file to the cluster.

-==== The virtual IP address of the HANA primary
+[subs="specialchars,attributes"]
+----
+# crm configure load update crm-alert-fencing.txt
+----
+==========
+
+The alert_fencing_delay=300 should be sufficient to avoid fencing loops.
+Automatic restart of just fenced nodes should be disabled. If fenced nodes are
+restarted automatically, it might be necessary to adapt SBD_START_DELAY to
+avoid fencing loops.
+See the manual pages SAPHanaSR-alert-fencing(8) and sbd(8) for details.
+
+// end SAPHanaSR-alert-fencing
+////
+
+==== Virtual IP address of the HANA primary

 The last mandatory resource to be added to the cluster is covering the virtual
 IP address for the HANA primary master name server.

@@ -2379,7 +2450,7 @@ Load the file to the cluster.

 In most installations, only the parameter *ip* needs to be set to the virtual
 IP address to be presented to the client systems.
-Use the command `man ocf_heartbeat_IPaddr2` for details on additional parameters.
+See the manual page ocf_heartbeat_IPaddr2(7) for details on additional parameters.

 ==== Constraints

@@ -2436,7 +2507,7 @@ Load the file to the cluster.
 ----
 ==========

-==== The virtual IP address of the HANA read-enabled secondary
+==== Virtual IP address of the HANA read-enabled secondary

 This optional resource is covering the virtual IP address for the read-enabled
 HANA secondary master name server. It is useful if {saphana} is configured with

@@ -2498,8 +2569,6 @@ IP address to be presented to the client systems.
 Use the command `man ocf_heartbeat_IPaddr2` for details on additional parameters.
 See also manual page SAPHanaSR-ScaleOut_basic_cluster(7).

-// TODO PRIO1: add SAPHanaSR-alert-fencing here or above?

 === Final steps
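Once all resources, constraints, and alerts have been loaded, the overall cluster
and system replication state can be checked as sketched below. The exact output
depends on the installed SAPHanaSR-angi version and on the SID and instance number
of the setup.

----
# crm_mon -1r
# SAPHanaSR-showAttr
----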