diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..ae1c8ac3
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "asciidoc.antora.enableAntoraSupport": true
+}
\ No newline at end of file
diff --git a/DC-SAP-EIC b/DC-SAP-EIC
new file mode 100644
index 00000000..f0d49916
--- /dev/null
+++ b/DC-SAP-EIC
@@ -0,0 +1,20 @@
+MAIN="SAP-EIC-Main.adoc"
+
+ADOC_TYPE="article"
+
+ADOC_POST="yes"
+
+ADOC_ATTRIBUTES="--attribute docdate=2024-07-01"
+
+# stylesheets
+STYLEROOT=/usr/share/xml/docbook/stylesheet/sbp
+FALLBACK_STYLEROOT=/usr/share/xml/docbook/stylesheet/suse2022-ns
+# FALLBACK_STYLEROOT=/usr/share/xml/docbook/stylesheet/suse2013-sbp-ns
+
+XSLTPARAM="--stringparam publishing.series=sbp"
+
+#DRAFT=yes
+ROLE="sbp"
+#PROFROLE="sbp"
+
+DOCBOOK5_RNG_URI="http://docbook.org/xml/5.2/rng/docbookxi.rnc"
diff --git a/DC-SAPDI-RKE-Harvester b/DC-SAPDI-RKE-Harvester
index 264127d5..6460cf66 100644
--- a/DC-SAPDI-RKE-Harvester
+++ b/DC-SAPDI-RKE-Harvester
@@ -4,7 +4,7 @@ ADOC_TYPE="article"
ADOC_POST="yes"
-ADOC_ATTRIBUTES="--attribute docdate=2022-11-11"
+ADOC_ATTRIBUTES="--attribute docdate=2024-04-01"
# stylesheets
STYLEROOT=/usr/share/xml/docbook/stylesheet/sbp
diff --git a/adoc/SAP-EIC-Main-docinfo.xml b/adoc/SAP-EIC-Main-docinfo.xml
new file mode 100644
index 00000000..a7944617
--- /dev/null
+++ b/adoc/SAP-EIC-Main-docinfo.xml
@@ -0,0 +1,79 @@
+
+
+ https://github.com/SUSE/suse-best-practices/issues/new
+ SUSE Best Practices
+
+
+
+SUSE Linux Enterprise Micro
+5.4
+SUSE Best Practices
+SAP Edge
+
+SUSE Linux Enterprise Micro 5.4
+Rancher Kubernetes Engine 2
+Longhorn
+Rancher Prime
+
+
+
+
+ Kevin
+ Klinger
+
+
+ SAP Solution Architect
+ SUSE
+
+
+
+
+ Dominik
+ Mathern
+
+
+ SAP Solution Architect
+ SUSE
+
+
+
+
+ Dr. Ulrich
+ Schairer
+
+
+ SAP Solution Architect
+ SUSE
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SUSE® offers a full stack for your container workloads. This best practice document describes how you can make use of these offerings
+ for your installation of SAP Edge Integration Cell. The operations of SAP Edge Integration Cell and/or SAP Integration Suite are not covered in this document.
+
+
+
+ Disclaimer:
+ Documents published as part of the SUSE Best Practices series have been contributed voluntarily
+ by SUSE employees and third parties. They are meant to serve as examples of how particular
+ actions can be performed. They have been compiled with utmost attention to detail.
+ However, this does not guarantee complete accuracy. SUSE cannot verify that actions described
+ in these documents do what is claimed or whether actions described have unintended consequences.
+ SUSE LLC, its affiliates, the authors, and the translators may not be held liable for possible errors
+ or the consequences thereof.
+
+
diff --git a/adoc/SAP-EIC-Main.adoc b/adoc/SAP-EIC-Main.adoc
new file mode 100644
index 00000000..c3d56353
--- /dev/null
+++ b/adoc/SAP-EIC-Main.adoc
@@ -0,0 +1,299 @@
+:docinfo:
+
+:sles: SUSE Linux Enterprise Server
+:sles4sap: SUSE Linux Enterprise Server for SAP Applications
+:slem: SUSE Linux Enterprise Micro
+:slem_version: 5.4
+:sles_version: 15 SP5
+:lh: Longhorn
+:rancher: Rancher Prime
+:harvester: Harvester
+:rke: Rancher Kubernetes Engine 2
+:eic: SAP Edge Integration Cell
+:elm: SAP Edge Lifecycle Management
+:rac: Rancher Application Collection
+:redis: Redis
+:sis: SAP Integration Suite
+:pg: PostgreSQL
+:metallb: MetalLB
+
+
+= {eic} on SUSE
+
+== Introduction
+
+This guide describes how to prepare your infrastructure for the installation of {eic} on {rke} using {rancher}.
+It will guide you through the steps of:
+
+* Installing {rancher}
+* Setup {rke} clusters
+* Deploy mandatory components for {eic}
+// * Deploying {eic} into your {rke}
+
+NOTE: This guide does not contain information about sizing your landscapes. Please refer to
+https://help.sap.com/docs/integration-suite?locale=en-US and look for the "Edge Integration Cell Sizing Guide".
+
+== Preparations
+
+* Get subscriptions for:
+** {slem} {slem_version}
+** {rancher}
+** {lh}
+
+* Check the storage requirements.
+
+* Create a or get access to a private container registry.
+
+* Get an SAP S-user to access software and documentation by SAP.
+
+* Read the relevant SAP documentation:
+
+** https://me.sap.com/notes/3247839[Release Note for SAP Edge Integration Cell]
+
+** https://me.sap.com/notes/2946788[Release Note for SAP ELM Bridge]
+
+** https://help.sap.com/docs/integration-suite/sap-integration-suite/setting-up-and-managing-edge-integration-cell[Installation Guide at help.sap.com]
+
+
+== Installing {slem} {slem_version}
+There are several ways to install {slem} {slem_version}. In this Best Practice Guide we will use the installation method via the graphical installer. Further installation routines can be found in the https://documentation.suse.com/sle-micro/5.4/html/SLE-Micro-all/book-deployment-slemicro.html[Deployment Guide].
+
+include::SAP-EIC-SLEMicro.adoc[SLEMicro]
+
+++++
+
+++++
+
+//TODO check dependencies of other doc files to adjust header hierarchy
+include::SAPDI3-Rancher.adoc[Rancher]
+
+++++
+
+++++
+
+== Installing RKE2 using {rancher}
+include::SAP-Rancher-RKE2-Installation.adoc[]
+
+++++
+
+++++
+
+include::SAPDI3-Longhorn.adoc[]
+
+++++
+
+++++
+
+== Installing {metallb} and databases
+
+In this chapter we'll give an example how to setup {metallb}, {redis} and {pg}.
+
+NOTE: Please note, that this might differ from the deployment you'll need for your infrastructure and use-cases.
+
+=== Login to {rac}
+
+{rancher} instances prior to version 2.9 cannot integrate the {rac}. Thus you need to use the console and Helm.
+The easiest way to do so is to use the built-in shell in {rancher}. To access it, navigate to your cluster and click on *Kubectl Shell* as shown below:
+
+image::EIC-Rancher-Kubectl-Button.png[title=Rancher Shell Access,scaledwidth=99%]
+
+A shell will open as in the given picture:
+
+image::EIC-Rancher-Kubectl-Shell.png[title=Rancher Shell Overview,scaledwidth=99%]
+
+
+You will need to login to the {rac} which can be done like:
+----
+$ helm registry login dp.apps.rancher.io/charts -u <your username> -p <your access token>
+----
+
+
+[#imagePullSecret]
+=== Creating an imagePullSecret
+To make the resources available to roll out, you'll need to create an imagePullSecret.
+In this guide we'll use the name application-collection for it.
+
+==== Creating an imagePullSecret using kubectl
+
+Using kubectl to create the imagePullSecret is quite easy.
+Get your username and your access token for the {rac}.
+Then run:
+----
+$ kubectl create secret docker-registry application-collection --docker-server=dp.apps.rancher.io --docker-username=<your username> --docker-password=<your access token>
+----
+
+==== Creating an imagePullSecret using {rancher}
+
+You can also create an imagePullSecret using {rancher}.
+Therefore open {rancher} and enter your cluster.
+
+Navigate to *Storage* -> *Secrets* as shown below:
+
+image::EIC-Secrets-Menu.png[title=Secrets Menu,scaledwidth=99%]
+
+++++
+
+++++
+
+Select the *Create* button in the upper right corner.
+
+image::EIC-Secrets-Overview.png[title=Secrets Overview,scaledwidth=99%]
+
+A selection screen will be shown asking you to choose the Secret type. Select *Registry* as shown here:
+
+image::EIC-Secrets-Types.png[title=Secrets Type Selection,scaledwidth=99%]
+
+++++
+
+++++
+
+Enter a name like application-collection for the Secret. For the text field *Registry Domain Name*, enter dp.apps.rancher.io .
+Enter your username and password and hit the *Create* button on the bottom right side.
+
+image::EIC-Secret-Create.png[title=Secrets Creation Step,scaledwidth=99%]
+
+++++
+
+++++
+
+=== Installing {metallb}
+
+This chapter is to guide you through the installation and configuration of {metallb} on your Kubernetes cluster used for the {eic}.
+
+include::SAP-EIC-Metallb.adoc[Metallb]
+++++
+
+++++
+
+=== Installing {redis}
+
+// https://help.sap.com/docs/integration-suite/sap-integration-suite/prepare-your-kubernetes-cluster#redis-data-store-requirements
+
+Before deploying {redis}, make sure that the requirements described in
+https://me.sap.com/notes/3247839
+are met.
+
+Furthermore make sure to get an understanding of what grade of persistence you want to achieve for your {redis} cluster.
+To get more information about persistence in {redis}, have a look at
+https://redis.io/docs/management/persistence/ .
+
+
+include::SAP-EIC-Redis.adoc[]
+
+++++
+
+++++
+
+=== Installing {pg}
+
+// https://help.sap.com/docs/integration-suite/sap-integration-suite/prepare-your-kubernetes-cluster#postgresql-database-requirements
+
+Before deploying {pg}, make sure that the requirements described in
+https://me.sap.com/notes/3247839 are met.
+
+include::SAP-EIC-PostgreSQL.adoc[]
+
+++++
+
+++++
+
+== Installing {eic}
+
+// include::SAP-EIC.adoc[]
+At this point, you should be able to deploy {eic}.
+Please follow the instructions at https://help.sap.com/docs/integration-suite/sap-integration-suite/setting-up-and-managing-edge-integration-cell
+to install {eic} in your prepared environments.
+
+++++
+
+++++
+
+[#Appendix]
+== Appendix
+
+=== Using self signed certificates
+
+In this chapter we will explain how to create self signed certificates and how to make them available within Kubernetes.
+
+==== Create the self signed certificates
+
+CAUTION: It is discouraged to use self signed certificates for production environments.
+
+As a first step, you need to create a certificate authority (further called CA) with its key and certificate.
+In the excerpt below you can find an example of how to create such a CA with a passphrase of your choice:
+----
+$ openssl req -x509 -sha256 -days 1825 -newkey rsa:2048 -keyout rootCA.key -out rootCA.crt -passout pass: -subj "/C=DE/ST=BW/L=Nuremberg/O=SUSE"
+----
+
+This gives you the *rootCA.key* and the *rootCA.crt*.
+For the server certificate, a certificate signing request (further called CSR) is needed.
+The given excerpt shows how to create such a CSR:
+----
+$ openssl req -newkey rsa:2048 -keyout domain.key -out domain.csr -passout pass: -subj "/C=DE/ST=BW/L=Nuremberg/O=SUSE"
+----
+
+Before you can sign the CSR, you'll need to add the DNS names of your Kubernetes Services to the CSR.
+Therefore create a file with the content below and replace <service> and <namespace> with the name of your Kubernetes service and the namespace it is placed in:
+
+----
+authorityKeyIdentifier=keyid,issuer
+basicConstraints=CA:FALSE
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = <service>.<namespace>.svc.cluster.local
+DNS.2 = <service>.<namespace>.svc.cluster.local
+----
+
+You can now use the previously created rootCA.key and rootCA.crt with the extension file to sign the CSR.
+The example below shows how to do that by passing the extension file (here named *domain.ext*):
+----
+$ openssl x509 -req -CA rootCA.crt -CAkey rootCA.key -in domain.csr -out server.pem -days 365 -CAcreateserial -extfile domain.ext -passin pass:
+----
+
+This creates a file called *server.pem* which is your certificate to be used for your application.
+
+
+Your *domain.key* is at this point still encrypted, but the application will need an unencrypted server key.
+To decrypt it, run the given command, which will create the *server.key*:
+----
+$ openssl rsa -passin pass: -in domain.key -out server.key
+----
+
+Some applications (like Redis) require a full certificate chain to operate.
+To get a full certificate chain, concat the generated *server.pem* with the *rootCA.crt* like below:
+
+----
+$ cat server.pem rootCA.crt > chained.pem
+----
+
+Afterwards you should have the files called server.pem, server.key and chained.pem which can be used for your applications like Redis or PostgreSQL.
+
+
+==== Upload certificates to Kubernetes
+
+To use certificate files in Kubernetes, you need to store them as so called *secrets*.
+An example how to upload your certificates to Kubernetes is shown in the given excerpt:
+
+----
+$ kubectl -n <namespace> create secret generic <secret name> --from-file=./root.pem --from-file=./server.pem --from-file=./server.key
+----
+
+NOTE: Most applications are expecting to have the secret to be used in the same namespace as the application.
+
+
+++++
+
+++++
+
+:leveloffset: 0
+// Standard SUSE Best Practices includes
+== Legal notice
+include::common_sbp_legal_notice.adoc[]
+
+++++
+
+++++
+
+// Standard SUSE Best Practices includes
+:leveloffset: 0
+include::common_gfdl1.2_i.adoc[]
diff --git a/adoc/SAP-EIC-Metallb.adoc b/adoc/SAP-EIC-Metallb.adoc
new file mode 100644
index 00000000..4fefc440
--- /dev/null
+++ b/adoc/SAP-EIC-Metallb.adoc
@@ -0,0 +1,75 @@
+==== Installation and Configuration of {metallb}
+
+There are multiple ways to install the {metallb} software. In this guide we'll cover how to install {metallb} using kubectl or Helm.
+A complete overview and more details about {metallb} can be found on their
+link:https://metallb.universe.tf/[official website]
+
+===== Pre-requisites
+
+Before starting the installation, make sure you meet all the requirements. In particular, you should pay attention to network addon compatibility.
+If you are trying to run {metallb} on a cloud platform, you should also look at the cloud compatibility page and make sure your cloud platform can work with {metallb} (most cannot).
+
+There are several ways to deploy {metallb}. In this guide we'll describe how to use the {rac} to deploy {metallb}.
+
+Please make sure to have a range of IP addresses available for configuring {metallb}.
+
+===== Preparations
+
+Make sure the related Kernel modules are loaded on your Kubernetes worker nodes as described in xref:SAP-EIC-SLEMicro#metal-slem[].
+
+Make sure you enabled strictarp as described in xref:SAP-Rancher-RKE2-Installation.adoc#metal-rke[]
+
+
+===== Installation of {metallb}
+
+To install {metallb} run the following lines in your terminal:
+
+----
+$ helm pull oci://dp.apps.rancher.io/charts/metallb --untar
+$ helm install --namespace=metallb --set-json 'imagePullSecrets=[{"name":"application-collection"}]' --create-namespace metallb ./metallb
+----
+
+++++
+
+++++
+
+==== Configuration
+
+{metallb} needs two configurations to function properly:
+
+- IP address pool
+- L2 advertisement configuration
+
+Create the configuration files for the {metallb} IP address pool:
+
+----
+# cat <<EOF > iprange.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-example-pool
+ namespace: metallb
+spec:
+ addresses:
+ - 192.168.1.240-192.168.1.250
+EOF
+----
+
+and the layer 2 network advertisement:
+
+----
+# cat <<EOF > l2advertisement.yaml
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: example
+ namespace: metallb
+EOF
+----
+
+Apply the configuration:
+
+----
+# kubectl apply -f iprange.yaml
+# kubectl apply -f l2advertisement.yaml
+----
diff --git a/adoc/SAP-EIC-PostgreSQL.adoc b/adoc/SAP-EIC-PostgreSQL.adoc
new file mode 100644
index 00000000..ed6c1120
--- /dev/null
+++ b/adoc/SAP-EIC-PostgreSQL.adoc
@@ -0,0 +1,84 @@
+:pg: PostgreSQL
+:redis: Redis
+
+IMPORTANT::
+SUSE does not offer database support for {pg} on Kubernetes.
+To get support, go to link:https://www.postgresql.org/support/[The PostgreSQL Global Development Group].
+
+
+IMPORTANT::
+In this guide we'll describe one variant of installing {pg}.
+There are other possible ways to set up {pg} which are not covered in this guide. It is also possible to install {pg} as a single instance on top of the operating system.
+We will focus on installing {pg} into a Kubernetes cluster, because we also need a {redis} database and we will put them together into one cluster.
+
+==== Deploying {pg}
+Even though {pg} is available for deployment using the {rancher} Apps, we recommend to use the {rac}.
+The {pg} chart can be found at https://apps.rancher.io/applications/postgresql.
+
+==== Create Secret for {rac}
+First we need to create a namespace and the *imagePullSecret* for installing the {pg} database into the cluster.
+----
+kubectl create namespace postgresql
+----
+
+How to create the *imagePullSecret* is described in the Section xref:SAP-EIC-Main.adoc#imagePullSecret[].
+
+===== Create Secret with certificates
+Second we need to create the Kubernetes secret with the certificates. You will find an example of how to do this in the xref:SAP-EIC-Main.adoc#Appendix[].
+
+===== Installing the application
+
+You will need to login to the {rac} which can be done like:
+----
+$ helm registry login dp.apps.rancher.io/charts -u <your username> -p <your access token>
+----
+
+Create a file *values.yaml* which holds some configuration for the {pg} Helm chart.
+The config may look like:
+----
+global:
+ # -- Global override for container image registry pull secrets
+ imagePullSecrets: ["application-collection"]
+images:
+ postgresql:
+ # -- Image name to use for the PostgreSQL container
+ repository: dp.apps.rancher.io/containers/postgresql
+ # -- Image tag to use for the PostgreSQL container
+ tag: "15.7"
+auth:
+ # -- PostgreSQL username for the superuser
+ postgresUsername: postgres
+ # -- PostgreSQL password for the superuser
+ postgresPassword: ""
+ # -- Replication username
+ replicationUsername: replication
+ # -- Replication password
+ replicationPassword: ""
+tls:
+ # -- Enable SSL/TLS
+ enabled: false
+ # -- Name of the secret containing the PostgreSQL certificates
+ existingSecret: "postgresqlcert"
+ # -- Whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server. Valid values: prefer (default), disable, allow, require, verify-ca, verify-full
+ sslMode: "verify-full"
+ # -- Certificate filename in the secret (will be ignored if empty)
+ certFilename: "server.pem"
+ # -- Certificate key filename in the secret (will be ignored if empty)
+ keyFilename: "server.key"
+ # -- CA certificate filename in the secret (will be ignored if empty)
+ caCertFilename: "root.pem"
+persistentVolumeClaimRetentionPolicy:
+ whenDeleted: Delete
+----
+
+++++
+
+++++
+
+To install the application run:
+----
+$ helm pull oci://dp.apps.rancher.io/charts/postgresql --untar
+$ helm install postgresql -f values.yaml --namespace=postgresql ./postgresql
+----
+
+
diff --git a/adoc/SAP-EIC-Redis.adoc b/adoc/SAP-EIC-Redis.adoc
new file mode 100644
index 00000000..928e25a7
--- /dev/null
+++ b/adoc/SAP-EIC-Redis.adoc
@@ -0,0 +1,63 @@
+[#Redis]
+
+:redis: Redis
+
+IMPORTANT::
+SUSE does not offer database support for {redis}.
+To get support, turn to
+link:https://redis.com/[Redis Ltd.]
+
+
+IMPORTANT::
+In this guide we'll describe one variant of installing {redis} which is called Redis Cluster.
+There are other possible ways to set up {redis} which are not covered in this guide.
+Please check out if you rather require
+link:https://redis.io/docs/management/sentinel/[Sentinel]
+instead of
+link:https://redis.io/docs/management/scaling/[Cluster]
+
+==== Deploying Redis
+
+Even though {redis} is available for deployment using the {rancher} Apps, we recommend to use the {rac}.
+The {redis} chart can be found at https://apps.rancher.io/applications/redis .
+
+++++
+
+++++
+
+
+===== Deploy the chart
+
+If you want to use self signed certificates, you can find instructions how to create such in xref:SAP-EIC-Main.adoc#Appendix[]
+
+Create a file *values.yaml* which holds some configuration for the {redis} Helm chart.
+The config may look like:
+----
+storageClassName: "longhorn"
+global:
+ imagePullSecrets: ["application-collection"]
+architecture: cluster
+nodeCount: 3
+auth:
+ password:
+tls:
+ # -- Enable TLS
+ enabled: true
+ # -- Whether to require Redis clients to authenticate with a valid certificate (authenticated against the trusted root CA certificate)
+ authClients: false
+ # -- Name of the secret containing the Redis certificates
+ existingSecret: "redisCert"
+ # -- Certificate filename in the secret
+ certFilename: "server.pem"
+ # -- Certificate key filename in the secret
+ keyFilename: "server.key"
+ # CA certificate filename in the secret - needs to hold the CA.crt and the server.pem
+ caCertFilename: "root.pem"
+----
+
+To install the application run:
+
+----
+$ helm pull oci://dp.apps.rancher.io/charts/redis --untar
+$ helm install -f values.yaml --namespace=redis --create-namespace redis ./redis
+----
diff --git a/adoc/SAP-EIC-SLEMicro.adoc b/adoc/SAP-EIC-SLEMicro.adoc
new file mode 100644
index 00000000..87f27732
--- /dev/null
+++ b/adoc/SAP-EIC-SLEMicro.adoc
@@ -0,0 +1,204 @@
+[#SLEMicro]
+
+=== Preparation
+
+On each server in your environment for {eic} and {rancher}, install {slem} {slem_version} as the operating system.
+This chapter describes all recommended steps for the installation.
+
+TIP: If you have already set up all machines and the operating system,
+skip this chapter.
+
+++++
+
+++++
+
+* Mount the {slem} into your virtual machine and start the VM.
+* When the boot menu appears select *Installation*.
++
+image::EIC_SLE_Micro_setup_boot_menu.png[title=SLE Micro Boot Menu,scaledwidth=99%]
+
+++++
+
+++++
+
+* Select your *Language*, *Keyboard Layout* and accept the License Agreement.
++
+image::EIC_SLE_Micro_setup_License_Agreement.png[title=SLE Micro Setup License Agreement,scaledwidth=99%]
+
+++++
+
+++++
+
+* It is recommended to use a static network configuration.
+During the installation setup, the first time to adjust this is when the registration page is displayed.
+In the upper right corner, click the button *Network Configuration ...*:
+
+image::EIC_SLE_Micro_setup_Registration.png[title=SLE Micro Setup Registration,scaledwidth=99%]
+
+++++
+
+++++
+
+* The *Network Settings* page is displayed. By default, the network adapter is configured to use DHCP.
+To change this, click the Button *Edit*.
++
+image::EIC_SLE_Micro_setup_Network_Settings.png[title=SLE Micro Setup Network Settings,scaledwidth=99%]
+
+++++
+
+++++
+
+* On the *Network Card Setup* page, select *Statically Assigned IP Address* and fill in the fields *IP Address*, *Subnet Mask* and *Hostname*.
++
+image::EIC_SLE_Micro_setup_Network_Card_Setup.png[title=SLE Micro Setup Network Card,scaledwidth=99%]
+
+++++
+
+++++
+
+* Back on the *Network Settings* page, go to the *Hostname/DNS* section and set your *hostname*, *Name Server* and *Domain Search*.
++
+image::EIC_SLE_Micro_setup_Network_Settings_DNS.png[title=SLE Micro Setup Hostname/DNS,scaledwidth=99%]
+
+++++
+
+++++
+
+* Then switch to the *Routing* Section and go to *Add*.
++
+image::EIC_SLE_Micro_setup_Network_Settings_Routing.png[title=SLE Micro Setup Hostname/DNS,scaledwidth=99%]
+
+++++
+
+++++
+
+* Fill out the *Gateway* and set it as *Default Route*.
++
+image::EIC_SLE_Micro_setup_Network_Settings_default_route.png[title=SLE Micro Setup Network Settings Default Route,scaledwidth=99%]
+
+++++
+
+++++
+
+* You will come back to the *Registration* page and here we will select *Skip Registration* and will do it later.
++
+image::EIC_SLE_Micro_setup_skip_Registration.png[title=SLE Micro Setup Skip Registration,scaledwidth=99%]
+
+++++
+
+++++
+
+* In the next window you can change the NTP Server or keep the default.
++
+image::EIC_SLE_Micro_setup_NTP_Configuration.png[title=SLE Micro Setup NTP Configuration,scaledwidth=99%]
+
+++++
+
+++++
+
+* On the next page fill out the password for the *root* user and, if you want, you can import public SSH keys for the root user.
++
+image::EIC_SLE_Micro_setup_Authentication.png[title=SLE Micro Setup Authentication for the System Administrator "root",scaledwidth=99%]
+
+++++
+
+++++
+
+* On the last page you see a summary of your *Installation Settings* where you can change the disk layout, software packages and more. Please make sure that:
+
+ ** The firewall will be disabled.
+ ** The SSH service will be enabled.
+ ** Kdump status is disabled.
+ ** SELinux will be set in permissive mode.
+
++
+image::EIC_SLE_Micro_setup_Installation_Settings01.png[title=SLE Micro Setup Installation Settings upper page,scaledwidth=99%]
+image::EIC_SLE_Micro_setup_Installation_Settings02.png[title=SLE Micro Setup Installation Settings lower page,scaledwidth=99%]
+
+* To disable Kdump, scroll down and click its label . This opens the *Kdump Start-Up* page.
+On that page, make sure "Disable Kdump" is selected.
+
+* To set SELinux in permissive mode, scroll down and click on *Security*. This opens the *Security* page. On the right side there is the menu entry *Selected Module*. Open the dropdown menu and select *Permissive*.
+
+* Click on *Install* and confirm the installation.
++
+image::EIC_SLE_Micro_setup_Confirm_Installation.png[title=SLE Micro Setup Confirm Installation,scaledwidth=99%]
+
+* After the installation is finished you need to reboot the system.
++
+image::EIC_SLE_Micro_setup_reboot.png[title=SLE Micro Setup reboot,scaledwidth=99%]
+
+* You will see a login screen and you can log in with your chosen username and password.
+
+=== Register your system
+To bring your system up to date you need to register it against a SUSE Manager, an RMT server or directly with the SCC Portal. In this guide we will describe the process with a direct connection to the SCC. For more information please look into the {slem} documentation.
+
+Registering the system is possible from the command line using the *transactional-update register* command. For information that goes beyond the scope of this section, refer to the inline documentation with *SUSEConnect --help*. To register {slem} with SUSE Customer Center, run *transactional-update register* as follows:
+----
+# transactional-update register -r REGISTRATION_CODE -e EMAIL_ADDRESS
+----
+To register with a local registration server, additionally provide the URL to the server:
+----
+# transactional-update register -r REGISTRATION_CODE -e EMAIL_ADDRESS \
+--url "https://suse_register.example.com/"
+----
+Replace *REGISTRATION_CODE* with the registration code you received with your copy of {slem}. Replace *EMAIL_ADDRESS* with the e-mail address associated with the SUSE account you or your organization uses to manage subscriptions.
+. Reboot your system to switch to the latest snapshot.
+. {slem} is now registered.
+You can find more information in the {slem} {slem_version} link:https://documentation.suse.com/sle-micro/{slem_version}/single-html/SLE-Micro-deployment/[Deployment Guide].
+
+=== Update your system
+Login into the system and after your system is registered you can update it with the *transactional-update* command.
+----
+# transactional-update
+----
+
+=== Disable automatic reboot
+Per default {slem} runs a timer for *transactional-update* in the background which could automatically reboot your system. We will disable it.
+
+----
+# systemctl --now disable transactional-update.timer
+----
+
+++++
+
+++++
+
+ifdef::metallb[]
+// Needed due to Github issue: https://github.com/rancher/rke2/issues/3710
+[#metal-slem]
+=== Preparation for {metallb}
+
+If you want to use {metallb} as a Kubernetes Load Balancer, you need to make sure that the kernel modules for ip_vs are loaded correctly on boot time.
+To do so, create and populate the file */etc/modules-load.d/ip_vs.conf* on each cluster node as follows:
+
+[source, shell]
+----
+# cat <<EOF >> /etc/modules-load.d/ip_vs.conf
+ip_vs
+ip_vs_rr
+ip_vs_wrr
+ip_vs_sh
+EOF
+----
+endif::[]
+
+
+// To do so, create a file on each cluster node named:
+
+// ----
+// /etc/modules-load.d/ip_vs.conf
+// ----
+
+// Now, you need to add the entries for the related kernel modules:
+// ----
+// ip_vs
+// ip_vs_rr
+// ip_vs_wrr
+// ip_vs_sh
+// ----
+
+// Reboot the nodes and check that the kernel modules are loaded successfully:
+// ----
+// # lsmod | grep ip_vs
+// ----
diff --git a/adoc/SAP-EIC.adoc b/adoc/SAP-EIC.adoc
new file mode 100644
index 00000000..aa8d891c
--- /dev/null
+++ b/adoc/SAP-EIC.adoc
@@ -0,0 +1,49 @@
+
+=== Prerequisites
+
+Get a BTP account
+Get a subscription for SAP Integration Suite
+
+
+
+
+Solutions tab shows all available solutions
+
+=== Add an Edge Node in {sis}
+
+* Open SAP Integration Suite Portal
+* Activate the {eic} runtime (Integration Suite Portal: Settings -> Runtime)
+* Follow the link to the Edge Lifecycle Management
+* Add an Edge node by clicking the *Add* button
+* Enter a name for the Edge node and select the *Provider Type* Generic.
+** For a production environment, tick the *High Availability Mode* checkbox
+* Hit the *Step 2* button to go ahead
+* When you enable logging, follow the process at //TODO link to SAP docs
+* When setting up a production environment, enable the local container registry
+* When you require a proxy, enable the HTTPS proxy option in step 4
+* Verify everything is configured as needed and click the *Add Edge Node* button on the bottom right
+
+
+* In the next screen you'll need to upload the kubeconfig of your RKE2 cluster.
+// TODO describe how to get the kubeconfing from Rancher
+* Enter a password for the Edge Node Cluster.
+* Download the bootstrap file
+* Download the ELM
+* Execute the ELM
+* Enter the password previously entered
+* Wait until the node appears as "ready" in the SAP ELM portal.
+* Click *Deploy Solution*
+* Select the desired Edge Integration Cell solution and the desired version
+** The *Default Virtual Host* defines where the {eic} will be reachable (DNS name)
+** The *Default Virtual Host Key Alias* which key of the SAP Integration suite shall be used
+// TODO Keystore can be accessed under SAP Integration Suite -> keystore
+* Click *Next Step*
+* You can now see the dependencies that will be deployed. Click *Next Step*
+*
+// TODO Recording Oliver Deckert ~48:00 -> Redis and Postgres are allowed to be deployed in the same K8s cluster as EIC
+
+* Istio -> LoadBalancer select *Other*
+
+
+// TODO Enable logging for production recommended/mandatory/optional?
+// TODO Local container registry for production recommended/mandatory/optional?
\ No newline at end of file
diff --git a/adoc/SAP-Rancher-RKE2-Installation.adoc b/adoc/SAP-Rancher-RKE2-Installation.adoc
new file mode 100644
index 00000000..9c761ef3
--- /dev/null
+++ b/adoc/SAP-Rancher-RKE2-Installation.adoc
@@ -0,0 +1,82 @@
+Creating new RKE2 clusters is very easy when using {rancher}.
+
+
+Go to the home menu of your {rancher} instance.
+Click the "Create" button on the right hand side of the screen, as it's shown here:
+
+image::SAP-Rancher-Main-Create.png[title=Rancher home menu,scaledwidth=99%]
+
+
+The window will show you the available options to create new Kubernetes clusters.
+Make sure that the toggle button on the right side of the screen is set to RKE2/K3s as shown below:
+
+image::SAP-Rancher-Create-RKE-Version.png[title=Rancher RKE version selection,scaledwidth=99%]
+
+If you want to create Kubernetes clusters on existing (virtual) machines, choose the "Custom" option at the very bottom, as shown in the picture below:
+
+image::SAP-Rancher-Create-Custom.png[title=Rancher create custom cluster,scaledwidth=99%]
+
+Next, you'll see a window to configure your Kubernetes cluster. It will look similar to the image below:
+
+image::SAP-Rancher-Create-Config.png[title=Rancher create custom cluster config,scaledwidth=99%]
+
+Here, you'll need to name the cluster. The name will only be used within {rancher} and won't affect your workloads.
+In the next step, make sure to select a Kubernetes version that is supported by the workload you want to deploy.
+
+++++
+
+++++
+
+// Section is only needed if metallb shall be used
+// Ref.: https://forums.rancher.com/t/kube-proxy-settings-in-custom-rke2-cluster/40107/2
+// Ref.: https://github.com/rancher/rke2/issues/3710
+ifdef::metallb[]
+[#metal-rke]
+If you do not plan to use {metallb}, please continue xref:SAP-Rancher-RKE2-Installation.adoc#nmetallb[below].
+
+To prepare {rke} for running {metallb}, you'll need to enable strictarp mode for ipvs in kube-proxy.
+To enable strictarp for clusters you want to roll out using {rancher}, you'll need to add the following lines to your configuration:
+
+
+[source,yaml]
+----
+machineGlobalConfig:
+ kube-proxy-arg:
+ - proxy-mode=ipvs
+ - ipvs-strict-arp=true
+----
+
+To do so, apply all configuration as usual and hit the *Edit as YAML* button in the creation step, as shown below:
+
+image::SAP-Rancher-Create-Config-YAML.png[title=Rancher create custom cluster yaml config,scaledwidth=99%]
+
+The excerpt is to be located under *spec.rkeConfig*. An example can be seen here:
+
+image::SAP-Rancher-Create-StrictARP.png[title=Rancher create Cluster with strict ARP, scaledwidth=99%]
+
+endif::[]
+
+++++
+
+++++
+
+[#nmetallb]
+If you don't have any further requirements to Kubernetes, you can click the "Create" button at the very bottom.
+In any other case talk to your administrators before making adjustments.
+
+Once you've clicked the "Create" button, you should see a screen like this:
+
+image::SAP-Rancher-Create-Register.png[title=Rancher create registration,scaledwidth=99%]
+
+In the first step here, select the roles your node(s) should receive.
+A common high availability setup holds:
+
+* 3 x etcd / control plane nodes
+* 3 x worker nodes
+
+The next step is to copy the registration command to the target machines shell and execute it.
+If your {rancher} instance does hold a self-signed certificate, make sure to tick the checkbox below the text bar holding the registration command.
+
+You can run the command on all nodes in parallel and don't have to wait until a single node is down.
+Once all machines are registered, you can see the cluster status at the top, changing from "updating" to "active".
+At this point in time, your Kubernetes cluster is ready to be used.
\ No newline at end of file
diff --git a/adoc/SAPDI3-Longhorn.adoc b/adoc/SAPDI3-Longhorn.adoc
index 5ccba174..3167d990 100644
--- a/adoc/SAPDI3-Longhorn.adoc
+++ b/adoc/SAPDI3-Longhorn.adoc
@@ -1,6 +1,6 @@
[#Longhorn]
-:lh_version: 1.3.1
+:lh_version: 1.6.2
== Installing {lh}
@@ -18,7 +18,7 @@ all nodes must have the `open-iscsi` package installed, and the ISCSI daemon nee
To make sure a node is prepared for {lh}, you can use the following script to check:
----
-$ curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.2.4/scripts/environment_check.sh | bash
+$ curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.6.2/scripts/environment_check.sh | bash
----
@@ -42,6 +42,8 @@ These commands will add the Longhorn Helm charts to the list of Helm repositorie
=== Installing {lh} using `kubectl`
You can install {lh} using `kubectl` with the following command:
+
+[subs="attributes"]
----
$ kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v{lh_version}/deploy/longhorn.yaml
----
diff --git a/adoc/SAPDI3-RKE2-Install.adoc b/adoc/SAPDI3-RKE2-Install.adoc
index a044fc1f..13b6e1f0 100644
--- a/adoc/SAPDI3-RKE2-Install.adoc
+++ b/adoc/SAPDI3-RKE2-Install.adoc
@@ -5,6 +5,7 @@
:di_version: 3.3
:sles: SUSE Linux Enterprise Server
:sles_version: 15 SP4
+:sles4sap: SUSE Linux Enterprise Server for SAP Applications
:lh: Longhorn
:rancher: SUSE Rancher
:harvester: Harvester
diff --git a/adoc/SAPDI3-Rancher.adoc b/adoc/SAPDI3-Rancher.adoc
index 275ec6dc..aee6fbd6 100644
--- a/adoc/SAPDI3-Rancher.adoc
+++ b/adoc/SAPDI3-Rancher.adoc
@@ -4,13 +4,94 @@
=== Preparation
-==== Installing Helm
+In order to have a highly available {rancher} setup, you'll need a load balancer for your {rancher} nodes.
+In this chapter we'll describe how to set up a custom load balancer using haproxy. If you already have a load balancer, you can use that to make {rancher} highly available.
+
+If you do not plan to set up a highly available {rancher} cluster, you can skip this chapter.
+
+==== Install haproxy based load balancer
+
+Set up a virtual machine or bare metal server with {sles} and the HA Extension or use {sles4sap}. Install the haproxy package.
-The easiest option to install Helm is to run:
----
-# curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
+# zypper in haproxy
+----
+
+Create the configuration for haproxy.
+The following is an example configuration file for haproxy; please adapt it for the actual environment.
+----
+# cat <<EOF > /etc/haproxy/haproxy.cfg
+global
+ log /dev/log daemon
+ maxconn 32768
+ chroot /var/lib/haproxy
+ user haproxy
+ group haproxy
+ daemon
+ tune.bufsize 32768
+ tune.ssl.default-dh-param 2048
+ ssl-default-bind-ciphers ALL:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!MD5:!PSK:!RC4:!ADH:!LOW@STRENGTH
+
+defaults
+ log global
+ mode tcp
+ option log-health-checks
+ option log-separate-errors
+ option dontlog-normal
+ option dontlognull
+ option tcplog
+ retries 3
+ option redispatch
+ maxconn 10000
+ timeout connect 5s
+ timeout client 50s
+ timeout server 450s
+
+listen stats
+ bind 0.0.0.0:80
+ bind :::80 v6only
+ stats enable
+ stats uri /
+ stats refresh 5s
+
+# access the kubernetes api
+frontend kubeapi
+ bind *:6443
+ mode tcp
+ default_backend kubeapibackend
+
+# address to register new nodes
+frontend rke2server
+ bind *:9345
+ mode tcp
+ default_backend rke2serverbackend
+
+backend kubeapibackend
+ balance roundrobin
+ server mynode1 192.168.122.20:6443 check
+ server mynode2 192.168.122.30:6443 check
+ server mynode3 192.168.122.40:6443 check
+
+
+backend rke2serverbackend
+ balance roundrobin
+ server mynode1 192.168.122.20:9345 check
+EOF
+----
+
+Check the configuration file:
+----
+# haproxy -f /etc/haproxy/haproxy.cfg -c
+----
+
+Enable and start the haproxy load balancer:
+----
+# systemctl enable haproxy
+# systemctl start haproxy
----
+Do not forget to restart or reload haproxy if there were changes to the haproxy config file.
+
==== Installing RKE2
@@ -19,7 +100,34 @@ To install RKE2, the script provided at https://get.rke2.io can be used as follo
# curl -sfL https://get.rke2.io | sh -
----
-After the script finished, start the RKE2 components and run:
+For HA setups it is necessary to create RKE2 cluster configuration files in advance.
+On the first master node:
+----
+# mkdir -p /etc/rancher/rke2
+# cat <<EOF > /etc/rancher/rke2/config.yaml
+token: 'your cluster token'
+tls-san:
+ - FQDN of fixed registration address on load balancer
+ - other hostname
+ - IP v4 address
+EOF
+----
+
+Create configuration files for additional cluster nodes:
+----
+# cat <<EOF > /etc/rancher/rke2/config.yaml
+server: https://"FQDN of registration address":9345
+token: 'your cluster token'
+tls-san:
+ - FQDN of fixed registration address on load balancer
+ - other hostname
+ - IP v4 address
+
+EOF
+----
+
+
+Now it is time to enable and start the RKE2 components by running the following on each cluster node:
----
# systemctl enable rke2-server --now
----
@@ -35,13 +143,34 @@ For convenience, the `kubectl` binary can be added to the *$PATH* and the given
# export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
----
+++++
+
+++++
+
+
+==== Installing Helm
+
+In order to install {rancher} and some of its required components, you'll need to use Helm.
+
+The easiest option to install Helm is to run:
+----
+# curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
+----
+
+==== Installing cert-manager
+
+----
+$ helm repo add jetstack https://charts.jetstack.io
+$ helm repo update
+$ helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set installCRDs=true
+----
=== Installing {rancher}
To install {rancher}, you need to add the related Helm repository.
To achieve that, use the following command:
----
-$ helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+$ helm repo add rancher https://charts.rancher.com/server-charts/prime
----
As a next step, create the cattle-system namespace in Kubernetes as follows:
@@ -51,7 +180,7 @@ $ kubectl create namespace cattle-system
The Kubernetes cluster is now ready for the installation of {rancher}:
----
-$ helm install rancher rancher-stable/rancher \
+$ helm install rancher rancher/rancher \
--namespace cattle-system \
--set hostname= \
--set replicas=3
@@ -64,4 +193,3 @@ $ kubectl -n cattle-system rollout status deploy/rancher
When the deployment is done, you can access the {rancher} cluster at https://[].
Here you will also find a description about how to log in for the first time.
-
diff --git a/adoc/SAPDI3-SUSE_Kubernetes_Stack.adoc b/adoc/SAPDI3-SUSE_Kubernetes_Stack.adoc
index a343b5db..f24cebba 100644
--- a/adoc/SAPDI3-SUSE_Kubernetes_Stack.adoc
+++ b/adoc/SAPDI3-SUSE_Kubernetes_Stack.adoc
@@ -5,6 +5,7 @@
:di_version: 3.3
:sles: SUSE Linux Enterprise Server
:sles_version: 15 SP4
+:sles4sap: SUSE Linux Enterprise Server for SAP Applications
:lh: Longhorn
:rancher: SUSE Rancher
:harvester: Harvester
diff --git a/images/src/png/EIC-Rancher-Kubectl-Button.png b/images/src/png/EIC-Rancher-Kubectl-Button.png
new file mode 100644
index 00000000..e84f3256
Binary files /dev/null and b/images/src/png/EIC-Rancher-Kubectl-Button.png differ
diff --git a/images/src/png/EIC-Rancher-Kubectl-Shell.png b/images/src/png/EIC-Rancher-Kubectl-Shell.png
new file mode 100644
index 00000000..c890fa3c
Binary files /dev/null and b/images/src/png/EIC-Rancher-Kubectl-Shell.png differ
diff --git a/images/src/png/EIC-Secret-Create.png b/images/src/png/EIC-Secret-Create.png
new file mode 100644
index 00000000..87a9a37c
Binary files /dev/null and b/images/src/png/EIC-Secret-Create.png differ
diff --git a/images/src/png/EIC-Secrets-Menu.png b/images/src/png/EIC-Secrets-Menu.png
new file mode 100644
index 00000000..74456859
Binary files /dev/null and b/images/src/png/EIC-Secrets-Menu.png differ
diff --git a/images/src/png/EIC-Secrets-Overview.png b/images/src/png/EIC-Secrets-Overview.png
new file mode 100644
index 00000000..8e2be1e3
Binary files /dev/null and b/images/src/png/EIC-Secrets-Overview.png differ
diff --git a/images/src/png/EIC-Secrets-Types.png b/images/src/png/EIC-Secrets-Types.png
new file mode 100644
index 00000000..ab4870a2
Binary files /dev/null and b/images/src/png/EIC-Secrets-Types.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Authentication.png b/images/src/png/EIC_SLE_Micro_setup_Authentication.png
new file mode 100644
index 00000000..37dba846
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Authentication.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Confirm_Installation.png b/images/src/png/EIC_SLE_Micro_setup_Confirm_Installation.png
new file mode 100644
index 00000000..ae210697
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Confirm_Installation.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Installation_Settings01.png b/images/src/png/EIC_SLE_Micro_setup_Installation_Settings01.png
new file mode 100644
index 00000000..0d684d14
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Installation_Settings01.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Installation_Settings02.png b/images/src/png/EIC_SLE_Micro_setup_Installation_Settings02.png
new file mode 100644
index 00000000..26b1a0af
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Installation_Settings02.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_License_Agreement.png b/images/src/png/EIC_SLE_Micro_setup_License_Agreement.png
new file mode 100644
index 00000000..c77806ff
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_License_Agreement.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_NTP_Configuration.png b/images/src/png/EIC_SLE_Micro_setup_NTP_Configuration.png
new file mode 100644
index 00000000..dfe35df9
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_NTP_Configuration.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Network_Card_Setup.png b/images/src/png/EIC_SLE_Micro_setup_Network_Card_Setup.png
new file mode 100644
index 00000000..8e506b90
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Network_Card_Setup.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Network_Settings.png b/images/src/png/EIC_SLE_Micro_setup_Network_Settings.png
new file mode 100644
index 00000000..7654caa7
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Network_Settings.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Network_Settings_DNS.png b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_DNS.png
new file mode 100644
index 00000000..b2d2bcab
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_DNS.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Network_Settings_Routing.png b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_Routing.png
new file mode 100644
index 00000000..7914bdb4
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_Routing.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Network_Settings_default_route.png b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_default_route.png
new file mode 100644
index 00000000..efc1dce2
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Network_Settings_default_route.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_Registration.png b/images/src/png/EIC_SLE_Micro_setup_Registration.png
new file mode 100644
index 00000000..46cb8c80
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_Registration.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_boot_menu.png b/images/src/png/EIC_SLE_Micro_setup_boot_menu.png
new file mode 100644
index 00000000..93aa809a
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_boot_menu.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_reboot.png b/images/src/png/EIC_SLE_Micro_setup_reboot.png
new file mode 100644
index 00000000..8084bbf9
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_reboot.png differ
diff --git a/images/src/png/EIC_SLE_Micro_setup_skip_Registration.png b/images/src/png/EIC_SLE_Micro_setup_skip_Registration.png
new file mode 100644
index 00000000..0e5cce19
Binary files /dev/null and b/images/src/png/EIC_SLE_Micro_setup_skip_Registration.png differ
diff --git a/images/src/png/Rancher_Redis_App.png b/images/src/png/Rancher_Redis_App.png
new file mode 100644
index 00000000..093d334d
Binary files /dev/null and b/images/src/png/Rancher_Redis_App.png differ
diff --git a/images/src/png/Rancher_Redis_Overview.png b/images/src/png/Rancher_Redis_Overview.png
new file mode 100644
index 00000000..40b5d758
Binary files /dev/null and b/images/src/png/Rancher_Redis_Overview.png differ
diff --git a/images/src/png/SAP-Rancher-Create-Config-YAML.png b/images/src/png/SAP-Rancher-Create-Config-YAML.png
new file mode 100755
index 00000000..af529c82
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-Config-YAML.png differ
diff --git a/images/src/png/SAP-Rancher-Create-Config.png b/images/src/png/SAP-Rancher-Create-Config.png
new file mode 100755
index 00000000..ad97afa6
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-Config.png differ
diff --git a/images/src/png/SAP-Rancher-Create-Custom.png b/images/src/png/SAP-Rancher-Create-Custom.png
new file mode 100755
index 00000000..399b9631
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-Custom.png differ
diff --git a/images/src/png/SAP-Rancher-Create-RKE-Version.png b/images/src/png/SAP-Rancher-Create-RKE-Version.png
new file mode 100755
index 00000000..9a990ab5
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-RKE-Version.png differ
diff --git a/images/src/png/SAP-Rancher-Create-Register.png b/images/src/png/SAP-Rancher-Create-Register.png
new file mode 100755
index 00000000..6594d357
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-Register.png differ
diff --git a/images/src/png/SAP-Rancher-Create-StrictARP.png b/images/src/png/SAP-Rancher-Create-StrictARP.png
new file mode 100755
index 00000000..706be3ef
Binary files /dev/null and b/images/src/png/SAP-Rancher-Create-StrictARP.png differ
diff --git a/images/src/png/SAP-Rancher-Main-Create.png b/images/src/png/SAP-Rancher-Main-Create.png
new file mode 100755
index 00000000..b0e8e6e7
Binary files /dev/null and b/images/src/png/SAP-Rancher-Main-Create.png differ