deploy.sh
#!/bin/bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
echo "$DIR"
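# Minimal guard (sketch, not in the original flow): the cloud-init file below
# injects the local public key, so abort early if it is missing. Assumes the
# same ~/.ssh/id_rsa.pub path used further down.
if [ ! -f ~/.ssh/id_rsa.pub ]; then
    echo "[Error] ~/.ssh/id_rsa.pub not found; generate a key pair with ssh-keygen first."
    exit 1
fi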
# Configuring yaml files
if [ -f multipass.yaml ]; then
    rm multipass.yaml
fi
touch multipass.yaml
echo "# multipass.yaml" >> multipass.yaml
echo "ssh_authorized_keys:" >> multipass.yaml
echo "  - $(cat ~/.ssh/id_rsa.pub)" >> multipass.yaml
# Define the directory path
data_dir="volumes/data"
# Check if the directory exists
if [ -d "$data_dir" ]; then
    # Directory exists, so delete it
    echo "Deleting existing directory: $data_dir"
    rm -rf "$data_dir"
fi
mkdir -p "$data_dir"
if [ -f minio.yaml ]; then
    rm minio.yaml
fi
echo "apiVersion: v1
kind: Namespace
metadata:
  name: stores
  labels:
    name: stores
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  namespace: stores
spec:
  selector:
    matchLabels:
      app: minio
  replicas: 1
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        image: quay.io/minio/minio:latest
        command:
        - /bin/bash
        - -c
        args:
        - minio server /data --console-address :9090
        ports:
        - containerPort: 9000
        volumeMounts:
        - mountPath: /data
          name: localvolume
      nodeSelector:
        kubernetes.io/hostname: pronghorn" >> minio.yaml
data_dir="$DIR/volumes/data"
echo "      volumes:
      - name: localvolume
        hostPath:
          path: $data_dir
          type: DirectoryOrCreate" >> minio.yaml
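# Minimal guard (sketch, not in the original flow): make sure the manifest was
# actually written before the deployment steps below try to apply it.
if [ ! -s minio.yaml ]; then
    echo "[Error] minio.yaml was not generated."
    exit 1
fi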
# Default configuration
nodes=3
cpus=2
memory="8G"
disk_size="50G"
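# Sketch (not part of the original script): the defaults above could be
# overridden from the environment without editing the file, e.g.
#   NODES=5 MEMORY=16G ./deploy.sh
# The NODES/CPUS/MEMORY/DISK_SIZE names are illustrative assumptions.
nodes="${NODES:-$nodes}"
cpus="${CPUS:-$cpus}"
memory="${MEMORY:-$memory}"
disk_size="${DISK_SIZE:-$disk_size}"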
# Bootstrap The Nodes
for node in $(seq 1 $nodes)
do
    if [ "$node" -eq 1 ]; then
        echo "Deploying the master node"
        multipass launch --cpus "$cpus" --memory "$memory" --disk "$disk_size" --name pronghorn 20.04 --cloud-init multipass.yaml
        echo "Master node deployed"
    else
        echo "Launching worker node $node"
        multipass launch --cpus "$cpus" --memory "$memory" --disk "$disk_size" --name "pronghorn-m0$node" 20.04 --cloud-init multipass.yaml
        echo "Worker node $node deployed"
    fi
done
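# Optional sanity check (sketch, not in the original flow): confirm all
# instances came up before building the cluster on top of them. Assumes the
# default names used above (pronghorn, pronghorn-m02, pronghorn-m03).
multipass list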
# Deploy Kubernetes Cluster
master_ip=$(multipass info pronghorn | grep IPv4 | awk '{print $2}')
k3sup install --ip "$master_ip" --user ubuntu --k3s-extra-args '--cluster-init'
for node in $(seq 2 $nodes)
do
    worker_ip=$(multipass info "pronghorn-m0$node" | grep IPv4 | awk '{print $2}')
    k3sup join --ip "$worker_ip" --server-ip "$master_ip" --user ubuntu
done
echo "[Completed] Kubernetes cluster created with $nodes nodes ($cpus CPUs, $memory memory, $disk_size disk each)."
# Set correct contexts
export KUBECONFIG="$DIR/kubeconfig"
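# Optional sanity check (sketch): list the nodes to confirm the workers joined.
# Assumes k3sup wrote its kubeconfig to $DIR/kubeconfig, which is where the
# export above points (k3sup's default when the script is run from $DIR).
kubectl get nodes -o wide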
# Install OpenFaaS
arkade install openfaas --set faasnetes.image=skharban/faas-netes:privileged-containers &> /dev/null
if kubectl get namespace openfaas &> /dev/null; then
    echo "[Completed] OpenFaaS Installed."
else
    echo "[Error] OpenFaaS Installation Unsuccessful."
    exit 1
fi
# Ensure gateway rollout is complete
while true; do
    pod_list=$(kubectl get pods -n openfaas | grep gateway)
    if [ -n "$pod_list" ]; then
        status=$(echo "$pod_list" | grep Running)
        if [ -n "$status" ]; then
            echo "[Completed] Gateway Rollout"
            break
        else
            echo "[Waiting] Gateway Rollout In Progress"
            sleep 10
        fi
    else
        echo "[Error] No Gateway Pod Found"
        exit 1
    fi
done
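# A stricter readiness check than pod phase (sketch, assuming the OpenFaaS
# chart's deployment is named "gateway"): wait for the rollout itself, and only
# warn rather than abort if it times out.
kubectl rollout status deploy/gateway -n openfaas --timeout=120s || echo "[Warning] Gateway rollout status check timed out"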
# Port forward OpenFaaS Gateway
nohup kubectl port-forward -n openfaas svc/gateway 8080:8080 &
sleep 5s
# Export OpenFaaS Password
export OPENFAAS_PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo)
echo "$OPENFAAS_PASSWORD" > .credentials
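# The password is written to disk in plain text; restricting the file to the
# current user is a reasonable precaution (optional hardening, not in the
# original flow).
chmod 600 .credentials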
# Authenticate OpenFaaS
cat .credentials | faas-cli login --username admin --password-stdin
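# Optional sanity check (sketch): make one authenticated call against the
# gateway. faas-cli defaults to http://127.0.0.1:8080, which the port-forward
# above exposes.
faas-cli list || echo "[Warning] Could not reach the OpenFaaS gateway"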
# Deploy MinIO (Object Store)
kubectl apply -f "$DIR/minio.yaml"
kubectl apply -f "$DIR/minio-service.yaml"
# Deploy Database
kubectl apply -f "$DIR/database/pod.yaml"
# Check Database deployment
attempts=0
# Note: this assumes the database pod is the first pod listed in the stores
# namespace, matching the original deployment order.
pod_name=$(kubectl get pod -n stores -o jsonpath='{.items[0].metadata.name}')
while [[ "$attempts" -lt 3 ]]; do
    pod_status=$(kubectl get pod -n stores "$pod_name" -o jsonpath='{.status.phase}')
    if [[ "$pod_status" == "Running" ]]; then
        echo "[Completed] Database Deployed on Cluster."
        break
    else
        echo "[Waiting] Database Deployment In Progress"
        sleep 10
        attempts=$((attempts+1))
    fi
done
if [[ "$attempts" -eq 3 ]]; then
    echo "[Error] Database Deployment Failed."
    exit 1
fi
# Check MinIO deployment
attempts=0
# Select the MinIO pod by its app=minio label (set in minio.yaml above) rather
# than by list position.
pod_name=$(kubectl get pod -n stores -l app=minio -o jsonpath='{.items[0].metadata.name}')
while [[ "$attempts" -lt 3 ]]; do
    pod_status=$(kubectl get pod -n stores "$pod_name" -o jsonpath='{.status.phase}')
    if [[ "$pod_status" == "Running" ]]; then
        echo "[Completed] MinIO Deployed on Cluster."
        break
    else
        echo "[Waiting] MinIO Deployment In Progress"
        sleep 10
        attempts=$((attempts+1))
    fi
done
if [[ "$attempts" -eq 3 ]]; then
    echo "[Error] MinIO Deployment Failed."
    exit 1
fi
# Check if MinIO service is running
attempts=0
while [[ "$attempts" -lt 3 ]]; do
    service_ip=$(kubectl get svc minio-svc -n stores -o jsonpath='{.spec.clusterIP}')
    if [[ -n "$service_ip" ]]; then
        echo "[Completed] MinIO Service Deployed."
        break
    else
        echo "[Waiting] MinIO Service Deployment In Progress"
        sleep 10
        attempts=$((attempts+1))
    fi
done
if [[ "$attempts" -eq 3 ]]; then
    echo "[Error] MinIO Service Deployment Failed."
    exit 1
fi
# Port forward MinIO and register it with the mc client
nohup kubectl port-forward -n stores svc/minio-svc 9000:9000 &
sleep 5s
mc alias set myminio http://localhost:9000 minioadmin minioadmin
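# Optional sanity check (sketch): the alias above uses MinIO's default
# minioadmin credentials, since the Deployment sets no MINIO_ROOT_USER/PASSWORD.
# Listing buckets (empty on a fresh install) confirms the object store answers.
mc ls myminio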