# values.yaml
global:
ingress:
annotations:
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/tags: scos.delete.on.teardown=true
alb.ingress.kubernetes.io/scheme: internal
alb.ingress.kubernetes.io/ssl-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
rbac:
create: true
serviceAccounts:
fluentbit:
create: true
name:
kibana:
create: true
name:
elasticsearch:
image:
repository: "docker.elastic.co/elasticsearch/elasticsearch-oss"
tag: "6.4.2"
pullPolicy: "IfNotPresent"
initImage:
repository: "busybox"
tag: "latest"
pullPolicy: "Always"
cluster:
name: "es-lexlogger"
# If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want
# enabled in the environment variables outlined in the README
xpackEnable: false
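    # Illustrative sketch only (not this chart's defaults): the non-OSS image ships
    # with X-Pack, so you might point elasticsearch.image at it and flip this flag:
    #   image:
    #     repository: "docker.elastic.co/elasticsearch/elasticsearch"
    #     tag: "6.4.2"
    #   cluster:
    #     xpackEnable: true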
# Some settings must be placed in a keystore, so they need to be mounted in from a secret.
# Use this setting to specify the name of the secret
# keystoreSecret: eskeystore
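    # A rough sketch of providing such a secret (the secret name and keystore file
    # name below are examples; check the chart templates for the exact key expected):
    #   kubectl create secret generic eskeystore \
    #     --from-file=elasticsearch.keystore=./elasticsearch.keystore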
config: {}
    # Custom parameters, as a string, to be appended to the ES_JAVA_OPTS environment variable
additionalJavaOpts: ""
env:
# IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes
# To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible
# node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster.
MINIMUM_MASTER_NODES: "2"
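      # Rule of thumb: quorum = floor(master_eligible_nodes / 2) + 1. With
      # master.replicas: 2 as configured below this gives floor(2/2) + 1 = 2, and
      # with 3 master-eligible nodes it is also 2; adjust this value whenever
      # master.replicas changes.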
client:
name: es-client
replicas: 2
serviceType: ClusterIP
loadBalancerIP: {}
loadBalancerSourceRanges: {}
heapSize: "512m"
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
resources:
limits:
cpu: "500m"
memory: "1536Mi"
requests:
cpu: "250m"
memory: "1536Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each client Pod
# podAnnotations:
# example: client-foo
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
master:
name: es-master
exposeHttp: false
replicas: 2
heapSize: "512m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: "4Gi"
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
resources:
limits:
cpu: "250m"
memory: "1536Mi"
requests:
cpu: "100m"
memory: "1536Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each master Pod
# podAnnotations:
# example: master-foo
podDisruptionBudget:
enabled: false
minAvailable: 1 # Same as `cluster.env.MINIMUM_MASTER_NODES`
# maxUnavailable: 1
updateStrategy:
type: RollingUpdate
data:
name: es-data
exposeHttp: false
replicas: 2
heapSize: "3072m"
heapNewSize: "512m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: "30Gi"
terminationGracePeriodSeconds: 3600
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
resources:
limits:
cpu: "1000m"
memory: "4096Mi"
requests:
cpu: "500m"
memory: "4096Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each data Pod
# podAnnotations:
# example: data-foo
podDisruptionBudget:
enabled: false
# minAvailable: 1
maxUnavailable: 1
updateStrategy:
type: RollingUpdate
## Additional init containers
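  ## For example (a commented sketch, not enabled by default): Elasticsearch
  ## typically requires vm.max_map_count >= 262144 on the host, which is often
  ## raised from a privileged init container such as:
  ##   - name: sysctl
  ##     image: busybox:latest
  ##     imagePullPolicy: IfNotPresent
  ##     command: ["sysctl", "-w", "vm.max_map_count=262144"]
  ##     securityContext:
  ##       privileged: true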
extraInitContainers: |
fluentbit:
name: fluentbit
on_minikube: false
image:
repository: 199837183662.dkr.ecr.us-east-2.amazonaws.com/scos/fluent-bit
tag: 0.14.4
pullPolicy: Always
  # When enabled, exposes JSON and Prometheus metrics on the {{ .Release.Name }}-metrics service
metrics:
enabled: false
service:
port: 2020
type: ClusterIP
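  # For example, with metrics.enabled: true you could scrape fluent-bit's built-in
  # HTTP server in-cluster (release name "lexlogger" as in the install example at
  # the bottom of this file); the /api/v1/metrics paths are fluent-bit's standard
  # monitoring endpoints, but check your fluent-bit version:
  #   curl http://lexlogger-metrics:2020/api/v1/metrics/prometheus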
# When enabled, fluent-bit will keep track of tailing offsets across pod restarts.
trackOffsets: false
parsers:
enabled: false
    ## List each parser as a key: value entry. For regex parsers the required
    ## fields are name and regex; for JSON parsers only name is required.
    ## See the commented sketch after json: below.
regex: []
json: []
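    ## A commented sketch of that shape (values are illustrative, not defaults):
    # regex:
    #   - name: apache_access
    #     regex: '^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\]'
    # json:
    #   - name: docker_json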
env: []
podAnnotations: {}
## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.existingConfigMap}}
## Defining existingConfigMap will cause templates/config.yaml
## to NOT generate a ConfigMap resource
##
existingConfigMap: ""
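  ## For example, with the release name "lexlogger" (see the install command at the
  ## bottom of this file) and existingConfigMap: "fluent-bit-config", the chart
  ## would look for a ConfigMap named "lexlogger-fluent-bit-config" instead of
  ## rendering templates/config.yaml.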
## Extra volumes containing additional files required for fluent-bit to work
## (eg. CA certificates)
## Ref: https://kubernetes.io/docs/concepts/storage/volumes/
extraVolumes: []
## Extra volume mounts for the fluent-bit pod.
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/
extraVolumeMounts: []
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
tolerations: []
nodeSelector: {}
extraFilters:
- name: grep
match: "*"
exclude: log /.*ELB-HealthChecker.*/
filter:
kubeURL: https://kubernetes.default.svc:443
kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
kubeTag: kube
  # If true, check whether the log field content is a JSON string map; if so,
  # append the map fields as part of the log structure.
  # mergeJSONLog: true
  # If true, honor the pod annotation fluentbit.io/parser: parser_name, where
  # parser_name must be the name of a parser contained within parsers.conf.
  # enableParser: true
  # If true, honor the pod annotation fluentbit.io/exclude: true and discard
  # logs from any pod that carries it.
  # enableExclude: true
fluentd:
name: fluentd
replicaCount: 2
image:
repository: gcr.io/google-containers/fluentd-elasticsearch
tag: v2.3.1
pullPolicy: IfNotPresent
output:
buffer_chunk_limit: 2M
buffer_queue_limit: 8
env: {}
resources:
limits:
cpu: 500m
memory: 200Mi
requests:
cpu: 250m
memory: 200Mi
service:
type: ClusterIP
externalPort: 80
ports:
- name: "monitor-agent"
protocol: TCP
containerPort: 24220
- name: "fbit-forward"
protocol: TCP
containerPort: 24224
ingress:
enabled: false
# hosts:
# - name: "http-input.local"
# protocol: TCP
# serviceName: http-input
# servicePort: 9880
annotations:
alb.ingress.kubernetes.io/healthcheck-path: /api/plugins.json
alb.ingress.kubernetes.io/healthcheck-port: "24220"
tls:
configMaps:
general.conf: |
# Prevent fluentd from handling records containing its own logs. Otherwise
      # it can lead to an infinite loop: an error sending one message generates
# another message which also fails to be sent and so on.
<match **fluentd**>
@type null
</match>
# Used for health checking
<source>
@type http
port 9880
bind 0.0.0.0
</source>
      # Emits internal metrics every minute and also exposes them on port 24220.
      # Useful for determining whether an output plugin is retrying/erroring,
      # or for checking the buffer queue length.
<source>
@type monitor_agent
bind 0.0.0.0
port 24220
tag fluentd.monitor.metrics
</source>
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
log_level warn
</system>
forward-input.conf: |
<source>
@type forward
port 24224
bind 0.0.0.0
</source>
filter.conf: |
<filter **>
@type dedot
de_dot true
de_dot_separator _
de_dot_nested true
</filter>
output.conf: |
<match **>
@type copy
<store>
@id elasticsearch
@type elasticsearch
@log_level warn
include_tag_key true
          # Replace with the host/port of your Elasticsearch cluster.
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
logstash_format true
<buffer>
@type memory
flush_mode interval
retry_type exponential_backoff
flush_thread_count 8
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action drop_oldest_chunk
</buffer>
</store>
<store>
@type prometheus
<metric>
name fluentd_output_status_num_records_total
type counter
desc The total number of outgoing records
<labels>
tag ${tag}
hostname ${hostname}
</labels>
</metric>
</store>
</match>
metrics.conf: |
<filter **>
@type prometheus
@log_level warn
<metric>
name fluentd_input_status_num_records_total
type counter
desc The total number of incoming records
<labels>
tag ${tag}
hostname ${hostname}
</labels>
</metric>
</filter>
<source>
@type prometheus
bind 0.0.0.0
port 24231
metrics_path /metrics
</source>
<source>
@type prometheus_output_monitor
interval 10
<labels>
hostname ${hostname}
</labels>
</source>
## Persist data to a persistent volume
persistence:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# annotations: {}
accessMode: ReadWriteOnce
size: 10Gi
nodeSelector: {}
tolerations: []
affinity: {}
kibana:
name: kibana
image:
repository: "docker.elastic.co/kibana/kibana-oss"
tag: "6.4.2"
pullPolicy: "IfNotPresent"
commandline:
args:
env: {}
# All Kibana configuration options are adjustable via env vars.
  # To map a config option to an env var, uppercase it and replace `.` with `_`
# Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
#
# ELASTICSEARCH_URL: http://elasticsearch-client:9200
# SERVER_PORT: 5601
# LOGGING_VERBOSE: "true"
# SERVER_DEFAULTROUTE: "/app/kibana"
files:
kibana.yml:
## Default Kibana configuration from kibana-docker.
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
logging.quiet: true
## Custom config properties below
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
# server.port: 5601
# logging.verbose: "true"
# server.defaultRoute: "/app/kibana"
service:
type: NodePort
externalPort: 443
internalPort: 5601
# authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
#
## LoadBalancer IP if service.type is LoadBalancer
## Default: nil
##
# loadBalancerIP: 10.2.2.2
annotations:
labels:
ingress:
enabled: true
# hosts:
# - chart-example.local
annotations:
alb.ingress.kubernetes.io/healthcheck-path: /status
alb.ingress.kubernetes.io/actions.redirect: '{"Type": "redirect", "RedirectConfig":{"Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
# kubernetes.io/tls-acme: "true"
# tls:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
livenessProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
enabled: false
initialDelaySeconds: 30
timeoutSeconds: 10
  # Enable an auth proxy; specify the proxy container in extraContainers below.
authProxyEnabled: false
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - --resource=uri=/*
# - --discovery-url=https://discovery-url
# - --client-id=client
# - --client-secret=secret
# - --listen=0.0.0.0:5602
# - --upstream-url=http://127.0.0.1:5601
# ports:
# - name: web
# containerPort: 9090
resources:
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
priorityClassName: ""
affinity: {}
tolerations: []
nodeSelector: {}
podAnnotations: {}
replicaCount: 1
revisionHistoryLimit: 3
# To export a dashboard from a running Kibana 6.3.x use:
# curl --user <username>:<password> -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json
# A dashboard is defined by a name and a string with the json payload or the download url
dashboardImport:
timeout: 60
xpackauth:
enabled: false
username: myuser
password: mypass
dashboards: {}
# k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json
  # List of plugins to install using an initContainer
plugins:
# - https://github.com/sivasamyk/logtrail/releases/download/v0.1.29/logtrail-6.4.0-0.1.29.zip
# - other_plugin
# helm upgrade --install lexlogger . \
# --namespace lexlogger \
# --set global.ingress.annotations."alb\.ingress\.kubernetes\.io\/subnets"="${SUBNETS//,/\\,}" \
# --set global.ingress.annotations."alb\.ingress\.kubernetes\.io\/security\-groups"="${SECURITY_GROUPS}" \
# --set kibana.ingress.hosts[0]="kibana\.${DNS_ZONE}" \
# --values ./values.yaml