# values-curator.yaml
# Default values for elasticsearch-curator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
elasticsearch-curator:
  cronjob:
    # At 01:00 every day
    schedule: "0 1 * * *"
    annotations: {}
    concurrencyPolicy: ""
    failedJobsHistoryLimit: ""
    successfulJobsHistoryLimit: ""
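    # Illustrative (commented-out) settings for the CronJob knobs above; the empty
    # defaults leave the cluster defaults in effect. Values below are examples only.
    # concurrencyPolicy: "Forbid"          # one of Allow, Forbid, Replace
    # successfulJobsHistoryLimit: "3"
    # failedJobsHistoryLimit: "1"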

  pod:
    annotations: {}
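    # Illustrative (commented-out) pod annotations; any key/value pairs are accepted.
    # For example, disabling Istio sidecar injection so the Job pod can complete:
    # annotations:
    #   sidecar.istio.io/inject: "false"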

  image:
    repository: quay.io/pires/docker-elasticsearch-curator
    tag: 5.5.4
    pullPolicy: IfNotPresent

  hooks:
    install: false
    upgrade: false

  # Run curator in dry-run mode (actions are logged but nothing is deleted)
  dryrun: false

  command: ["curator"]

  env: {}
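  # Illustrative (commented-out) extra environment variables. This assumes the chart
  # renders `env` as a simple name/value map; the variable below is an example only.
  # env:
  #   TZ: "Etc/UTC"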

  configMaps:
    # Delete indices older than 15 days
    action_file_yml: |-
      ---
      actions:
        1:
          action: delete_indices
          description: "Clean up ES by deleting old indices"
          options:
            timeout_override:
            continue_if_exception: False
            disable_action: False
            ignore_empty_list: True
          filters:
          - filtertype: age
            source: name
            direction: older
            timestring: '%Y.%m.%d'
            unit: days
            unit_count: 15
            field:
            stats_result:
            epoch:
            exclude: False
    # Having config_yml WILL override the other config
    config_yml: |-
      ---
      client:
        hosts:
          - lexlogger-es-client
        port: 9200
        # url_prefix:
        # use_ssl: True
        # certificate:
        # client_cert:
        # client_key:
        # ssl_no_validate: True
        # http_auth:
        # timeout: 30
        # master_only: False
      # logging:
      #   loglevel: INFO
      #   logfile:
      #   logformat: default
      #   blacklist: ['elasticsearch', 'urllib3']
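      # Illustrative (commented-out) settings for a TLS / basic-auth cluster, to be
      # uncommented under `client:` above; the certificate path assumes the
      # extraVolumeMounts example further down, and the credentials are placeholders.
      #   use_ssl: True
      #   certificate: /certs/ca.crt
      #   http_auth: elastic:changeme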

  resources: {}
    # We usually recommend not to specify default resources and to leave this as a
    # conscious choice for the user. This also increases the chances that the chart
    # runs on environments with few resources, such as Minikube. If you do want to
    # specify resources, uncomment the following lines, adjust them as necessary,
    # and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  priorityClassName: ""

  # extraVolumes and extraVolumeMounts allow you to mount other volumes.
  # Example use case: mount SSL certificates when Elasticsearch has TLS enabled.
  # extraVolumes:
  #   - name: es-certs
  #     secret:
  #       defaultMode: 420
  #       secretName: es-certs
  # extraVolumeMounts:
  #   - name: es-certs
  #     mountPath: /certs
  #     readOnly: true

  # Add your own init container or uncomment and modify the given example.
  extraInitContainers: {}
    ## Don't configure the S3 repository until Elasticsearch is reachable.
    ## Ensure that it is available at http://elasticsearch:9200
    ##
    # elasticsearch-s3-repository:
    #   image: jwilder/dockerize:latest
    #   imagePullPolicy: "IfNotPresent"
    #   command:
    #     - "/bin/sh"
    #     - "-c"
    #   args:
    #     - |
    #       ES_HOST=elasticsearch
    #       ES_PORT=9200
    #       ES_REPOSITORY=backup
    #       S3_REGION=us-east-1
    #       S3_BUCKET=bucket
    #       S3_BASE_PATH=backup
    #       S3_COMPRESS=true
    #       S3_STORAGE_CLASS=standard
    #       apk add curl --no-cache && \
    #       dockerize -wait http://${ES_HOST}:${ES_PORT} --timeout 120s && \
    #       cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY}
    #       {
    #         "type": "s3",
    #         "settings": {
    #           "bucket": "${S3_BUCKET}",
    #           "base_path": "${S3_BASE_PATH}",
    #           "region": "${S3_REGION}",
    #           "compress": "${S3_COMPRESS}",
    #           "storage_class": "${S3_STORAGE_CLASS}"
    #         }
    #       }
    #       EOF

  securityContext:
    runAsUser: 16  # run as cron user instead of root
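
# Usage sketch (assumptions): because all values are nested under the
# `elasticsearch-curator:` key, this file appears intended for a parent chart that
# declares elasticsearch-curator as a dependency. Chart and release names below
# are placeholders.
#   helm upgrade --install logging ./my-parent-chart -f values-curator.yaml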