Skip to content

Commit

Permalink
Merge branch 'release/v3.5.0'
Browse files Browse the repository at this point in the history
  • Loading branch information
royrusso committed Dec 31, 2018
2 parents e7ee485 + 2913674 commit 1936f4a
Show file tree
Hide file tree
Showing 42 changed files with 2,280 additions and 1,993 deletions.
60 changes: 28 additions & 32 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,41 +1,37 @@
FROM python:3.6-alpine3.7

RUN apk update
RUN apk add supervisor
RUN apk add --update py2-pip

# Upgrade and install basic Python dependencies
# This block added because of the trouble installing gevent on many systems
# https://hub.docker.com/r/openwhisk/dockerskeleton/~/dockerfile/
RUN apk add --no-cache bash \
&& apk add --no-cache --virtual .build-deps \
bzip2-dev \
gcc \
libc-dev
# && pip install --no-cache-dir gevent \
# && apk del .build-deps

# reqs layer
ADD requirements.txt .
RUN pip3 install -U -r requirements.txt
RUN pip3 install gunicorn==19.7.1

# Bundle app source
ADD . /src

COPY ./deployment/logging.conf /src/logging.conf
COPY ./deployment/gunicorn.conf /src/gunicorn.conf

RUN apk update && \
apk add supervisor && \
apk add --update py2-pip && \
apk add --no-cache bash && \
apk add --no-cache --virtual .build-deps bzip2-dev gcc libc-dev

# Copy project sources
COPY . /src

# Set working directory
WORKDIR /src

# Install app dependencies and create supervisord dirs
RUN pip3 install -U -r requirements.txt && \
pip3 install gunicorn==19.7.1 && \
mkdir -p /etc/supervisor/conf.d /var/log/supervisor /var/run/supervisor

# Copy configuration files
RUN cp /src/deployment/logging.conf /src/logging.conf && \
cp /src/deployment/gunicorn.conf /src/gunicorn.conf && \
cp /src/deployment/supervisord.conf /etc/supervisor/supervisord.conf && \
cp /src/deployment/gunicorn.conf /etc/supervisor/conf.d/gunicorn.conf

# Fix permissions
RUN chgrp -R 0 /src /var/log/supervisor /var/run/supervisor && \
chmod -R g=u /src /var/log/supervisor /var/run/supervisor

# Expose service port
EXPOSE 5000

# Setup supervisord
RUN mkdir -p /var/log/supervisor
COPY ./deployment/supervisord.conf /etc/supervisor/supervisord.conf
COPY ./deployment/gunicorn.conf /etc/supervisor/conf.d/gunicorn.conf

# Start processes
CMD ["supervisord", "-c", "/etc/supervisor/supervisord.conf"]


#ENTRYPOINT ["python"]
#CMD ["src/application.py"]
9 changes: 6 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ Simplified Monitoring and Management for ElasticSearch clusters.

![alt text](https://raw.githubusercontent.com/ElasticHQ/elasticsearch-HQ/master/main_dashboard.png)


## Key Features

* Works with 2.x, 5.x, 6.x and current versions of Elasticsearch.
Expand Down Expand Up @@ -46,9 +46,12 @@ For further installation and configuration help, please read the docs: [ElasticH

## Docker Installation

We are hosted on Dockerhub: [ElasticHQ on Dockerhub](https://hub.docker.com/r/elastichq/elasticsearch-hq/)
We are hosted on Dockerhub: [ElasticHQ on Dockerhub](https://hub.docker.com/r/elastichq/elasticsearch-hq/)

1. ``docker run -p 5000:5000 elastichq/elasticsearch-hq``
2. Access HQ with: `` http://localhost:5000 ``

Please see relevant documentation: [Docker Images](http://docs.elastichq.org/installation.html#docker-images).
For further instructions, please see relevant documentation: [Docker Images](http://docs.elastichq.org/installation.html#docker-images).

## Useful Links

Expand Down
11 changes: 10 additions & 1 deletion application.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
# import argparse
import optparse
import os

import logging
import logging.config
from elastichq import create_app
from elastichq.globals import socketio
from elastichq.utils import find_config

default_host = '0.0.0.0'
default_port = 5000
Expand Down Expand Up @@ -45,7 +47,14 @@
application.config['CA_CERTS'] = os.environ.get('HQ_CA_CERTS', options.ca_certs)

if is_gunicorn:
if options.debug:
config = find_config('logger_debug.json')
logging.config.dictConfig(config)

# we set reloader False so gunicorn doesn't call two instances of all the Flask init functions.
socketio.run(application, host=options.host, port=options.port, debug=options.debug, use_reloader=False)
else:
if options.debug:
config = find_config('logger_debug.json')
logging.config.dictConfig(config)
socketio.run(application, host=options.host, port=options.port, debug=options.debug)
11 changes: 9 additions & 2 deletions deployment/supervisord.conf
Original file line number Diff line number Diff line change
@@ -1,8 +1,15 @@
[supervisord]
nodaemon=true
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisor/supervisord.pid

[program:gunicorn]
command=/usr/local/bin/gunicorn application:application -w 1 --worker-class eventlet --config /src/gunicorn.conf --log-config /src/logging.conf --bind 0.0.0.0:5000
directory=/src
stdout_logfile=/var/log/supervisor/stdout.log
stderr_logfile=/var/log/supervisor/stderr.log
# Uncomment the following lines (and comment out the ones below them) if you
# want to save the stdout and stderr logs in separate files.
# stdout_logfile=/var/log/supervisor/elastichq_stdout.log
# stderr_logfile=/var/log/supervisor/elastichq_stderr.log
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true
2 changes: 1 addition & 1 deletion docs/.buildinfo
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: fd1e7fa5cfedfe6f8f3e6bb6c5e9d514
config: 975bbf0704e836cd18ad12bc8c255959
tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file modified docs/.doctrees/developer-guide.doctree
Binary file not shown.
Binary file modified docs/.doctrees/environment.pickle
Binary file not shown.
Binary file modified docs/.doctrees/faq.doctree
Binary file not shown.
Binary file modified docs/.doctrees/index.doctree
Binary file not shown.
Binary file modified docs/.doctrees/installation.doctree
Binary file not shown.
Binary file modified docs/.doctrees/rest-api.doctree
Binary file not shown.
Binary file modified docs/.doctrees/table-with-code.doctree
Binary file not shown.
Binary file modified docs/.doctrees/user-guide.doctree
Binary file not shown.
2 changes: 1 addition & 1 deletion elastichq/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ def create_app(env='PROD'):
else:
raise ValueError('Unknown environment: %s' % (env,))

init_log()
init_log(app)

app.register_blueprint(api_blueprint)
app.register_blueprint(public_blueprint)
Expand Down
2 changes: 1 addition & 1 deletion elastichq/config/logger.json
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
"propagate": "no"
},
"sqlalchemy": {
"level": "INFO",
"level": "WARN",
"handlers": [],
"qualname": "sqlalchemy",
"propagate": "no"
Expand Down
82 changes: 82 additions & 0 deletions elastichq/config/logger_debug.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
{
"version": 1,
"disable_existing_loggers": false,
"formatters": {
"simple": {
"format": "%(asctime)s \t %(levelname)s \t %(name)s \t %(module)s.%(funcName)s:%(lineno)d \t %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"standard_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"filename": "application.log",
"maxBytes": 104857600,
"backupCount": 2,
"encoding": "utf8"
}
},
"loggers": {
"elastichq": {
"level": "DEBUG",
"handlers": [],
"propagate": "no"
},
"sqlalchemy": {
"level": "DEBUG",
"handlers": [],
"qualname": "sqlalchemy",
"propagate": "no"
},
"flasklogger": {
"level": "DEBUG",
"handlers": [],
"qualname": "flasklogger",
"propagate": "no"
},
"werkzeug": {
"level": "DEBUG",
"handlers": [],
"qualname": "werkzeug",
"propagate": "no"
},
"requests": {
"level": "DEBUG",
"handlers": [],
"qualname": "requests",
"propagate": "no"
},
"elasticsearch": {
"level": "DEBUG",
"handlers": [],
"qualname": "elasticsearch",
"propagate": "no"
},
"elasticsearch.trace": {
"level": "DEBUG",
"handlers": [],
"qualname": "elasticsearch",
"propagate": "no"
},
"urllib3.connectionpool": {
"level": "ERROR",
"handlers": [],
"qualname": "urllib3",
"propagate": "no"
}
},
"root": {
"level": "DEBUG",
"handlers": [
"console",
"standard_handler"
]
}
}
32 changes: 17 additions & 15 deletions elastichq/config/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,19 +49,20 @@ class TestSettings(BaseSettings):
# cluster settings
HQ_CLUSTER_SETTINGS = {
'doc_id': 'hqsettings',
'index_name': '.elastichq',
'version': 1,
'doc_type': 'data',
'store_metrics': True,
'websocket_interval': 5,
'historic_poll_interval': 60,
'historic_days_to_store': 7
'index_name': '.elastichq', # internal elastichq settings index. one per cluster.
'version': 1, # version of this json settings doc
'doc_type': 'data', # mapping name
'store_metrics': True, # store the metrics being polled.
'websocket_interval': 5, # interval, in seconds, to poll cluster for metrics data.
'historic_poll_interval': 60, # number of seconds between poll calls to retrieve and store historical metrics.
'historic_days_to_store': 7, # days to store historic metrics
'show_dot_indices': True # whether to display dot-prefixed (hidden) indices; False filters them out
}

# static
HQ_SITE_URL = 'http://elastichq.org'
HQ_GH_URL = 'https://github.com/ElasticHQ/elasticsearch-HQ'
API_VERSION = 'v3.4.1'
API_VERSION = 'v3.5.0'
ES_V2_HOST = '127.0.0.1'
ES_V2_PORT = '9200'
ES_V5_HOST = '127.0.0.1'
Expand Down Expand Up @@ -123,19 +124,20 @@ class ProdSettings(BaseSettings):
# static
HQ_SITE_URL = 'http://elastichq.org'
HQ_GH_URL = 'https://github.com/ElasticHQ/elasticsearch-HQ'
API_VERSION = '3.4.1'
API_VERSION = '3.5.0'
SERVER_NAME = None

# cluster settings: specific settings for each cluster and how HQ should handle it.
HQ_CLUSTER_SETTINGS = {
'doc_id': 'hqsettings',
'index_name': '.elastichq',
'version': 1,
'doc_type': 'data',
'index_name': '.elastichq', # internal elastichq settings index. one per cluster.
'version': 1, # version of this json settings doc
'doc_type': 'data', # mapping name
'store_metrics': True, # whether to store metrics for this cluster
'websocket_interval': 5, # seconds
'historic_poll_interval': 60 * 5, # seconds
'historic_days_to_store': 7 # num days to keep historical metrics data
'websocket_interval': 5, # interval, in seconds, to poll cluster for metrics data.
'historic_poll_interval': 60 * 5, # number of seconds between poll calls to retrieve/store historical metrics.
'historic_days_to_store': 7, # num days to keep historical metrics data
'show_dot_indices': True # whether to display dot-prefixed (hidden) indices; False filters them out
}

# CACHE
Expand Down
2 changes: 1 addition & 1 deletion elastichq/globals.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def init_marshmallow(app):
ma.init_app(app)


def init_log():
def init_log(app):
"""
Initializes log format and console/file appenders
:return:
Expand Down
4 changes: 3 additions & 1 deletion elastichq/service/HQService.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,9 @@ def update_settings(self, cluster_name, body=None):
'historic_poll_interval': body.get('historic_poll_interval',
current_settings.get('historic_poll_interval')),
'historic_days_to_store': body.get('historic_days_to_store',
current_settings.get('historic_days_to_store'))
current_settings.get('historic_days_to_store')),
'show_dot_indices': body.get('show_dot_indices',
current_settings.get('show_dot_indices'))
}

connection = ConnectionService().get_connection(cluster_name)
Expand Down
7 changes: 6 additions & 1 deletion elastichq/service/IndicesService.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import jmespath

from elastichq.common.exceptions import BadRequest
from elastichq.service import ClusterService, ConnectionService
from elastichq.service import ClusterService, ConnectionService, HQService
from ..globals import REQUEST_TIMEOUT


Expand Down Expand Up @@ -96,11 +96,16 @@ def get_indices_summary(self, cluster_name, indices_names=None):
cluster_state = ClusterService().get_cluster_state(cluster_name, metric="metadata", indices=indices_names)
state_indices = jmespath.search("metadata.indices", cluster_state)
cat = connection.cat.indices(format='json')
show_dot_indices = HQService().get_settings(cluster_name).get('show_dot_indices')
indices = []
if state_indices:
the_indices = indices_stats.get("indices", None)
index_keys = list(the_indices.keys())
for key in index_keys:

if show_dot_indices is False and key.startswith(".") is True:
continue

one_index = the_indices.get(key)
index = {"index_name": key}
index['health'] = [x['health'] for x in cat if x['index'] == key][0]
Expand Down
Loading

0 comments on commit 1936f4a

Please sign in to comment.