diff --git a/contrib/demo-collector.py b/contrib/demo-collector.py index 6f9541c6d..cb8032512 100755 --- a/contrib/demo-collector.py +++ b/contrib/demo-collector.py @@ -2,33 +2,39 @@ from commands import getstatusoutput from platform import node from socket import socket, AF_INET, SOCK_STREAM -from sys import argv, exit +from sys import argv from time import sleep, time DELAY = 60 CARBON_SERVER = 'localhost' CARBON_PORT = 2003 + class Carbon: def __init__(self, hostname, port): self.s = socket(AF_INET, SOCK_STREAM) self.hostname = hostname self.port = int(port) self.connect() + def connect(self): try: self.s.connect((self.hostname, self.port)) - except IOError, e: + except IOError as e: print("connect: ", e) return - def disconnect(self): self.s.close() + + def disconnect(self): + self.s.close() + def send(self, data): try: self.s.sendall(data + "\n") - except: + except Exception: self.connect() self.s.sendall(data + "\n") + class Host: def __init__(self): self.historical = {} @@ -53,9 +59,11 @@ def delta_analyzer(self, measurements, data, now): for line in data: for measurement, loc in measurements.iteritems(): metric_name = "%s.%s" % (line[0], measurement) - try: value = line[loc] - except: continue - if self.historical.has_key(metric_name): + try: + value = line[loc] + except Exception: + continue + if metric_name in self.historical: current = value delta = int(value) - int(self.historical[metric_name][1]) timedelta = time() - self.historical[metric_name][0] @@ -147,7 +155,7 @@ def fetch_smb_statistics(self): for i, block in enumerate(raw_data.split("\n\n")): if i not in measurements.keys(): continue raw_data = block.split("\n") - if this_node is not None: + if this_node is not None: this_node_count = [line.startswith(this_node + ":") for line in raw_data].count(True) else: this_node_count = len(raw_data) - 4 @@ -160,7 +168,7 @@ def main(): host = Host() hostname = node().split('.')[0] - graphite = Carbon(CARBON_SERVER, CARBON_PORT); + graphite = Carbon(CARBON_SERVER, CARBON_PORT) while True: data = host.get_all() @@ -170,6 +178,6 @@ def main(): graphite.send(metric) sleep(DELAY) + if __name__ == '__main__': main() - diff --git a/contrib/memcache_whisper.py b/contrib/memcache_whisper.py index 5d14d6320..8831d417e 100644 --- a/contrib/memcache_whisper.py +++ b/contrib/memcache_whisper.py @@ -30,13 +30,22 @@ For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835 """ -import os, struct, time +import os +import struct +import sys +import time try: import fcntl CAN_LOCK = True except ImportError: CAN_LOCK = False +if sys.version_info[0] == 3: + xrange = range + # This `file` hack avoids linter warnings under Python-3, + # but this code likely only works with Python-2. 
+ file = None + LOCK = False CACHE_HEADERS = False __headerCache = {} @@ -58,12 +67,15 @@ debug = startBlock = endBlock = lambda *a,**k: None + def exists(path): return os.path.exists(path) + def drop(path): os.remove(path) + def enableMemcache(servers = ['127.0.0.1:11211'], min_compress_len = 0): from StringIO import StringIO import memcache @@ -84,15 +96,17 @@ def close(self): if self.mode == "r+b" or self.mode == "wb": MC.set(self.name, self.getvalue(), min_compress_len = min_compress_len) StringIO.close(self) - + def exists(path): - return MC.get(path) != None + return MC.get(path) is not None def drop(path): MC.delete(path) + def enableDebug(): global open, debug, startBlock, endBlock + class open(file): def __init__(self,*args,**kwargs): file.__init__(self,*args,**kwargs) @@ -180,13 +194,13 @@ def create(path,archiveList,xFilesFactor=0.5): if i == len(archiveList) - 1: break next = archiveList[i+1] assert archive[0] < next[0],\ - "You cannot configure two archives with the same precision %s,%s" % (archive,next) + "You cannot configure two archives with the same precision %s,%s" % (archive,next) assert (next[0] % archive[0]) == 0,\ - "Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0]) + "Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0]) retention = archive[0] * archive[1] nextRetention = next[0] * next[1] assert nextRetention > retention,\ - "Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next) + "Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next) #Looks good, now we create the file and write the header assert not exists(path), "File %s already exists!" 
% path fh = open(path,'wb') @@ -211,7 +225,7 @@ def create(path,archiveList,xFilesFactor=0.5): def __propagate(fh,timestamp,xff,higher,lower): lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint']) - lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint'] + # lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint'] fh.seek(higher['offset']) packedPoint = fh.read(pointSize) (higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint) @@ -301,7 +315,7 @@ def update(path,value,timestamp=None): if baseInterval == 0: #This file's first update fh.seek(archive['offset']) fh.write(myPackedPoint) - baseInterval,baseValue = myInterval,value + baseInterval,baseValue = myInterval,value # noqa: F841 else: #Not our first update timeDistance = myInterval - baseInterval pointDistance = timeDistance / archive['secondsPerPoint'] diff --git a/contrib/test_aggregator_rules.py b/contrib/test_aggregator_rules.py index 58d9065b6..e93c3ea66 100644 --- a/contrib/test_aggregator_rules.py +++ b/contrib/test_aggregator_rules.py @@ -8,12 +8,12 @@ LIB_DIR = join(ROOT_DIR, 'graphite', 'lib') sys.path.insert(0, LIB_DIR) -from carbon.aggregator.rules import RuleManager +from carbon.aggregator.rules import RuleManager # noqa: E402 ### Basic usage if len(sys.argv) != 3: print("Usage: %s 'aggregator rule' 'line item'" % (__file__)) - print("\nSample invocation: %s %s %s" % \ + print("\nSample invocation: %s %s %s" % (__file__, "'...sum.all (10) = sum ..<>.sum.'", 'stats.prod.js.ktime_sum.sum.host2' )) sys.exit(42) diff --git a/setup.cfg b/setup.cfg index 4adb04e4b..916230f13 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,7 +18,7 @@ provides = graphite obsoletes = graphite <= 0.9.9 [flake8] -exclude = .tox,contrib +exclude = .tox ignore = # E111 indentation is not a multiple of four E111, @@ -26,10 +26,6 @@ ignore = E114, # E121 continuation line under-indented for hanging indent E121, - # E122 continuation line missing indentation or outdented - E122, - # E124 closing bracket does not match visual indentation - E124, # E126 continuation line over-indented for hanging indent E126, # E128 continuation line under-indented for visual indent @@ -68,9 +64,5 @@ ignore = W504, # E701 multiple statements on one line (colon) E701, - # E713 test for membership should be 'not in' - E713, # E731 do not assign a lambda expression, use a def E731, - # F841 local variable 'stuff' is assigned to but never used - F841, diff --git a/webapp/graphite/errors.py b/webapp/graphite/errors.py index 2a11e74ab..5d3a6618a 100644 --- a/webapp/graphite/errors.py +++ b/webapp/graphite/errors.py @@ -1,3 +1,6 @@ +from django.http import HttpResponseBadRequest + + class NormalizeEmptyResultError(Exception): # throw error for normalize() when empty pass @@ -5,3 +8,14 @@ class NormalizeEmptyResultError(Exception): class InputParameterError(ValueError): pass + + +# decorator which turns InputParameterExceptions into Django's HttpResponseBadRequest +def handleInputParameterError(f): + def new_f(*args, **kwargs): + try: + return f(*args, **kwargs) + except InputParameterError as e: + return HttpResponseBadRequest('Bad Request: {err}'.format(err=e)) + + return new_f diff --git a/webapp/graphite/functions/params.py b/webapp/graphite/functions/params.py index 85345e7a8..c041395cf 100644 --- a/webapp/graphite/functions/params.py +++ b/webapp/graphite/functions/params.py @@ -179,15 +179,28 @@ def toJSON(self): jsonVal['suggestions'] = self.suggestions return jsonVal - def validateValue(self, value): + def 
validateValue(self, value, func): + # if value isn't specified and there's a default then the default will be used, + # we don't need to validate the default value because we trust that it is valid if value is None and self.default is not None: - value = self.default + return True # None is ok for optional params if not self.required and value is None: return True - return self.type.isValid(value) + # parameter is restricted to a defined set of values, but value is not in it + if self.options and value not in self.options: + raise InputParameterError( + 'Invalid option specified for function "{func}" parameter "{param}": {value}'.format( + func=func, param=self.name, value=repr(value))) + + if not self.type.isValid(value): + raise InputParameterError( + 'Invalid "{type}" value specified for function "{func}" parameter "{param}": {value}'.format( + type=self.type.name, func=func, param=self.name, value=repr(value))) + + return True def validateParams(func, params, args, kwargs): @@ -218,17 +231,7 @@ def validateParams(func, params, args, kwargs): # requirement is satisfied from "args" value = args[i] - # parameter is restricted to a defined set of values, but value is not in it - if params[i].options and value not in params[i].options: - raise InputParameterError( - 'Invalid option specified for function "{func}" parameter "{param}": {value}'.format( - func=func, param=params[i].name, value=repr(value))) - - if not params[i].validateValue(value): - raise InputParameterError( - 'Invalid "{type}" value specified for function "{func}" parameter "{param}": {value}'.format( - type=params[i].type.name, func=func, param=params[i].name, value=repr(value))) - + params[i].validateValue(value, func) valid_args.append(params[i].name) return True diff --git a/webapp/graphite/logger.py b/webapp/graphite/logger.py index 4baafa442..c75f3802c 100644 --- a/webapp/graphite/logger.py +++ b/webapp/graphite/logger.py @@ -15,19 +15,8 @@ import os import logging from logging.handlers import TimedRotatingFileHandler as Rotater -try: - from logging import NullHandler -except ImportError as ie: # py2.6 - from logging import Handler - - class NullHandler(Handler): - - def emit(self, record): - pass -try: - from logging import FileHandler, StreamHandler -except ImportError as ie: # py2.6 - from logging.handlers import FileHandler, StreamHandler +from logging import NullHandler, FileHandler, StreamHandler + from django.conf import settings diff --git a/webapp/graphite/metrics/views.py b/webapp/graphite/metrics/views.py index df6978b04..f7a1a42f1 100644 --- a/webapp/graphite/metrics/views.py +++ b/webapp/graphite/metrics/views.py @@ -22,6 +22,7 @@ from graphite.carbonlink import CarbonLink from graphite.compat import HttpResponse, HttpResponseBadRequest +from graphite.errors import InputParameterError, handleInputParameterError from graphite.logger import log from graphite.render.attime import parseATTime from graphite.storage import STORE, extractForwardHeaders @@ -49,6 +50,20 @@ def index_json(request): return json_response_for(request, matches, jsonp=jsonp) +def queryParamAsInt(queryParams, name, default): + if name not in queryParams: + return default + + try: + return int(queryParams[name]) + except Exception as e: + raise InputParameterError('Invalid int value {value} for param {name}: {err}'.format( + value=repr(queryParams[name]), + name=name, + err=str(e))) + + +@handleInputParameterError def find_view(request): "View for finding metrics matching a given pattern" @@ -56,33 +71,56 @@ def find_view(request): 
queryParams.update(request.POST) format = queryParams.get('format', 'treejson') - leaves_only = int( queryParams.get('leavesOnly', 0) ) - local_only = int( queryParams.get('local', 0) ) - wildcards = int( queryParams.get('wildcards', 0) ) + leaves_only = queryParamAsInt(queryParams, 'leavesOnly', 0) + local_only = queryParamAsInt(queryParams, 'local', 0) + wildcards = queryParamAsInt(queryParams, 'wildcards', 0) tzinfo = pytz.timezone(settings.TIME_ZONE) if 'tz' in queryParams: try: - tzinfo = pytz.timezone(queryParams['tz']) + value = queryParams['tz'] + tzinfo = pytz.timezone(value) except pytz.UnknownTimeZoneError: pass + except Exception as e: + raise InputParameterError( + 'Invalid value {value} for param tz: {err}' + .format(value=repr(value), err=str(e))) if 'now' in queryParams: - now = parseATTime(queryParams['now'], tzinfo) + try: + value = queryParams['now'] + now = parseATTime(value, tzinfo) + except Exception as e: + raise InputParameterError( + 'Invalid value {value} for param now: {err}' + .format(value=repr(value), err=str(e))) else: now = datetime.now(tzinfo) if 'from' in queryParams and str(queryParams['from']) != '-1': - fromTime = int(epoch(parseATTime(queryParams['from'], tzinfo, now))) + try: + value = queryParams['from'] + fromTime = int(epoch(parseATTime(value, tzinfo, now))) + except Exception as e: + raise InputParameterError( + 'Invalid value {value} for param from: {err}' + .format(value=repr(value), err=str(e))) else: fromTime = -1 - if 'until' in queryParams and str(queryParams['from']) != '-1': - untilTime = int(epoch(parseATTime(queryParams['until'], tzinfo, now))) + if 'until' in queryParams and str(queryParams['until']) != '-1': + try: + value = queryParams['until'] + untilTime = int(epoch(parseATTime(value, tzinfo, now))) + except Exception as e: + raise InputParameterError( + 'Invalid value {value} for param until: {err}' + .format(value=repr(value), err=str(e))) else: untilTime = -1 - nodePosition = int( queryParams.get('position', -1) ) + nodePosition = queryParamAsInt(queryParams, 'position', -1) jsonp = queryParams.get('jsonp', False) forward_headers = extractForwardHeaders(request) @@ -91,17 +129,15 @@ def find_view(request): if untilTime == -1: untilTime = None - automatic_variants = int( queryParams.get('automatic_variants', 0) ) + automatic_variants = queryParamAsInt(queryParams, 'automatic_variants', 0) try: query = str(queryParams['query']) except KeyError: - return HttpResponseBadRequest(content="Missing required parameter 'query'", - content_type='text/plain') + raise InputParameterError('Missing required parameter \'query\'') if query == '': - return HttpResponseBadRequest(content="Required parameter 'query' is empty", - content_type='text/plain') + raise InputParameterError('Required parameter \'query\' is empty') if '.' in query: base_path = query.rsplit('.', 1)[0] + '.' 
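
Note: the find_view() changes above route every user-input failure through InputParameterError, and the handleInputParameterError decorator added in webapp/graphite/errors.py turns those exceptions into HTTP 400 responses. Below is a minimal sketch of the pattern (not part of the patch; the view and its 'limit' parameter are hypothetical, and it assumes a configured Django app with graphite-web on the import path):

from django.http import JsonResponse

from graphite.errors import InputParameterError, handleInputParameterError


@handleInputParameterError
def limit_view(request):
    # Hypothetical view for illustration only. A bad 'limit' value becomes
    # a 400 "Bad Request: ..." response instead of an unhandled 500.
    try:
        limit = int(request.GET.get('limit', 10))
    except ValueError as e:
        raise InputParameterError(
            'Invalid int value for param limit: {err}'.format(err=e))
    return JsonResponse({'limit': limit})
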
diff --git a/webapp/graphite/render/datalib.py b/webapp/graphite/render/datalib.py index e2c11b948..a53f05ef0 100755 --- a/webapp/graphite/render/datalib.py +++ b/webapp/graphite/render/datalib.py @@ -82,11 +82,16 @@ def consolidate(self, valuesPerPoint): 'first': lambda usable: usable[0], 'last': lambda usable: usable[-1], } + __consolidation_function_aliases = { + 'avg': 'average', + } def __consolidatingGenerator(self, gen): - try: + if self.consolidationFunc in self.__consolidation_functions: cf = self.__consolidation_functions[self.consolidationFunc] - except KeyError: + elif self.consolidationFunc in self.__consolidation_function_aliases: + cf = self.__consolidation_functions[self.__consolidation_function_aliases[self.consolidationFunc]] + else: raise Exception("Invalid consolidation function: '%s'" % self.consolidationFunc) buf = [] diff --git a/webapp/graphite/render/functions.py b/webapp/graphite/render/functions.py index 6d32efc6b..9490d5464 100644 --- a/webapp/graphite/render/functions.py +++ b/webapp/graphite/render/functions.py @@ -1855,7 +1855,7 @@ def consolidateBy(requestContext, seriesList, consolidationFunc): """ Takes one metric or a wildcard seriesList and a consolidation function name. - Valid function names are 'sum', 'average', 'min', 'max', 'first' & 'last'. + Valid function names are 'sum', 'average'/'avg', 'min', 'max', 'first' & 'last'. When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to @@ -1882,7 +1882,7 @@ def consolidateBy(requestContext, seriesList, consolidationFunc): consolidateBy.group = 'Special' consolidateBy.params = [ Param('seriesList', ParamTypes.seriesList, required=True), - Param('consolidationFunc', ParamTypes.string, required=True, options=['sum', 'average', 'avg_zero', 'min', 'max', 'first', 'last']), + Param('consolidationFunc', ParamTypes.string, required=True, options=['sum', 'average', 'avg', 'avg_zero', 'min', 'max', 'first', 'last']), ] @@ -3787,12 +3787,13 @@ def getLastDeviation(i): deviationSeries = TimeSeries(deviationName, series.start, series.end , series.step, deviations, tags=deviationTags, xFilesFactor=series.xFilesFactor) - results = { 'predictions': forecastSeries - , 'deviations': deviationSeries - , 'intercepts': intercepts - , 'slopes': slopes - , 'seasonals': seasonals - } + results = { + 'predictions': forecastSeries, + 'deviations' : deviationSeries, + 'intercepts' : intercepts, + 'slopes' : slopes, + 'seasonals' : seasonals, + } return results diff --git a/webapp/graphite/render/glyph.py b/webapp/graphite/render/glyph.py index 6f75b449c..2851321ab 100644 --- a/webapp/graphite/render/glyph.py +++ b/webapp/graphite/render/glyph.py @@ -90,7 +90,7 @@ percent_l_supported = True else: percent_l_supported = False -except ValueError as e: +except ValueError: percent_l_supported = False DATE_FORMAT = settings.DATE_FORMAT @@ -1816,7 +1816,7 @@ def drawGraph(self,**params): self.valueLabelsMin = float( params.get('valueLabelsMin',5) ) self.valueLabels = params.get('valueLabels','percent') assert self.valueLabels in self.validValueLabels, \ - "valueLabels=%s must be one of %s" % (self.valueLabels,self.validValueLabels) + "valueLabels=%s must be one of %s" % (self.valueLabels,self.validValueLabels) if self.valueLabels != 'none': self.drawLabels() diff --git a/webapp/graphite/render/views.py b/webapp/graphite/render/views.py index b7ca5cf9b..5624d96dc 100755 --- a/webapp/graphite/render/views.py +++ b/webapp/graphite/render/views.py 
@@ -22,7 +22,7 @@ from six.moves.urllib.parse import urlencode, urlsplit, urlunsplit, parse_qs from graphite.compat import HttpResponse -from graphite.errors import InputParameterError +from graphite.errors import InputParameterError, handleInputParameterError from graphite.user_util import getProfileByUsername from graphite.util import json, unpickle, pickle, msgpack, BytesIO from graphite.storage import extractForwardHeaders @@ -34,7 +34,7 @@ from graphite.render.glyph import GraphTypes from graphite.tags.models import Series, Tag, TagValue, SeriesTag # noqa # pylint: disable=unused-import -from django.http import HttpResponseServerError, HttpResponseRedirect, HttpResponseBadRequest +from django.http import HttpResponseServerError, HttpResponseRedirect from django.template import Context, loader from django.core.cache import cache from django.core.exceptions import ObjectDoesNotExist @@ -46,20 +46,17 @@ loadFunctions() -def handleInputParameterError(f): - def new_f(*args, **kwargs): - try: - return f(*args, **kwargs) - except InputParameterError as e: - return HttpResponseBadRequest('Bad Request: {err}'.format(err=e)) - - return new_f - - @handleInputParameterError def renderView(request): start = time() - (graphOptions, requestOptions) = parseOptions(request) + + try: + # we consider exceptions thrown by the option + # parsing to be due to user input error + (graphOptions, requestOptions) = parseOptions(request) + except Exception as e: + raise InputParameterError(str(e)) + useCache = 'noCache' not in requestOptions cacheTimeout = requestOptions['cacheTimeout'] # TODO: Make that a namedtuple or a class. diff --git a/webapp/graphite/storage.py b/webapp/graphite/storage.py index 126c67d04..f8ce0edc3 100755 --- a/webapp/graphite/storage.py +++ b/webapp/graphite/storage.py @@ -21,6 +21,7 @@ from django.utils.importlib import import_module from graphite.logger import log +from graphite.errors import InputParameterError from graphite.node import LeafNode from graphite.intervals import Interval, IntervalSet from graphite.finders.utils import FindQuery, BaseFinder @@ -181,9 +182,6 @@ def fetch(self, patterns, startTime, endTime, now, requestContext): ) jobs.append(job) - done = 0 - errors = 0 - # Start fetches start = time.time() results = self.wait_jobs(jobs, settings.FETCH_TIMEOUT, @@ -250,12 +248,17 @@ def get_index(self, requestContext=None): return sorted(list(set(results))) def find(self, pattern, startTime=None, endTime=None, local=False, headers=None, leaves_only=False): - query = FindQuery( - pattern, startTime, endTime, - local=local, - headers=headers, - leaves_only=leaves_only - ) + try: + query = FindQuery( + pattern, startTime, endTime, + local=local, + headers=headers, + leaves_only=leaves_only + ) + except Exception as e: + raise InputParameterError( + 'Failed to instantiate find query: {err}' + .format(err=str(e))) warn_threshold = settings.METRICS_FIND_WARNING_THRESHOLD fail_threshold = settings.METRICS_FIND_FAILURE_THRESHOLD diff --git a/webapp/graphite/util.py b/webapp/graphite/util.py index 43c774d9e..6b4cc559e 100644 --- a/webapp/graphite/util.py +++ b/webapp/graphite/util.py @@ -168,11 +168,11 @@ class SafeUnpickler(object): @classmethod def find_class(cls, module, name): - if not module in cls.PICKLE_SAFE: + if module not in cls.PICKLE_SAFE: raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module) __import__(module) mod = sys.modules[module] - if not name in cls.PICKLE_SAFE[module]: + if name not in cls.PICKLE_SAFE[module]: raise 
pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) return getattr(mod, name) @@ -201,11 +201,11 @@ class SafeUnpickler(pickle.Unpickler): } def find_class(self, module, name): - if not module in self.PICKLE_SAFE: + if module not in self.PICKLE_SAFE: raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module) __import__(module) mod = sys.modules[module] - if not name in self.PICKLE_SAFE[module]: + if name not in self.PICKLE_SAFE[module]: raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) return getattr(mod, name) diff --git a/webapp/tests/test_attime.py b/webapp/tests/test_attime.py index 866f3b4be..e0623f782 100644 --- a/webapp/tests/test_attime.py +++ b/webapp/tests/test_attime.py @@ -399,11 +399,11 @@ def test_get_years(self): def test_m_raises_Exception(self): with self.assertRaises(Exception): - result = getUnitString("m") + _ = getUnitString("m") def test_integer_raises_Exception(self): with self.assertRaises(Exception): - result = getUnitString(1) + _ = getUnitString(1) @mock.patch('graphite.render.attime.datetime', mockDateTime(2016, 2, 29, 00, 0, 0)) diff --git a/webapp/tests/test_dashboard.py b/webapp/tests/test_dashboard.py index d8536a4e2..160d0f949 100644 --- a/webapp/tests/test_dashboard.py +++ b/webapp/tests/test_dashboard.py @@ -45,14 +45,14 @@ def test_dashboard_conf_read_failure(self, check): check.side_effect = OSError(errno.EPERM, 'Operation not permitted') url = reverse('dashboard') with self.assertRaises(Exception): - response = self.client.get(url) + _ = self.client.get(url) @mock.patch('graphite.dashboard.views.DashboardConfig.check') def test_dashboard_template_conf_read_failure(self, check): check.side_effect = OSError(errno.EPERM, 'Operation not permitted') url = reverse('dashboard_template', args=['bogustemplate', 'testkey']) with self.assertRaises(Exception): - response = self.client.get(url) + _ = self.client.get(url) @override_settings(DASHBOARD_CONF=os.path.join(TEST_CONF_DIR, 'dashboard.conf.missing_ui')) def test_dashboard_conf_missing_ui(self): diff --git a/webapp/tests/test_functions.py b/webapp/tests/test_functions.py index fb0289b7e..efbf769c8 100644 --- a/webapp/tests/test_functions.py +++ b/webapp/tests/test_functions.py @@ -510,18 +510,19 @@ def test_matchSeries(self): ) expectedResult = [ - [ - TimeSeries('collectd.test-db1.load.value',0,1,1,[1,10,11]), - TimeSeries('collectd.test-db2.load.value',0,1,1,[2,20,21]), - TimeSeries('collectd.test-db3.load.value',0,1,1,[3,30,31]), - TimeSeries('collectd.test-db4.load.value',0,1,1,[4,40,41]), - ], - [ - TimeSeries('collectd.test-db1.load.value',0,1,1,[1,5,9]), - TimeSeries('collectd.test-db2.load.value',0,1,1,[2,6,10]), - TimeSeries('collectd.test-db3.load.value',0,1,1,[3,7,11]), - TimeSeries('collectd.test-db4.load.value',0,1,1,[4,8,12]), - ]] + [ + TimeSeries('collectd.test-db1.load.value',0,1,1,[1,10,11]), + TimeSeries('collectd.test-db2.load.value',0,1,1,[2,20,21]), + TimeSeries('collectd.test-db3.load.value',0,1,1,[3,30,31]), + TimeSeries('collectd.test-db4.load.value',0,1,1,[4,40,41]), + ], + [ + TimeSeries('collectd.test-db1.load.value',0,1,1,[1,5,9]), + TimeSeries('collectd.test-db2.load.value',0,1,1,[2,6,10]), + TimeSeries('collectd.test-db3.load.value',0,1,1,[3,7,11]), + TimeSeries('collectd.test-db4.load.value',0,1,1,[4,8,12]), + ], + ] results = functions.matchSeries(copy.deepcopy(seriesList1), copy.deepcopy(seriesList2)) for i, (series1, series2) in enumerate(results): self.assertEqual(series1, expectedResult[0][i]) @@ 
-2554,8 +2555,7 @@ def test_limit(self): limit = len(seriesList) - 1 results = functions.limit({}, seriesList, limit) self.assertEqual(len(results), limit, - "More than {0} results returned".format(limit), - ) + "More than {0} results returned".format(limit)) def _verify_series_options(self, seriesList, name, value): """ @@ -2612,7 +2612,7 @@ def test_vertical_line_before_start(self): ) message = r"verticalLine\(\): timestamp 3600 exists before start of range" with self.assertRaisesRegexp(ValueError, message): - result = functions.verticalLine(requestContext, "01:0019700101", "foo") + _ = functions.verticalLine(requestContext, "01:0019700101", "foo") def test_vertical_line_after_end(self): requestContext = self._build_requestContext( @@ -2622,7 +2622,7 @@ def test_vertical_line_after_end(self): ) message = r"verticalLine\(\): timestamp 31539600 exists after end of range" with self.assertRaisesRegexp(ValueError, message): - result = functions.verticalLine(requestContext, "01:0019710101", "foo") + _ = functions.verticalLine(requestContext, "01:0019710101", "foo") def test_line_width(self): seriesList = self._generate_series_list() @@ -2662,18 +2662,18 @@ def test_transform_null(self): results = functions.transformNull({}, copy.deepcopy(seriesList), transform) for counter, series in enumerate(seriesList): - if not None in series: + if None not in series: continue # If the None values weren't transformed, there is a problem self.assertNotIn(None, results[counter], - "tranformNull should remove all None values", - ) + "tranformNull should remove all None values") # Anywhere a None was in the original series, verify it # was transformed to the given value it should be. for i, value in enumerate(series): if value is None: result_val = results[counter][i] - self.assertEqual(transform, result_val, + self.assertEqual( + transform, result_val, "Transformed value should be {0}, not {1}".format(transform, result_val), ) @@ -2688,7 +2688,7 @@ def test_transform_null_reference(self): results = functions.transformNull({}, copy.deepcopy(seriesList), transform, [referenceSeries]) for counter, series in enumerate(seriesList): - if not None in series: + if None not in series: continue # Anywhere a None was in the original series, verify it @@ -2697,7 +2697,8 @@ def test_transform_null_reference(self): for i, value in enumerate(series): if value is None and referenceSeries[i] is not None: result_val = results[counter][i] - self.assertEqual(transform, result_val, + self.assertEqual( + transform, result_val, "Transformed value should be {0}, not {1}".format(transform, result_val), ) @@ -2709,18 +2710,18 @@ def test_transform_null_reference_empty(self): results = functions.transformNull({}, copy.deepcopy(seriesList), transform, [referenceSeries]) for counter, series in enumerate(seriesList): - if not None in series: + if None not in series: continue # If the None values weren't transformed, there is a problem self.assertNotIn(None, results[counter], - "tranformNull should remove all None values", - ) + "tranformNull should remove all None values") # Anywhere a None was in the original series, verify it # was transformed to the given value if a value existed for i, value in enumerate(series): if value is None: result_val = results[counter][i] - self.assertEqual(transform, result_val, + self.assertEqual( + transform, result_val, "Transformed value should be {0}, not {1}".format(transform, result_val), ) @@ -2827,8 +2828,7 @@ def test_alias_sub(self): results = functions.aliasSub({}, seriesList, r"^\w+", 
substitution) for series in results: self.assertTrue(series.name.startswith(substitution), - "aliasSub should replace the name with {0}".format(substitution), - ) + "aliasSub should replace the name with {0}".format(substitution)) def test_alias_query(self): @@ -2890,9 +2890,6 @@ def test_alias_by_node(self): seriesList = self._generate_series_list() def verify_node_name(cases, expected, *nodes): - if isinstance(nodes, int): - node_number = [nodes] - # Use deepcopy so the original seriesList is unmodified results = functions.aliasByNode({}, copy.deepcopy(cases), *nodes) @@ -3025,11 +3022,7 @@ def test_groupByNodes(self): seriesList, inputList = self._generate_mr_series() def verify_groupByNodes(expectedResult, func, *nodes): - if isinstance(nodes, int): - node_number = [nodes] - results = functions.groupByNodes({}, copy.deepcopy(seriesList), func, *nodes) - self.assertEqual(results, expectedResult) expectedResult = [ @@ -3116,11 +3109,9 @@ def test_color(self): for i, series in enumerate(results): self.assertTrue(hasattr(series, "color"), - "The transformed seriesList is missing the 'color' attribute", - ) + "The transformed seriesList is missing the 'color' attribute") self.assertFalse(hasattr(seriesList[i], "color"), - "The original seriesList shouldn't have a 'color' attribute", - ) + "The original seriesList shouldn't have a 'color' attribute") self.assertEqual(series.color, color) def test_substr(self): @@ -3579,7 +3570,7 @@ def test_averageBelow_empty_list(self): def test_constantLine(self): requestContext = {'startTime': datetime(2014,3,12,2,0,0,2,pytz.timezone(settings.TIME_ZONE)), 'endTime':datetime(2014,3,12,3,0,0,2,pytz.timezone(settings.TIME_ZONE))} - results = functions.constantLine(requestContext, [1]) + _ = functions.constantLine(requestContext, [1]) def test_aggregateLine_default(self): seriesList = self._gen_series_list_with_data( @@ -3714,14 +3705,14 @@ def test_aggregateLine_bad(self): ) with self.assertRaisesRegexp(ValueError, '^Unsupported aggregation function: bad$'): - result = functions.aggregateLine( + functions.aggregateLine( self._build_requestContext( startTime=datetime(1970,1,1,1,0,0,0,pytz.timezone(settings.TIME_ZONE)), endTime=datetime(1970,1,1,1,0,0,0,pytz.timezone(settings.TIME_ZONE)) ), seriesList, 'bad' - ) + ) def test_threshold_default(self): expectedResult = [ @@ -5687,36 +5678,37 @@ def test_summarize_1minute(self): data=[list(range(0, 240)), list(range(0, -240, -1)), [None] * 240, list(range(0, 480, 2))] ) - expectedResults = {'sum' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "sum")', 0, 300, 60, [1770, 5370, 8970, 12570, None]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "sum")', 0, 300, 60, [-1770, -5370, -8970, -12570, None]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "sum")', 0, 300, 60, [None, None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "sum")', 0, 300, 60, [3540, 10740, 17940, 25140, None]) - ], - 'avg' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "avg")', 0, 300, 60, [29.5, 89.5, 149.5, 209.5, None]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "avg")', 0, 300, 60, [-29.5, -89.5, -149.5, -209.5, None]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "avg")', 0, 300, 60, [None, None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "avg")', 0, 300, 60, [59.0, 179.0, 299.0, 419.0, None]) - ], - 'last' : [ - 
TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "last")', 0, 300, 60, [59, 119, 179, 239, None]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "last")', 0, 300, 60, [-59, -119, -179, -239, None]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "last")', 0, 300, 60, [None, None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "last")', 0, 300, 60, [118, 238, 358, 478, None]) - ], - 'max' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "max")', 0, 300, 60, [59, 119, 179, 239, None]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "max")', 0, 300, 60, [0, -60, -120, -180, None]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "max")', 0, 300, 60, [None, None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "max")', 0, 300, 60, [118, 238, 358, 478, None]) - ], - 'min' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "min")', 0, 300, 60, [0, 60, 120, 180, None]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "min")', 0, 300, 60, [-59, -119, -179, -239, None]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "min")', 0, 300, 60, [None, None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "min")', 0, 300, 60, [0, 120, 240, 360, None]) - ], + expectedResults = { + 'sum' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "sum")', 0, 300, 60, [1770, 5370, 8970, 12570, None]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "sum")', 0, 300, 60, [-1770, -5370, -8970, -12570, None]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "sum")', 0, 300, 60, [None, None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "sum")', 0, 300, 60, [3540, 10740, 17940, 25140, None]) + ], + 'avg' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "avg")', 0, 300, 60, [29.5, 89.5, 149.5, 209.5, None]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "avg")', 0, 300, 60, [-29.5, -89.5, -149.5, -209.5, None]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "avg")', 0, 300, 60, [None, None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "avg")', 0, 300, 60, [59.0, 179.0, 299.0, 419.0, None]) + ], + 'last' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "last")', 0, 300, 60, [59, 119, 179, 239, None]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "last")', 0, 300, 60, [-59, -119, -179, -239, None]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "last")', 0, 300, 60, [None, None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "last")', 0, 300, 60, [118, 238, 358, 478, None]) + ], + 'max' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "max")', 0, 300, 60, [59, 119, 179, 239, None]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "max")', 0, 300, 60, [0, -60, -120, -180, None]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "max")', 0, 300, 60, [None, None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "max")', 0, 300, 60, [118, 238, 358, 478, None]) + ], + 'min' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "min")', 0, 300, 60, [0, 60, 120, 180, None]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", 
"min")', 0, 300, 60, [-59, -119, -179, -239, None]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "min")', 0, 300, 60, [None, None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "min")', 0, 300, 60, [0, 120, 240, 360, None]) + ], } for func in expectedResults: @@ -5740,36 +5732,37 @@ def test_summarize_1minute_alignToFrom(self): data=[list(range(0, 240)), list(range(0, -240, -1)), [None] * 240, list(range(0, 480, 2))] ) - expectedResults = {'sum' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "sum", true)', 0, 240, 60, [1770, 5370, 8970, 12570]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "sum", true)', 0, 240, 60, [-1770, -5370, -8970, -12570]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "sum", true)', 0, 240, 60, [None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "sum", true)', 0, 240, 60, [3540, 10740, 17940, 25140]) - ], - 'avg' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "avg", true)', 0, 240, 60, [29.5, 89.5, 149.5, 209.5]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "avg", true)', 0, 240, 60, [-29.5, -89.5, -149.5, -209.5]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "avg", true)', 0, 240, 60, [None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "avg", true)', 0, 240, 60, [59.0, 179.0, 299.0, 419.0]) - ], - 'last' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "last", true)', 0, 240, 60, [59, 119, 179, 239]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "last", true)', 0, 240, 60, [-59, -119, -179, -239]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "last", true)', 0, 240, 60, [None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "last", true)', 0, 240, 60, [118, 238, 358, 478]) - ], - 'max' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "max", true)', 0, 240, 60, [59, 119, 179, 239]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "max", true)', 0, 240, 60, [0, -60, -120, -180]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "max", true)', 0, 240, 60, [None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "max", true)', 0, 240, 60, [118, 238, 358, 478]) - ], - 'min' : [ - TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "min", true)', 0, 240, 60, [0, 60, 120, 180]), - TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "min", true)', 0, 240, 60, [-59, -119, -179, -239]), - TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "min", true)', 0, 240, 60, [None, None, None, None]), - TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "min", true)', 0, 240, 60, [0, 120, 240, 360]) - ], + expectedResults = { + 'sum' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "sum", true)', 0, 240, 60, [1770, 5370, 8970, 12570]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "sum", true)', 0, 240, 60, [-1770, -5370, -8970, -12570]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "sum", true)', 0, 240, 60, [None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "sum", true)', 0, 240, 60, [3540, 10740, 17940, 25140]) + ], + 'avg' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "avg", true)', 0, 240, 60, [29.5, 89.5, 149.5, 209.5]), + 
TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "avg", true)', 0, 240, 60, [-29.5, -89.5, -149.5, -209.5]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "avg", true)', 0, 240, 60, [None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "avg", true)', 0, 240, 60, [59.0, 179.0, 299.0, 419.0]) + ], + 'last' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "last", true)', 0, 240, 60, [59, 119, 179, 239]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "last", true)', 0, 240, 60, [-59, -119, -179, -239]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "last", true)', 0, 240, 60, [None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "last", true)', 0, 240, 60, [118, 238, 358, 478]) + ], + 'max' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "max", true)', 0, 240, 60, [59, 119, 179, 239]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "max", true)', 0, 240, 60, [0, -60, -120, -180]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "max", true)', 0, 240, 60, [None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "max", true)', 0, 240, 60, [118, 238, 358, 478]) + ], + 'min' : [ + TimeSeries('summarize(servers.s1.disk.bytes_used, "1minute", "min", true)', 0, 240, 60, [0, 60, 120, 180]), + TimeSeries('summarize(servers.s1.disk.bytes_free, "1minute", "min", true)', 0, 240, 60, [-59, -119, -179, -239]), + TimeSeries('summarize(servers.s2.disk.bytes_used, "1minute", "min", true)', 0, 240, 60, [None, None, None, None]), + TimeSeries('summarize(servers.s2.disk.bytes_free, "1minute", "min", true)', 0, 240, 60, [0, 120, 240, 360]) + ], } for func in expectedResults: diff --git a/webapp/tests/test_metrics.py b/webapp/tests/test_metrics.py index 62b68e289..7497770bb 100644 --- a/webapp/tests/test_metrics.py +++ b/webapp/tests/test_metrics.py @@ -95,8 +95,7 @@ def mock_STORE_get_index(self, requestContext=None): def test_find_view(self): ts = int(time.time()) - #create a minus 60 variable to test with, otherwise the build could fail the longer the test runs - ts_minus_sixty_seconds = ts - 60 + # ts_minus_sixty_seconds = ts - 60 # usage always commented-out below? self.create_whisper_hosts(ts) self.addCleanup(self.wipe_whisper_hosts) @@ -108,7 +107,42 @@ def test_find_view(self): # response = self.client.post(url, {}) self.assertEqual(response.status_code, 400) - self.assertEqual(response.content, b"Missing required parameter 'query'") + self.assertEqual(response.content, b"Bad Request: Missing required parameter 'query'") + + # + # invalid from/until params + # + response = self.client.post(url, { + 'query': '*', + 'from': 'now-1h', + 'until': 'now-2h', + }) + self.assertEqual(response.status_code, 400) + # response contains timestamps such as: + # Bad Request: Failed to instantiate find query: Invalid interval start=1582801589 end=1582797989 + self.assertRegex(response.content, b"^Bad Request: Failed to instantiate find query: Invalid interval start=[0-9]+ end=[0-9]+$") + + # + # Wrong type for param 'wildcards' + # + response = self.client.post(url, { + 'query': '*', + 'wildcards': '123a', + }) + self.assertEqual(response.status_code, 400) + # the output in Python 2/3 slightly varies because repr() shows unicode strings differently, that's why the "u?" 
+ self.assertRegex(response.content, b"^Bad Request: Invalid int value u?'123a' for param wildcards: invalid literal for int\\(\\) with base 10: u?'123a'$") + + # + # Invalid 'from' timestamp + # + response = self.client.post(url, { + 'query': '*', + 'from': 'now-1mmminute', # "mmminute" is misspelled + }) + self.assertEqual(response.status_code, 400) + # the output in Python 2/3 slightly varies because repr() shows unicode strings differently, that's why the "u?" + self.assertRegex(response.content, b"^Bad Request: Invalid value u?'now-1mmminute' for param from: Invalid offset unit u?'mmminute'$") # # format=invalid_format diff --git a/webapp/tests/test_params.py b/webapp/tests/test_params.py index d81973390..e4a84e517 100644 --- a/webapp/tests/test_params.py +++ b/webapp/tests/test_params.py @@ -239,3 +239,15 @@ def test_param_type_int_or_inf(self): [1.2], {}, ) + + def test_default_value(self): + # if no value is specified, but there is a default value, we don't + # want the validator to raise an exception because 'None' is invalid + self.assertTrue(validateParams( + 'TestParam', + [ + Param('one', ParamTypes.aggFunc, default='sum'), + ], + [], + {}, + )) diff --git a/webapp/tests/test_readers_remote.py b/webapp/tests/test_readers_remote.py index f12132b45..2f2b22942 100644 --- a/webapp/tests/test_readers_remote.py +++ b/webapp/tests/test_readers_remote.py @@ -57,7 +57,8 @@ def test_RemoteReader_fetch_multi(self, http_request): reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'}) data = [ - {'start': startTime, + { + 'start': startTime, 'step': 60, 'end': endTime, 'values': [1.0, 0.0, 1.0, 0.0, 1.0], @@ -100,7 +101,8 @@ def test_RemoteReader_fetch_multi(self, http_request): reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'}, bulk_query=['a.b.c.d']) data = [ - {'start': startTime, + { + 'start': startTime, 'step': 60, 'end': endTime, 'values': [1.0, 0.0, 1.0, 0.0, 1.0], @@ -205,13 +207,15 @@ def test_RemoteReader_fetch(self, http_request): reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'}, bulk_query=['a.b.c.*']) data = [ - {'start': startTime, + { + 'start': startTime, 'step': 60, 'end': endTime, 'values': [1.0, 0.0, 1.0, 0.0, 1.0], 'name': 'a.b.c.c' }, - {'start': startTime, + { + 'start': startTime, 'step': 60, 'end': endTime, 'values': [1.0, 0.0, 1.0, 0.0, 1.0], diff --git a/webapp/tests/test_render_datalib.py b/webapp/tests/test_render_datalib.py index 950b7db49..5ba00541f 100644 --- a/webapp/tests/test_render_datalib.py +++ b/webapp/tests/test_render_datalib.py @@ -127,7 +127,7 @@ def test_TimeSeries_iterate_valuesPerPoint_2_none_values(self): series.xFilesFactor = 1 self.assertEqual(list(series), list(expected)) - def test_TimeSeries_iterate_valuesPerPoint_2_avg(self): + def test_TimeSeries_iterate_valuesPerPoint_2_average(self): values = list(range(0,100)) series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values) @@ -227,6 +227,23 @@ def test_TimeSeries_iterate_valuesPerPoint_2_last(self): expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, list(range(2,100,3)) + [99]) self.assertEqual(list(series), list(expected)) + # avg is an alias for average + def test_TimeSeries_iterate_valuesPerPoint_2_avg_alias(self): + values = list(range(0,100)) + + series = TimeSeries("collectd.test-db.load.value", 0, len(values)/2, 1, values, consolidate='avg') + self.assertEqual(series.valuesPerPoint, 1) + + series.consolidate(2) + self.assertEqual(series.valuesPerPoint, 2) + expected = 
TimeSeries("collectd.test-db.load.value", 0, 5, 1, [0.5, 2.5, 4.5, 6.5, 8.5, 10.5, 12.5, 14.5, 16.5, 18.5, 20.5, 22.5, 24.5, 26.5, 28.5, 30.5, 32.5, 34.5, 36.5, 38.5, 40.5, 42.5, 44.5, 46.5, 48.5, 50.5, 52.5, 54.5, 56.5, 58.5, 60.5, 62.5, 64.5, 66.5, 68.5, 70.5, 72.5, 74.5, 76.5, 78.5, 80.5, 82.5, 84.5, 86.5, 88.5, 90.5, 92.5, 94.5, 96.5, 98.5]) + self.assertEqual(list(series), list(expected)) + + series.consolidate(3) + self.assertEqual(series.valuesPerPoint, 3) + expected = TimeSeries("collectd.test-db.load.value", 0, 5, 1, map(float, list(range(1, 100, 3)) + [99])) + self.assertEqual(list(series), list(expected)) + def test_TimeSeries_iterate_valuesPerPoint_2_invalid(self): values = list(range(0,100)) @@ -236,7 +253,7 @@ def test_TimeSeries_iterate_valuesPerPoint_2_invalid(self): series.consolidate(2) self.assertEqual(series.valuesPerPoint, 2) with self.assertRaisesRegexp(Exception, "Invalid consolidation function: 'bogus'"): - result = list(series) + _ = list(series) class DatalibFunctionTest(TestCase): @@ -286,7 +303,7 @@ def test__merge_results_no_results(self, log_debug): pathExpr = 'collectd.test-db.load.value' startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE)) endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE)) - timeInfo = [startTime, endTime, 60] + # timeInfo = [startTime, endTime, 60] result_queue = [ [pathExpr, None], ] @@ -303,7 +320,7 @@ def test__merge_results_bad_results(self, log_exception): pathExpr = 'collectd.test-db.load.value' startTime=datetime(1970, 1, 1, 0, 10, 0, 0, pytz.timezone(settings.TIME_ZONE)) endTime=datetime(1970, 1, 1, 0, 20, 0, 0, pytz.timezone(settings.TIME_ZONE)) - timeInfo = [startTime, endTime, 60] + # timeInfo = [startTime, endTime, 60] result_queue = [ [pathExpr, ['invalid input']], ] diff --git a/webapp/tests/test_util.py b/webapp/tests/test_util.py index a916084c8..918828e37 100644 --- a/webapp/tests/test_util.py +++ b/webapp/tests/test_util.py @@ -94,7 +94,7 @@ def test_find_escaped_pattern_fields(self): def test_load_module(self): with self.assertRaises(IOError): - module = util.load_module('test', member=None) + _ = util.load_module('test', member=None) @patch('graphite.util.log') def test_logtime(self, log): diff --git a/webapp/tests/test_whitelist.py b/webapp/tests/test_whitelist.py index 82efbf832..9554258af 100644 --- a/webapp/tests/test_whitelist.py +++ b/webapp/tests/test_whitelist.py @@ -36,7 +36,7 @@ def create_whitelist(self): def test_whitelist_show_no_whitelist(self): url = reverse('whitelist_show') with self.assertRaises(IOError): - response = self.client.get(url) + _ = self.client.get(url) def test_whitelist_show(self): url = reverse('whitelist_show')