diff --git a/.travis.yml b/.travis.yml
index a7d1e2335..c1ed08a20 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -49,7 +49,6 @@ matrix:
       env:
         - TOXENV=py27-django18-pyparsing2
-        - TOXENV=py27-django111-pyparsing2-rrdtool
         - TOXENV=py27-django111-pyparsing2-msgpack
         - TOXENV=py27-django111-pyparsing2-pyhash
         - TOXENV=py27-django111-pyparsing2-mysql TEST_MYSQL_PASSWORD=graphite
diff --git a/docs/config-carbon.rst b/docs/config-carbon.rst
index 5c2731f6a..518d5ed5f 100644
--- a/docs/config-carbon.rst
+++ b/docs/config-carbon.rst
@@ -30,7 +30,7 @@ Important notes before continuing:
 * There can be many sections in this file.
 * The sections are applied in order from the top (first) and bottom (last).
-* The patterns are regular expressions, as opposed to the wildcards used in the URL API.
+* The patterns are `regular expressions <https://docs.python.org/3/library/re.html>`_, as opposed to the wildcards used in the URL API.
 * The first pattern that matches the metric name is used.
 * This retention is set at the time the first metric is sent.
 * Changing this file will not affect already-created .wsp files. Use whisper-resize.py to change those.
@@ -41,7 +41,7 @@ A given rule is made up of 3 lines:
 * A regex, specified after "pattern="
 * A retention rate line, specified after "retentions="
 
-The retentions line can specify multiple retentions. Each retention of ``frequency:history`` is separated by a comma.
+The retentions line can specify multiple retentions. Each retention of ``frequency:history`` is separated by a comma.
 
 Frequencies and histories are specified using the following suffixes:
 
@@ -61,9 +61,9 @@ Here's a simple, single retention example:
   pattern = garbageCollections$
   retentions = 10s:14d
 
-The name ``[garbage_collection]`` is mainly for documentation purposes, and will show up in ``creates.log`` when metrics matching this section are created.
+The name ``[garbage_collection]`` is mainly for documentation purposes, and will show up in ``creates.log`` when metrics matching this section are created.
 
-The regular expression pattern will match any metric that ends with ``garbageCollections``. For example, ``com.acmeCorp.instance01.jvm.memory.garbageCollections`` would match, but ``com.acmeCorp.instance01.jvm.memory.garbageCollections.full`` would not.
+The regular expression ``pattern`` will match any metric that ends with ``garbageCollections``. For example, ``com.acmeCorp.instance01.jvm.memory.garbageCollections`` would match, but ``com.acmeCorp.instance01.jvm.memory.garbageCollections.full`` would not. Graphite uses the `Python Regular Expression Syntax <https://docs.python.org/3/library/re.html>`_; for an introduction to regular expressions, consult the `Regular Expression HOWTO <https://docs.python.org/3/howto/regex.html>`_.
 
 The ``retentions`` line is saying that each datapoint represents 10 seconds, and we want to keep enough datapoints so that they add up to 14 days of data.
diff --git a/docs/render_api.rst b/docs/render_api.rst
index 5f6ab8a92..171882205 100644
--- a/docs/render_api.rst
+++ b/docs/render_api.rst
@@ -473,7 +473,7 @@ areaAlpha
 ---------
 *Default: 1.0*
 
-Takes a floating point number between 0.0 and 1.0
+Takes a floating point number between 0.0 and 1.0
 
 Sets the alpha (transparency) value of filled areas when using an areaMode_
@@ -484,7 +484,7 @@ areaMode
 *Default: none*
 
 Enables filling of the area below the graphed lines. Fill area is the same color as
-the line color associated with it. See areaAlpha_ to make this area transparent.
+the line color associated with it. See areaAlpha_ to make this area transparent.
 
 Takes one of the following parameters which determines the fill mode to use:
 
 ``none``
@@ -498,7 +498,7 @@ Takes one of the following parameters which determines the fill mode to use:
   Each target line is displayed as the sum of all previous lines plus the value of the current line.
 
 .. _param-bgcolor:
-
+
 bgcolor
 -------
 *Default: value from the [default] template in graphTemplates.conf*
@@ -573,7 +573,7 @@ drawNullAsZero
 
 Converts any None (null) values in the displayed metrics to zero at render time.
 
-.. _param-fgcolor:
+.. _param-fgcolor:
 
 fgcolor
 -------
@@ -1076,7 +1076,7 @@ Example:
   &title=Apache Busy Threads, All Servers, Past 24h
 
 .. _param-tz:
-
+
 tz
 --
 *Default: The timezone specified in local_settings.py*
@@ -1202,7 +1202,7 @@ Sets the time format used when displaying the X-axis.
 See `datetime.date.strftime() <https://docs.python.org/3/library/datetime.html#datetime.date.strftime>`_ for format specification details.
 
 .. _param-yAxisSide:
-
+
 yAxisSide
 ---------
 *Default: left*
@@ -1210,15 +1210,15 @@ yAxisSide
 Sets the side of the graph on which to render the Y-axis. Accepts values of ``left`` or ``right``
 
 .. _param-yDivisors:
-
+
 yDivisors
 ---------
 *Default: 4,5,6*
 
 Sets the preferred number of intermediate values to display on the Y-axis (Y values between the
-minimum and maximum). Note that Graphite will ultimately choose what values (and how many) to
-display based on a 'pretty' factor, which tries to maintain a sensible scale (e.g. preferring
-intermediary values like 25%,50%,75% over 33.3%,66.6%). To explicitly set the Y-axis values,
+minimum and maximum). Note that Graphite will ultimately choose what values (and how many) to
+display based on a 'pretty' factor, which tries to maintain a sensible scale (e.g. preferring
+intermediary values like 25%,50%,75% over 33.3%,66.6%). To explicitly set the Y-axis values,
 see `yStep`_
 
 .. _param-yLimit:
@@ -1295,7 +1295,7 @@ yMinRight
 
 In dual Y-axis mode, sets the lower bound of the right Y-Axis (See: `yMin`_)
 
 .. _param-yStep:
-
+
 yStep
 -----
 *Default: Calculated automatically*
diff --git a/setup.cfg b/setup.cfg
index 103fce46c..9df34ed59 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -58,4 +58,6 @@ obsoletes = graphite <= 0.9.9
 # E731 do not assign a lambda expression, use a def
 # E114 indentation is not a multiple of four (comment)
 # E266 too many leading '#' for block comment
-ignore=E128,E111,E501,E231,E201,E202,E203,E251,E124,E265,E121,E261,E226,E262,E701,E241,E221,E703,E502,F841,E401,E222,E126,E122,E222,E225,E228,E127,E123,E713,E125,E227,E271,E272,N802,N803,N806,E731,E114,E266
+# W504 line break after binary operator
+# W605 invalid escape sequence
+ignore=E128,E111,E501,E231,E201,E202,E203,E251,E124,E265,E121,E261,E226,E262,E701,E241,E221,E703,E502,F841,E401,E222,E126,E122,E222,E225,E228,E127,E123,E713,E125,E227,E271,E272,N802,N803,N806,E731,E114,E266,W504,W605
diff --git a/webapp/content/js/dashboard.js b/webapp/content/js/dashboard.js
index be7e69deb..6c981ddb7 100644
--- a/webapp/content/js/dashboard.js
+++ b/webapp/content/js/dashboard.js
@@ -331,7 +331,7 @@ function initDashboard () {
     url: document.body.dataset.baseUrl + 'metrics/find/',
     autoLoad: true,
     baseParams: {
-      query: '',
+      query: '*',
       format: 'completer',
       automatic_variants: (UI_CONFIG.automatic_variants) ? '1' : '0'
     },
@@ -383,6 +383,9 @@ function initDashboard () {
   var autocompleteTask = new Ext.util.DelayedTask(function () {
     var query = metricSelectorTextField.getValue();
     var store = metricSelectorGrid.getStore();
+    if (query === '') {
+      query = '*';
+    }
     store.setBaseParam('query', query);
     store.load();
   });
diff --git a/webapp/graphite/finders/standard.py b/webapp/graphite/finders/standard.py
index a9e287276..117338e4a 100644
--- a/webapp/graphite/finders/standard.py
+++ b/webapp/graphite/finders/standard.py
@@ -69,13 +69,14 @@ def find_nodes(self, query):
             datasource_pattern = None
 
             absolute_path = join(root_dir, relative_path)
-            metric_path = fs_to_metric(relative_path)
-            real_metric_path = get_real_metric_path(absolute_path, metric_path)
 
             # if we're finding by tag, return the proper metric path
             if tagged:
                 metric_path = query.pattern
+                real_metric_path = query.pattern
             else:
+                metric_path = fs_to_metric(relative_path)
+                real_metric_path = get_real_metric_path(absolute_path, metric_path)
                 metric_path_parts = metric_path.split('.')
                 for field_index in find_escaped_pattern_fields(query.pattern):
                     metric_path_parts[field_index] = pattern_parts[field_index].replace('\\', '')
diff --git a/webapp/graphite/readers/rrd.py b/webapp/graphite/readers/rrd.py
index db4719e92..52d2bcc92 100644
--- a/webapp/graphite/readers/rrd.py
+++ b/webapp/graphite/readers/rrd.py
@@ -1,7 +1,6 @@
 import sys
 import os
 import time
-import six
 
 # Use the built-in version of scandir/stat if possible, otherwise
 # use the scandir module version
@@ -25,8 +24,12 @@ class RRDReader(BaseReader):
 
     @staticmethod
     def _convert_fs_path(fs_path):
-        if isinstance(fs_path, six.text_type):
-            fs_path = fs_path.encode(sys.getfilesystemencoding())
+        # Only Python 2 'unicode' needs to be converted to str/bytes
+        try:
+            if isinstance(fs_path, unicode):
+                fs_path = fs_path.encode(sys.getfilesystemencoding())
+        except NameError:
+            pass
         return os.path.realpath(fs_path)
 
     def __init__(self, fs_path, datasource_name):
diff --git a/webapp/graphite/readers/utils.py b/webapp/graphite/readers/utils.py
index 894c14336..9d0d9af44 100644
--- a/webapp/graphite/readers/utils.py
+++ b/webapp/graphite/readers/utils.py
@@ -1,3 +1,5 @@
+from __future__ import division
+
 import abc
 
 from graphite.logger import log
@@ -40,10 +42,12 @@ def consolidate(func, values):
     usable = [v for v in values if v is not None]
     if not usable:
         return None
+    if func == 'avg_zero':
+        return sum([0 if v is None else v for v in values]) / len(values)
     if func == 'sum':
         return sum(usable)
     if func == 'average':
-        return float(sum(usable)) / len(usable)
+        return sum(usable) / len(usable)
     if func == 'max':
         return max(usable)
     if func == 'min':
diff --git a/webapp/graphite/render/datalib.py b/webapp/graphite/render/datalib.py
index 1d00622b3..39675e01e 100755
--- a/webapp/graphite/render/datalib.py
+++ b/webapp/graphite/render/datalib.py
@@ -76,6 +76,7 @@ def consolidate(self, valuesPerPoint):
   __consolidation_functions = {
     'sum': sum,
     'average': lambda usable: sum(usable) / len(usable),
+    'avg_zero': lambda usable: sum(usable) / len(usable),
     'max': max,
     'min': min,
     'first': lambda usable: usable[0],
@@ -88,24 +89,29 @@ def __consolidatingGenerator(self, gen):
     except KeyError:
       raise Exception("Invalid consolidation function: '%s'" % self.consolidationFunc)
 
-    buf = [] # only the not-None values
+    buf = []
     valcnt = 0
+    nonNull = 0
 
     for x in gen:
      valcnt += 1
      if x is not None:
        buf.append(x)
+        nonNull += 1
+      elif self.consolidationFunc == 'avg_zero':
+        buf.append(0)
 
      if valcnt == self.valuesPerPoint:
-        if buf and (len(buf) / self.valuesPerPoint) >= self.xFilesFactor:
+        if nonNull and (nonNull / self.valuesPerPoint) >= self.xFilesFactor:
          yield cf(buf)
        else:
          yield None
        buf = []
        valcnt = 0
+        nonNull = 0
 
    if valcnt > 0:
-      if buf and (len(buf) / self.valuesPerPoint) >= self.xFilesFactor:
+      if nonNull and (nonNull / self.valuesPerPoint) >= self.xFilesFactor:
        yield cf(buf)
      else:
        yield None
diff --git a/webapp/graphite/render/evaluator.py b/webapp/graphite/render/evaluator.py
index 081d2f4de..dde13f1a3 100644
--- a/webapp/graphite/render/evaluator.py
+++ b/webapp/graphite/render/evaluator.py
@@ -64,7 +64,7 @@ def evaluateTokens(requestContext, tokens, replacements=None, pipedArg=None):
       val = replacements[name]
       if not isinstance(val, six.string_types):
         return val
-      elif re.match('^-?[\d.]+$', val):
+      elif re.match(r'^-?[\d.]+$', val):
         return float(val)
       else:
         return val
diff --git a/webapp/graphite/render/functions.py b/webapp/graphite/render/functions.py
index 352acb20a..9497e4ff5 100644
--- a/webapp/graphite/render/functions.py
+++ b/webapp/graphite/render/functions.py
@@ -115,6 +115,11 @@ def safeAvg(values):
     return sum(safeValues) / len(safeValues)
 
 
+def safeAvgZero(values):
+  if values:
+    return sum([0 if v is None else v for v in values]) / len(values)
+
+
 def safeMedian(values):
   safeValues = [v for v in values if v is not None]
   if safeValues:
@@ -255,6 +260,7 @@ def formatPathExpressions(seriesList):
 
 aggFuncs = {
   'average': safeAvg,
+  'avg_zero': safeAvgZero,
   'median': safeMedian,
   'sum': safeSum,
   'min': safeMin,
@@ -1200,7 +1206,7 @@ def movingWindow(requestContext, seriesList, windowSize, func='average', xFilesF
   Takes one metric or a wildcard seriesList, a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), a function to apply to the points
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), a function to apply to the points
   in the window to produce the output, and an xFilesFactor value to specify how many points
   in the window must be non-null for the output to be considered valid. Graphs the
   output of the function for the preceding datapoints for each point on the graph.
@@ -1362,7 +1368,7 @@ def movingMedian(requestContext, seriesList, windowSize, xFilesFactor=None):
   Takes one metric or a wildcard seriesList followed by a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), and an xFilesFactor value to specify
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), and an xFilesFactor value to specify
   how many points in the window must be non-null for the output to be considered valid. Graphs
   the median of the preceding datapoints for each point on the graph.
@@ -1712,7 +1718,7 @@ def movingAverage(requestContext, seriesList, windowSize, xFilesFactor=None):
   Takes one metric or a wildcard seriesList followed by a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), and an xFilesFactor value to specify
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), and an xFilesFactor value to specify
   how many points in the window must be non-null for the output to be considered valid. Graphs
   the average of the preceding datapoints for each point on the graph.
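Reviewer note on the ``avg_zero`` semantics introduced above: it differs from ``average`` only in that nulls count as zeros, so the divisor is the total number of points rather than the number of non-null points. A standalone sketch (not part of the patch) of the two branches from ``readers/utils.py``:

```python
# Standalone sketch -- not the patch itself -- contrasting the existing
# 'average' consolidation with the new 'avg_zero' branch added above.
from __future__ import division


def consolidate(func, values):
    usable = [v for v in values if v is not None]
    if not usable:
        return None
    if func == 'avg_zero':
        # nulls count as zeros: divide by the total number of points
        return sum([0 if v is None else v for v in values]) / len(values)
    if func == 'average':
        # nulls are ignored: divide by the number of non-null points only
        return sum(usable) / len(usable)
    raise ValueError("Invalid consolidation function: '%s'" % func)


bucket = [1, None, None, 5]
print(consolidate('average', bucket))   # 3.0 -> 6 / 2 non-null points
print(consolidate('avg_zero', bucket))  # 1.5 -> 6 / 4 total points
```

This is also why ``__consolidatingGenerator`` in ``datalib.py`` now tracks ``nonNull`` separately: ``buf`` may contain appended zeros for ``avg_zero``, while ``xFilesFactor`` must still gate on the count of genuinely non-null inputs.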
@@ -1742,7 +1748,7 @@ def movingSum(requestContext, seriesList, windowSize, xFilesFactor=None):
   Takes one metric or a wildcard seriesList followed by a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), and an xFilesFactor value to specify
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), and an xFilesFactor value to specify
   how many points in the window must be non-null for the output to be considered valid. Graphs
   the sum of the preceding datapoints for each point on the graph.
@@ -1772,7 +1778,7 @@ def movingMin(requestContext, seriesList, windowSize, xFilesFactor=None):
   Takes one metric or a wildcard seriesList followed by a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), and an xFilesFactor value to specify
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), and an xFilesFactor value to specify
   how many points in the window must be non-null for the output to be considered valid. Graphs
   the minimum of the preceding datapoints for each point on the graph.
@@ -1802,7 +1808,7 @@ def movingMax(requestContext, seriesList, windowSize, xFilesFactor=None):
   Takes one metric or a wildcard seriesList followed by a number N of datapoints
   or a quoted string with a length of time like '1hour' or '5min' (See ``from /
-  until`` in the render\_api_ for examples of time formats), and an xFilesFactor value to specify
+  until`` in the :doc:`Render API </render_api>` for examples of time formats), and an xFilesFactor value to specify
   how many points in the window must be non-null for the output to be considered valid. Graphs
   the maximum of the preceding datapoints for each point on the graph.
@@ -1883,7 +1889,7 @@ def consolidateBy(requestContext, seriesList, consolidationFunc):
 consolidateBy.group = 'Special'
 consolidateBy.params = [
   Param('seriesList', ParamTypes.seriesList, required=True),
-  Param('consolidationFunc', ParamTypes.string, required=True, options=['sum', 'average', 'min', 'max', 'first', 'last']),
+  Param('consolidationFunc', ParamTypes.string, required=True, options=['sum', 'average', 'avg_zero', 'min', 'max', 'first', 'last']),
 ]
@@ -1973,12 +1979,15 @@ def derivative(requestContext, seriesList):
 ]
 
 
-def perSecond(requestContext, seriesList, maxValue=None):
+def perSecond(requestContext, seriesList, maxValue=None, minValue=None):
   """
   NonNegativeDerivative adjusted for the series time interval
   This is useful for taking a running total metric and showing how many requests
   per second were handled.
 
+  The optional ``minValue`` and ``maxValue`` parameters have the same
+  meaning as in ``nonNegativeDerivative``.
+
   Example:
 
   .. code-block:: none
@@ -1997,7 +2006,7 @@ def perSecond(requestContext, seriesList, maxValue=None):
     step = series.step
 
     for val in series:
-      delta, prev = _nonNegativeDelta(val, prev, maxValue)
+      delta, prev = _nonNegativeDelta(val, prev, maxValue, minValue)
 
       if delta is not None:
         # Dividing a long by a float can cause OverflowError
@@ -2020,6 +2029,7 @@ def perSecond(requestContext, seriesList, maxValue=None):
 perSecond.params = [
   Param('seriesList', ParamTypes.seriesList, required=True),
   Param('maxValue', ParamTypes.float),
+  Param('minValue', ParamTypes.float),
 ]
@@ -2147,13 +2157,18 @@ def integralByInterval(requestContext, seriesList, intervalUnit):
 ]
 
 
-def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
+def nonNegativeDerivative(requestContext, seriesList, maxValue=None, minValue=None):
   """
   Same as the derivative function above, but ignores datapoints that trend
   down.  Useful for counters that increase for a long time, then wrap or
   reset. (Such as if a network interface is destroyed and recreated by unloading
   and re-loading a kernel module, common with USB / WiFi cards.)
 
+  By default, a null value is returned in place of negative datapoints. When
+  ``maxValue`` is supplied, the missing value is computed as if the counter
+  had wrapped at ``maxValue``. When ``minValue`` is supplied, the missing
+  value is computed as if the counter had wrapped to ``minValue``.
+
   Example:
 
   .. code-block:: none
@@ -2168,7 +2183,7 @@ def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
     prev = None
 
     for val in series:
-      delta, prev = _nonNegativeDelta(val, prev, maxValue)
+      delta, prev = _nonNegativeDelta(val, prev, maxValue, minValue)
 
       newValues.append(delta)
@@ -2184,13 +2199,16 @@ def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
 nonNegativeDerivative.params = [
   Param('seriesList', ParamTypes.seriesList, required=True),
   Param('maxValue', ParamTypes.float),
+  Param('minValue', ParamTypes.float),
 ]
 
 
-def _nonNegativeDelta(val, prev, maxValue):
+def _nonNegativeDelta(val, prev, maxValue, minValue):
   # ignore values larger than maxValue
   if maxValue is not None and val > maxValue:
     return None, None
+  if minValue is not None and val < minValue:
+    return None, None
 
   # first reading
   if None in (prev, val):
@@ -2200,12 +2218,16 @@ def _nonNegativeDelta(val, prev, maxValue):
   if val >= prev:
     return val - prev, val
 
-  # counter wrapped and we have maxValue
-  # calculate delta based on maxValue + 1 + val - prev
+  # counter wrapped and we have maxValue (and optionally minValue)
+  # calculate delta based on maxValue + 1 + val - prev - minValue
   if maxValue is not None:
-    return maxValue + 1 + val - prev, val
+    return maxValue + 1 + val - prev - (minValue or 0), val
+  # counter wrapped and we have minValue
+  # calculate delta based on val - minValue
+  if minValue is not None:
+    return val - minValue, val
 
-  # counter wrapped or reset and we don't have maxValue
+  # counter wrapped or reset and we don't have minValue/maxValue
   # just use None
   return None, val
@@ -3385,7 +3407,7 @@ def sortByName(requestContext, seriesList, natural=False, reverse=False):
   - Natural sorting: server1, server2, server11, server12
   """
   def natSortKey(series):
-    return re.sub("(\d+)", lambda x: "{0:010}".format(int(x.group(0))), series.name)
+    return re.sub(r"(\d+)", lambda x: "{0:010}".format(int(x.group(0))), series.name)
 
   if natural:
     seriesList.sort(key=natSortKey, reverse=reverse)
@@ -3970,7 +3992,7 @@ def linearRegression(requestContext, seriesList, startSourceAt=None, endSourceAt
   Takes one metric or a wildcard seriesList, followed by a quoted string with the
   time to start the line and another quoted string with the time to end the line.
   The start and end times are inclusive (default range is from to until). See
-  ``from / until`` in the render\_api_ for examples of time formats. Datapoints
+  ``from / until`` in the :doc:`Render API </render_api>` for examples of time formats. Datapoints
   in the range are used for the regression.
 
   Example:
@@ -4106,7 +4128,7 @@ def dashed(requestContext, seriesList, dashLength=5):
 def timeStack(requestContext, seriesList, timeShiftUnit='1d', timeShiftStart=0, timeShiftEnd=7):
   """
   Takes one metric or a wildcard seriesList, followed by a quoted string with the
-  length of time (See ``from / until`` in the render\_api_ for examples of time formats).
+  length of time (See ``from / until`` in the :doc:`Render API </render_api>` for examples of time formats).
   Also takes a start multiplier and end multiplier for the length of time; creates a seriesList
   which is composed of the original metric series stacked with time shifts
@@ -4163,7 +4185,7 @@ def timeStack(requestContext, seriesList, timeShiftUnit='1d', timeShiftStart=0,
 def timeShift(requestContext, seriesList, timeShift, resetEnd=True, alignDST=False):
   """
   Takes one metric or a wildcard seriesList, followed by a quoted string with the
-  length of time (See ``from / until`` in the render\_api_ for examples of time formats).
+  length of time (See ``from / until`` in the :doc:`Render API </render_api>` for examples of time formats).
 
   Draws the selected metrics shifted in time. If no sign is given, a minus sign ( - ) is
   implied which will shift the metric back in time. If a plus sign ( + ) is given, the
@@ -4250,7 +4272,7 @@ def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt="now"):
   """
   Takes one metric or a wildcard metric, followed by a quoted string with the
   time to start the line and another quoted string with the time to end the line.
-  The start and end times are inclusive. See ``from / until`` in the render\_api_
+  The start and end times are inclusive. See ``from / until`` in the :doc:`Render API </render_api>`
   for examples of time formats.
 
   Useful for filtering out a part of a series of data from a wider range of
@@ -4424,7 +4446,7 @@ def verticalLine(requestContext, ts, label=None, color=None):
 def threshold(requestContext, value, label=None, color=None):
   """
   Takes a float F, followed by a label (in double quotes) and a color.
-  (See ``bgcolor`` in the render\_api_ for valid color names & formats.)
+  (See ``bgcolor`` in the :doc:`Render API </render_api>` for valid color names & formats.)
 
   Draws a horizontal line at value F across the graph.
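To make the new wrap handling easy to review, here is a self-contained restatement of the ``_nonNegativeDelta`` logic patched above (the snake_case name and the assertions are illustrative only, not part of the patch). The two worked cases match the new unit tests added below:

```python
# Self-contained restatement of _nonNegativeDelta with the optional minValue.
def non_negative_delta(val, prev, max_value=None, min_value=None):
    # readings outside the configured range are discarded outright
    if max_value is not None and val > max_value:
        return None, None
    if min_value is not None and val < min_value:
        return None, None
    # first reading: no delta can be computed yet
    if None in (prev, val):
        return None, val
    # counter increased normally
    if val >= prev:
        return val - prev, val
    # counter wrapped and the range is known: reconstruct the delta
    if max_value is not None:
        return max_value + 1 + val - prev - (min_value or 0), val
    if min_value is not None:
        return val - min_value, val
    # wrap or reset with no known range: emit a gap (None)
    return None, val


# counter goes 5 -> 2 with minValue=1: delta is 2 - 1 == 1
assert non_negative_delta(2, 5, min_value=1) == (1, 2)
# same wrap with maxValue=6 and minValue=1: delta is 6 + 1 + 2 - 5 - 1 == 3
assert non_negative_delta(2, 5, max_value=6, min_value=1) == (3, 2)
```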
diff --git a/webapp/tests/test_composer.py b/webapp/tests/test_composer.py
new file mode 100644
index 000000000..f7dcc270d
--- /dev/null
+++ b/webapp/tests/test_composer.py
@@ -0,0 +1,34 @@
+import mock
+
+from urllib3.response import HTTPResponse
+from graphite.util import BytesIO
+
+from .base import TestCase
+try:
+    from django.urls import reverse
+except ImportError:  # Django < 1.10
+    from django.core.urlresolvers import reverse
+
+
+class ComposerTest(TestCase):
+    @mock.patch('six.moves.http_client.HTTPConnection.request')
+    @mock.patch('six.moves.http_client.HTTPConnection.getresponse')
+    @mock.patch('graphite.composer.views.SMTP')
+    @mock.patch('django.conf.settings.SMTP_SERVER', 'localhost')
+    def test_send_email(self, mock_smtp, http_response, http_request):
+        url = reverse('composer_send_email')
+        request = { "to": "noreply@localhost",
+                    "url": 'https://localhost:8000/render?target=sumSeries(a.b.c.d)&title=Test&width=500&from=-55minutes&until=now&height=400'}
+
+        response = self.client.get(reverse('render'), {'target': 'test'})
+        self.assertEqual(response['Content-Type'], 'image/png')
+        data = response.content
+        responseObject = HTTPResponse(body=BytesIO(data), status=200, preload_content=False)
+        http_request.return_value = responseObject
+        http_response.return_value = responseObject
+
+        instance = mock_smtp.return_value
+        instance.sendmail.return_value = {}
+
+        response = self.client.get(url, request)
+        self.assertEqual(response.content, b'OK')
diff --git a/webapp/tests/test_finders.py b/webapp/tests/test_finders.py
index 9f025948d..b6ee5ff40 100644
--- a/webapp/tests/test_finders.py
+++ b/webapp/tests/test_finders.py
@@ -247,6 +247,25 @@ def test_standard_finder_gzipped_whisper(self, scandir_mock):
         scandir_mock.call_count = 0
         self.wipe_whisper()
 
+    def test_standard_finder_tagged_whisper_carbonlink(self):
+        try:
+            self.create_whisper(join(
+                '_tagged',
+                'b34',
+                '2de',
+                # foo;bar=baz2
+                'b342defa10cb579981c63ef78be5ac248f681f4bd2c35bc0209d3a7b9eb99346.wsp'
+            ))
+
+            finder = get_finders('graphite.finders.standard.StandardFinder')[0]
+
+            nodes = list(finder.find_nodes(FindQuery('foo;bar=baz2', None, None)))
+            self.assertEqual(len(nodes), 1)
+            self.assertEqual(nodes[0].reader.real_metric_path, 'foo;bar=baz2')
+
+        finally:
+            self.wipe_whisper()
+
     def test_globstar(self):
         self.addCleanup(self.wipe_whisper)
         store = Store(finders=get_finders('graphite.finders.standard.StandardFinder'))
diff --git a/webapp/tests/test_functions.py b/webapp/tests/test_functions.py
index 97a8721bd..df6ec4441 100644
--- a/webapp/tests/test_functions.py
+++ b/webapp/tests/test_functions.py
@@ -1439,7 +1439,7 @@ def test_cumulative(self):
     def test_consolidateBy(self):
         seriesList = self._generate_series_list()
         self._verify_series_consolidationFunc(seriesList, "average")
-        avail_funcs = ['sum', 'average', 'min', 'max', 'first', 'last']
+        avail_funcs = ['sum', 'average', 'avg_zero', 'min', 'max', 'first', 'last']
         for func in avail_funcs:
             results = functions.consolidateBy({}, seriesList, func)
             self._verify_series_consolidationFunc(results, func)
@@ -1637,6 +1637,18 @@ def test_nonNegativeDerivative_max(self):
         result = functions.nonNegativeDerivative({}, seriesList,5)
         self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')
 
+    def test_nonNegativeDerivative_min(self):
+        seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 1, 2, 3, 4, 5, 2, 3, 4, 5])
+        expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, 1, 1, 1, 1])]
+        result = functions.nonNegativeDerivative({}, seriesList,None,1)
+        self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')
+
+    def test_nonNegativeDerivative_min_max(self):
+        seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 1, 2, 3, 4, 5, 2, 3, 4, 5])
+        expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, 3, 1, 1, 1])]
+        result = functions.nonNegativeDerivative({}, seriesList,6,1)
+        self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')
+
     def test_perSecond(self):
         seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 120, 240, 480, 960, 1920, 3840, 7680, 15360, 60 ** 256 + 15360 ])
         expected = [TimeSeries('perSecond(test)', 0, 600, 60, [None, 2, 2, 4, 8, 16, 32, 64, 128, 60 ** 255])]
diff --git a/webapp/tests/test_readers_util.py b/webapp/tests/test_readers_util.py
index d3ddd4ea8..33d19e24e 100644
--- a/webapp/tests/test_readers_util.py
+++ b/webapp/tests/test_readers_util.py
@@ -209,6 +209,38 @@ def test_merge_with_cache_with_different_step_average(self):
 
         self.assertEqual(expected_values, values)
 
+    def test_merge_with_cache_with_different_step_avg_zero(self):
+        # Data values from the Reader:
+        start = 1465844460  # (Mon Jun 13 19:01:00 UTC 2016)
+        window_size = 7200  # (2 hour)
+        step = 60  # (1 minute)
+
+        # Fill in half the data. Nones for the rest.
+        values = list(range(0, window_size//2, step))
+        for i in range(0, window_size//2, step):
+            values.append(None)
+
+        # Generate data that would normally come from Carbon.
+        # Step will be different since that is what we are testing
+        cache_results = []
+        for i in range(start+window_size//2, start+window_size, 1):
+            cache_results.append((i, 1 if i%2==0 else None))
+
+        # merge the db results with the cached results
+        values = merge_with_cache(
+            cached_datapoints=cache_results,
+            start=start,
+            step=step,
+            values=values,
+            func='avg_zero'
+        )
+
+        # Generate the expected values
+        expected_values = list(range(0, window_size//2, step))
+        for i in range(0, window_size//2, step):
+            expected_values.append(0.5)
+
+        self.assertEqual(expected_values, values)
+
     def test_merge_with_cache_with_different_step_max(self):
         # Data values from the Reader:
         start = 1465844460  # (Mon Jun 13 19:01:00 UTC 2016)
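As a quick cross-check of the ``0.5`` expectation in the new ``avg_zero`` merge test above (a sketch, not part of the patch): each consolidated 60-second bucket receives 60 one-second cached points alternating between 1 and None, and ``avg_zero`` counts the Nones as zeros.

```python
from __future__ import division

# one consolidation bucket's worth of cached points: 1, None, 1, None, ...
points = [1 if i % 2 == 0 else None for i in range(60)]
avg_zero = sum(0 if v is None else v for v in points) / len(points)
assert avg_zero == 0.5  # (30 * 1 + 30 * 0) / 60
```

Under plain ``average`` the same bucket would consolidate to 1.0, since the 30 Nones would simply be dropped from the divisor.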