diff --git a/webapp/graphite/finders/ceres.py b/webapp/graphite/finders/ceres.py
index bfc53fd59..f7659ed05 100644
--- a/webapp/graphite/finders/ceres.py
+++ b/webapp/graphite/finders/ceres.py
@@ -25,15 +25,17 @@ def __init__(self, directory=None):
         self.tree = CeresTree(directory)
 
     def find_nodes(self, query):
-
         # translate query pattern if it is tagged
         tagged = not query.pattern.startswith('_tagged.') and ';' in query.pattern
         if tagged:
-            # tagged series are stored in ceres using encoded names, so to retrieve them we need to encode the
-            # query pattern using the same scheme used in carbon when they are written.
-            variants = [TaggedSeries.encode(query.pattern)]
+            # tagged series are stored in ceres using encoded names, so to retrieve them we need to
+            # encode the query pattern using the same scheme used in carbon when they are written.
+            variants = [
+                TaggedSeries.encode(query.pattern, hash_only=True),
+                TaggedSeries.encode(query.pattern, hash_only=False),
+            ]
         else:
-          variants = extract_variants(query.pattern)
+            variants = extract_variants(query.pattern)
 
         for variant in variants:
             for fs_path in glob(self.tree.getFilesystemPath(variant)):
@@ -42,14 +44,12 @@
 
                 if CeresNode.isNodeDir(fs_path):
                     ceres_node = self.tree.getNode(metric_path)
-                    if ceres_node.hasDataForInterval(
-                            query.startTime, query.endTime):
-                        real_metric_path = get_real_metric_path(
-                            fs_path, metric_path)
+                    if ceres_node.hasDataForInterval(query.startTime, query.endTime):
+                        real_metric_path = get_real_metric_path(fs_path, metric_path)
                         reader = CeresReader(ceres_node, real_metric_path)
                         # if we're finding by tag, return the proper metric path
                         if tagged:
-                          metric_path = query.pattern
+                            metric_path = query.pattern
                         yield LeafNode(metric_path, reader)
 
                 elif os.path.isdir(fs_path):
@@ -59,12 +59,12 @@ def get_index(self, requestContext):
         matches = []
 
         for root, _, files in walk(settings.CERES_DIR):
-          root = root.replace(settings.CERES_DIR, '')
-          for filename in files:
-            if filename == '.ceres-node':
-              matches.append(root)
+            root = root.replace(settings.CERES_DIR, '')
+            for filename in files:
+                if filename == '.ceres-node':
+                    matches.append(root)
 
         return sorted([
-          m.replace('/', '.').lstrip('.')
-          for m in matches
+            m.replace('/', '.').lstrip('.')
+            for m in matches
         ])
diff --git a/webapp/graphite/finders/remote.py b/webapp/graphite/finders/remote.py
index 554891fbe..b8facc839 100644
--- a/webapp/graphite/finders/remote.py
+++ b/webapp/graphite/finders/remote.py
@@ -189,6 +189,7 @@ def auto_complete_tags(self, exprs, tagPrefix=None, limit=None, requestContext=N
         fields = [
             ('tagPrefix', tagPrefix or ''),
             ('limit', str(limit)),
+            ('local', self.params.get('local', '1')),
         ]
         for expr in exprs:
             fields.append(('expr', expr))
@@ -223,6 +224,7 @@ def auto_complete_values(self, exprs, tag, valuePrefix=None, limit=None, request
             ('tag', tag or ''),
             ('valuePrefix', valuePrefix or ''),
            ('limit', str(limit)),
+            ('local', self.params.get('local', '1')),
         ]
         for expr in exprs:
             fields.append(('expr', expr))
diff --git a/webapp/graphite/finders/standard.py b/webapp/graphite/finders/standard.py
index f1ef20fb8..a9e287276 100644
--- a/webapp/graphite/finders/standard.py
+++ b/webapp/graphite/finders/standard.py
@@ -1,6 +1,6 @@
 import bisect
 import fnmatch
-import operator
+import os
 from os.path import isdir, isfile, join, basename, splitext
 
 from django.conf import settings
@@ -18,7 +18,7 @@
 from graphite.finders.utils import BaseFinder
 from graphite.tags.utils import TaggedSeries
 
-from . import fs_to_metric, get_real_metric_path, match_entries
+from . import fs_to_metric, get_real_metric_path, match_entries, expand_braces
 
 
 class StandardFinder(BaseFinder):
@@ -34,36 +34,52 @@ def find_nodes(self, query):
         # translate query pattern if it is tagged
         tagged = not query.pattern.startswith('_tagged.') and ';' in query.pattern
         if tagged:
-            # tagged series are stored in whisper using encoded names, so to retrieve them we need to encode the
-            # query pattern using the same scheme used in carbon when they are written.
-            clean_pattern = TaggedSeries.encode(query.pattern)
+            # tagged series are stored in whisper using encoded names, so to retrieve them we need to
+            # encode the query pattern using the same scheme used in carbon when they are written.
+            encoded_paths = [
+                TaggedSeries.encode(query.pattern, sep=os.sep, hash_only=True),
+                TaggedSeries.encode(query.pattern, sep=os.sep, hash_only=False),
+            ]
 
         pattern_parts = clean_pattern.split('.')
 
         for root_dir in self.directories:
-            for absolute_path in self._find_paths(root_dir, pattern_parts):
-                if basename(absolute_path).startswith('.'):
+            if tagged:
+                relative_paths = []
+                for pattern in encoded_paths:
+                    entries = [
+                        pattern + '.wsp',
+                        pattern + '.wsp.gz',
+                        pattern + '.rrd',
+                    ]
+                    for entry in entries:
+                        if isfile(join(root_dir, entry)):
+                            relative_paths.append(entry)
+            else:
+                relative_paths = self._find_paths(root_dir, pattern_parts)
+
+            for relative_path in relative_paths:
+                if basename(relative_path).startswith('.'):
                     continue
 
-                if self.DATASOURCE_DELIMITER in basename(absolute_path):
-                    (absolute_path, datasource_pattern) = absolute_path.rsplit(
+                if self.DATASOURCE_DELIMITER in basename(relative_path):
+                    (relative_path, datasource_pattern) = relative_path.rsplit(
                         self.DATASOURCE_DELIMITER, 1)
                 else:
                     datasource_pattern = None
 
-                relative_path = absolute_path[len(root_dir):].lstrip('/')
+                absolute_path = join(root_dir, relative_path)
                 metric_path = fs_to_metric(relative_path)
                 real_metric_path = get_real_metric_path(absolute_path, metric_path)
 
-                metric_path_parts = metric_path.split('.')
-                for field_index in find_escaped_pattern_fields(query.pattern):
-                    metric_path_parts[field_index] = pattern_parts[field_index].replace(
-                        '\\', '')
-                metric_path = '.'.join(metric_path_parts)
-
                 # if we're finding by tag, return the proper metric path
                 if tagged:
                     metric_path = query.pattern
+                else:
+                    metric_path_parts = metric_path.split('.')
+                    for field_index in find_escaped_pattern_fields(query.pattern):
+                        metric_path_parts[field_index] = pattern_parts[field_index].replace('\\', '')
+                    metric_path = '.'.join(metric_path_parts)
 
                 # Now we construct and yield an appropriate Node object
                 if isdir(absolute_path):
@@ -74,8 +90,7 @@ def find_nodes(self, query):
                     yield LeafNode(metric_path, reader)
 
                 elif absolute_path.endswith('.wsp.gz') and GzippedWhisperReader.supported:
-                    reader = GzippedWhisperReader(
-                        absolute_path, real_metric_path)
+                    reader = GzippedWhisperReader(absolute_path, real_metric_path)
                     yield LeafNode(metric_path, reader)
 
                 elif absolute_path.endswith('.rrd') and RRDReader.supported:
@@ -91,73 +106,75 @@ def find_nodes(self, query):
     def _find_paths(self, current_dir, patterns):
         """Recursively generates absolute paths whose components
         underneath current_dir match the corresponding pattern in patterns"""
-        pattern = patterns[0]
+        raw_pattern = patterns[0]
         patterns = patterns[1:]
 
-        has_wildcard = pattern.find(
-            '{') > -1 or pattern.find('[') > -1 or pattern.find('*') > -1 or pattern.find('?') > -1
-
-        matching_subdirs = []
-        files = []
-        if has_wildcard:  # this avoids os.listdir() for performance
-            subdirs = []
-            try:
-                for x in scandir(current_dir):
-                    if x.is_file():
-                        files.append(x.name)
-                    if x.is_dir():
-                        subdirs.append(x.name)
-            except OSError as e:
-                log.exception(e)
-
-            if pattern == "**":
-                matching_subdirs = map(operator.itemgetter(0), walk(current_dir))
-
-                # if this is a terminal globstar, add a pattern for all files in subdirs
-                if not patterns:
-                    patterns = ["*"]
-            else:
-                matching_subdirs = match_entries(subdirs, pattern)
-        elif isdir(join(current_dir, pattern)):
-            matching_subdirs.append(pattern)
-
-        # the last pattern may apply to RRD data sources
-        if len(patterns) == 1 and RRDReader.supported:
-            if not has_wildcard:
-                entries = [
-                    pattern + ".rrd",
-                ]
-                rrd_files = [entry for entry in entries if isfile(join(current_dir, entry))]
-            else:
-                rrd_files = match_entries(files, pattern + ".rrd")
-
-            if rrd_files:  # let's assume it does
-                datasource_pattern = patterns[0]
-
-                for rrd_file in rrd_files:
-                    absolute_path = join(current_dir, rrd_file)
-                    yield absolute_path + self.DATASOURCE_DELIMITER + datasource_pattern
+        for pattern in expand_braces(raw_pattern):
+            has_wildcard = pattern.find('[') > -1 or pattern.find('*') > -1 or pattern.find('?') > -1
+
+            matching_subdirs = []
+            files = []
+            if has_wildcard:  # this avoids os.listdir() for performance
+                subdirs = []
+                try:
+                    for x in scandir(current_dir):
+                        if x.is_file():
+                            files.append(x.name)
+                        if x.is_dir():
+                            subdirs.append(x.name)
+                except OSError as e:
+                    log.exception(e)
+
+                if pattern == "**":
+                    matching_subdirs = map(
+                        lambda item: item[0][len(current_dir) + 1:],
+                        walk(current_dir)
+                    )
+
+                    # if this is a terminal globstar, add a pattern for all files in subdirs
+                    if not patterns:
+                        patterns = ["*"]
+                else:
+                    matching_subdirs = match_entries(subdirs, pattern)
+            elif isdir(join(current_dir, pattern)):
+                matching_subdirs.append(pattern)
+
+            # the last pattern may apply to RRD data sources
+            if len(patterns) == 1 and RRDReader.supported:
+                if not has_wildcard:
+                    entries = [
+                        pattern + ".rrd",
+                    ]
+                    rrd_files = [entry for entry in entries if isfile(join(current_dir, entry))]
+                else:
+                    rrd_files = match_entries(files, pattern + ".rrd")
+
+                if rrd_files:  # let's assume it does
+                    datasource_pattern = patterns[0]
+
+                    for rrd_file in rrd_files:
+                        yield rrd_file + self.DATASOURCE_DELIMITER + datasource_pattern
+
+            if patterns:  # we've still got more directories to traverse
+                for subdir in matching_subdirs:
+                    absolute_path = join(current_dir, subdir)
+                    for match in self._find_paths(absolute_path, patterns):
+                        yield join(subdir, match)
+
+            else:  # we've got the last pattern
+                if not has_wildcard:
+                    entries = [
+                        pattern + '.wsp',
+                        pattern + '.wsp.gz',
+                        pattern + '.rrd',
+                    ]
+                    matching_files = [entry for entry in entries if isfile(join(current_dir, entry))]
+                else:
+                    matching_files = match_entries(files, pattern + '.*')
 
-        if patterns:  # we've still got more directories to traverse
-            for subdir in matching_subdirs:
-                absolute_path = join(current_dir, subdir)
-                for match in self._find_paths(absolute_path, patterns):
+                for match in matching_files + matching_subdirs:
                     yield match
 
-        else:  # we've got the last pattern
-            if not has_wildcard:
-                entries = [
-                    pattern + '.wsp',
-                    pattern + '.wsp.gz',
-                    pattern + '.rrd',
-                ]
-                matching_files = [entry for entry in entries if isfile(join(current_dir, entry))]
-            else:
-                matching_files = match_entries(files, pattern + '.*')
-
-            for base_name in matching_files + matching_subdirs:
-                yield join(current_dir, base_name)
-
     def get_index(self, requestContext):
         matches = []
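The rewritten _find_paths above relies on expand_braces (imported from graphite.finders) to expand {a,b} alternatives up front instead of leaving braces to fnmatch, which is why several scandir call-count expectations drop in the tests further down. A rough standalone sketch of what brace expansion does, not the actual graphite.finders implementation, which may differ in details such as deduplication:

import re

def expand_braces(pattern):
    # Expand the innermost {a,b,...} group and recurse, e.g.
    # '{foo,bar}.ba?' -> ['foo.ba?', 'bar.ba?'].
    match = re.search(r'\{([^{}]*)\}', pattern)
    if not match:
        return [pattern]
    head, tail = pattern[:match.start()], pattern[match.end():]
    results = []
    for option in match.group(1).split(','):
        results.extend(expand_braces(head + option + tail))
    return results

print(expand_braces('{foo,bar}.{baz,bar}.{baz,foo}'))  # 8 fully expanded patterns
print(expand_braces('foo.ba?.*'))                      # ['foo.ba?.*'] (wildcards untouched)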
diff --git a/webapp/graphite/render/datalib.py b/webapp/graphite/render/datalib.py
index 20ff29221..037b7c44e 100755
--- a/webapp/graphite/render/datalib.py
+++ b/webapp/graphite/render/datalib.py
@@ -148,6 +148,11 @@ def copy(self, name=None, start=None, end=None, step=None, values=None, consolid
     )
 
+  def datapoints(self):
+    timestamps = range(int(self.start), int(self.end) + 1, int(self.step * self.valuesPerPoint))
+    return list(zip(self, timestamps))
+
+
 # Data retrieval API
 @logtime
 def fetchData(requestContext, pathExpr, timer=None):
diff --git a/webapp/graphite/render/functions.py b/webapp/graphite/render/functions.py
index 2424c65f0..a015aaebf 100644
--- a/webapp/graphite/render/functions.py
+++ b/webapp/graphite/render/functions.py
@@ -4078,11 +4078,14 @@ def constantLine(requestContext, value):
   Param('value', ParamTypes.float, required=True),
 ]
 
-def aggregateLine(requestContext, seriesList, func='average'):
+def aggregateLine(requestContext, seriesList, func='average', keepStep=False):
   """
   Takes a metric or wildcard seriesList and draws a horizontal line
   based on the function applied to each series.
 
+  If the optional keepStep parameter is set to True, the result will
+  have the same time period and step as the source series.
+
   Note: By default, the graphite renderer consolidates data points by
   averaging data points over time. If you are using the 'min' or 'max'
   function for aggregateLine, this can cause an unusual gap in the
@@ -4110,16 +4113,21 @@ def aggregateLine(requestContext, seriesList, func='average'):
     else:
       name = 'aggregateLine(%s, None)' % (series.name)
 
-    [series] = constantLine(requestContext, value)
-    series.name = name
-    series.pathExpression = name
-    results.append(series)
+    if keepStep:
+      aggSeries = series.copy(name=name, values=[value] * len(series))
+    else:
+      [aggSeries] = constantLine(requestContext, value)
+      aggSeries.name = name
+
+    aggSeries.pathExpression = name
+    results.append(aggSeries)
   return results
 
 aggregateLine.group = 'Calculate'
 aggregateLine.params = [
   Param('seriesList', ParamTypes.seriesList, required=True),
   Param('func', ParamTypes.aggFunc, default='average', options=aggFuncNames),
+  Param('keepStep', ParamTypes.boolean, default=False),
 ]
 
 def verticalLine(requestContext, ts, label=None, color=None):
@@ -4848,7 +4856,7 @@ def hitcount(requestContext, seriesList, intervalString, alignToInterval = False
 
   for series in seriesList:
     step = int(series.step)
-    bucket_count = int(math.ceil(float(series.end - series.start) // interval))
+    bucket_count = int(math.ceil(float(series.end - series.start) / interval))
    buckets = [[] for _ in range(bucket_count)]
     newStart = int(series.end - bucket_count * interval)
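The hitcount change above replaces floor division with true division inside math.ceil(). A quick illustration of why the old expression under-counted buckets (the numbers are made up for the example):

import math

interval = 3600          # seconds per bucket
span = 5000              # series.end - series.start

old_count = int(math.ceil(float(span) // interval))  # floor first: ceil(1.0) -> 1
new_count = int(math.ceil(float(span) / interval))   # ceil(1.388...) -> 2

print(old_count, new_count)  # 1 2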
diff --git a/webapp/graphite/render/glyph.py b/webapp/graphite/render/glyph.py
index 4985cca7b..2c2ffa4fe 100644
--- a/webapp/graphite/render/glyph.py
+++ b/webapp/graphite/render/glyph.py
@@ -19,6 +19,7 @@
 from six.moves.configparser import SafeConfigParser
 from django.conf import settings
 import pytz
+import six
 
 from graphite.render.datalib import TimeSeries
 from graphite.util import json, BytesIO
@@ -594,7 +595,7 @@ def setColor(self, value, alpha=1.0, forceAlpha=False):
       r,g,b = value
     elif value in colorAliases:
       r,g,b = colorAliases[value]
-    elif type(value) in (str,unicode) and len(value) >= 6:
+    elif isinstance(value, six.string_types) and len(value) >= 6:
       s = value
       if s[0] == '#': s = s[1:]
       if s[0:3] == '%23': s = s[3:]
@@ -985,7 +986,7 @@ def drawGraph(self,**params):
 
     if 'yUnitSystem' not in params:
       params['yUnitSystem'] = 'si'
     else:
-      params['yUnitSystem'] = unicode(params['yUnitSystem']).lower()
+      params['yUnitSystem'] = six.text_type(params['yUnitSystem']).lower()
       if params['yUnitSystem'] not in UnitSystems:
         params['yUnitSystem'] = 'si'
@@ -1044,11 +1045,11 @@ def drawGraph(self,**params):
 
     self.setColor( self.foregroundColor )
     if params.get('title'):
-      self.drawTitle( unicode( unquote_plus(params['title']) ) )
+      self.drawTitle( six.text_type( unquote_plus(params['title']) ) )
     if params.get('vtitle'):
-      self.drawVTitle( unicode( unquote_plus(params['vtitle']) ) )
+      self.drawVTitle( six.text_type( unquote_plus(params['vtitle']) ) )
     if self.secondYAxis and params.get('vtitleRight'):
-      self.drawVTitle( unicode( unquote_plus(params['vtitleRight']) ), rightAlign=True )
+      self.drawVTitle( six.text_type( unquote_plus(params['vtitleRight']) ), rightAlign=True )
     self.setFont()
 
     if not params.get('hideLegend', len(self.data) > settings.LEGEND_MAX_ITEMS):
@@ -1850,7 +1851,7 @@ def drawLabels(self):
       if slice['value'] < 10 and slice['value'] != int(slice['value']):
         label = "%.2f" % slice['value']
       else:
-        label = unicode(int(slice['value']))
+        label = six.text_type(int(slice['value']))
       theta = slice['midAngle']
       x = self.x0 + (self.radius / 2.0 * math.cos(theta))
       y = self.y0 + (self.radius / 2.0 * math.sin(theta))
diff --git a/webapp/graphite/render/views.py b/webapp/graphite/render/views.py
index 9c6ad4552..baeccd1df 100755
--- a/webapp/graphite/render/views.py
+++ b/webapp/graphite/render/views.py
@@ -191,48 +191,41 @@ def renderViewCsv(requestOptions, data):
 
 def renderViewJson(requestOptions, data):
   series_data = []
-  if 'maxDataPoints' in requestOptions and any(data):
-    maxDataPoints = requestOptions['maxDataPoints']
-    if maxDataPoints == 1:
-      for series in data:
-        series.consolidate(len(series))
-        datapoints = list(zip(series, [int(series.start)]))
-        series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
-    else:
-      startTime = min([series.start for series in data])
-      endTime = max([series.end for series in data])
-      timeRange = endTime - startTime
-      for series in data:
-        numberOfDataPoints = timeRange/series.step
-        if maxDataPoints < numberOfDataPoints:
-          valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
-          secondsPerPoint = int(valuesPerPoint * series.step)
-          # Nudge start over a little bit so that the consolidation bands align with each call
-          # removing 'jitter' seen when refreshing.
-          nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
-          series.start = series.start + nudge
-          valuesToLose = int(nudge/series.step)
-          for r in range(1, valuesToLose):
-            del series[0]
-          series.consolidate(valuesPerPoint)
-          timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
-        else:
-          timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
-        datapoints = list(zip(series, timestamps))
-        series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
-  elif 'noNullPoints' in requestOptions and any(data):
-    for series in data:
-      values = []
-      for (index,v) in enumerate(series):
-        if v is not None and not math.isnan(v):
-          timestamp = series.start + (index * series.step)
-          values.append((v,timestamp))
-      if len(values) > 0:
-        series_data.append(dict(target=series.name, tags=series.tags, datapoints=values))
-  else:
+
+  if any(data):
+    startTime = min([series.start for series in data])
+    endTime = max([series.end for series in data])
+    timeRange = endTime - startTime
+
   for series in data:
-      timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
-      datapoints = list(zip(series, timestamps))
+    if 'maxDataPoints' in requestOptions:
+      maxDataPoints = requestOptions['maxDataPoints']
+      if maxDataPoints == 1:
+        series.consolidate(len(series))
+      else:
+        numberOfDataPoints = timeRange/series.step
+        if maxDataPoints < numberOfDataPoints:
+          valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
+          secondsPerPoint = int(valuesPerPoint * series.step)
+          # Nudge start over a little bit so that the consolidation bands align with each call
+          # removing 'jitter' seen when refreshing.
+          nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
+          series.start = series.start + nudge
+          valuesToLose = int(nudge/series.step)
+          for r in range(1, valuesToLose):
+            del series[0]
+          series.consolidate(valuesPerPoint)
+
+    datapoints = series.datapoints()
+
+    if 'noNullPoints' in requestOptions:
+      datapoints = [
+        point for point in datapoints
+        if point[0] is not None and not math.isnan(point[0])
+      ]
+      if not datapoints:
+        continue
+    series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
 
   output = json.dumps(series_data, indent=(2 if requestOptions.get('pretty') else None)).replace('None,', 'null,').replace('NaN,', 'null,').replace('Infinity,', '1e9999,')
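renderViewJson now delegates value/timestamp pairing to the new TimeSeries.datapoints() helper added in datalib.py above. A minimal sketch of the pairing it performs, using plain lists rather than the TimeSeries class and assuming an already-consolidated series:

start, end, step = 0, 600, 60
valuesPerPoint = 2                 # as set by TimeSeries.consolidate()
values = [1, 3, 5, 7, 9]           # one value per consolidated bucket

timestamps = range(int(start), int(end) + 1, int(step * valuesPerPoint))
datapoints = list(zip(values, timestamps))
print(datapoints)  # [(1, 0), (3, 120), (5, 240), (7, 360), (9, 480)]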
diff --git a/webapp/graphite/tags/utils.py b/webapp/graphite/tags/utils.py
index 218087724..37854b8e7 100644
--- a/webapp/graphite/tags/utils.py
+++ b/webapp/graphite/tags/utils.py
@@ -67,7 +67,7 @@ def format(tags):
     ]))
 
   @staticmethod
-  def encode(metric, sep='.'):
+  def encode(metric, sep='.', hash_only=False):
     """
     Helper function to encode tagged series for storage in whisper etc
 
@@ -81,6 +81,10 @@
     each carbon database and graphite-web finder is responsible for handling its own encoding so
     that different backends can create their own schemes if desired.
 
+    The hash_only parameter can be set to True to use the hash as the filename instead of a
+    human-readable name. This avoids issues with filename length restrictions, at the expense of
+    being unable to decode the filename and determine the original metric name.
+
     A concrete example:
 
     .. code-block:: none
@@ -95,7 +99,12 @@
     """
     if ';' in metric:
       metric_hash = sha256(metric.encode('utf8')).hexdigest()
-      return sep.join(['_tagged', metric_hash[0:3], metric_hash[3:6], metric.replace('.', '_DOT_')])
+      return sep.join([
+        '_tagged',
+        metric_hash[0:3],
+        metric_hash[3:6],
+        metric_hash if hash_only else metric.replace('.', '_DOT_')
+      ])
 
     # metric isn't tagged, just replace dots with the separator and trim any leading separator
     return metric.replace('.', sep).lstrip(sep)
diff --git a/webapp/graphite/tags/views.py b/webapp/graphite/tags/views.py
index c21c58d4d..a4080b5f2 100644
--- a/webapp/graphite/tags/views.py
+++ b/webapp/graphite/tags/views.py
@@ -1,9 +1,10 @@
 from graphite.util import jsonResponse, HttpResponse, HttpError
 from graphite.storage import STORE, extractForwardHeaders
 
-def _requestContext(request):
+def _requestContext(request, queryParams):
   return {
     'forwardHeaders': extractForwardHeaders(request),
+    'localOnly': queryParams.get('local') == '1',
   }
 
 @jsonResponse
@@ -15,7 +16,7 @@ def tagSeries(request, queryParams):
   if not path:
     raise HttpError('no path specified', status=400)
 
-  return STORE.tagdb.tag_series(path, requestContext=_requestContext(request))
+  return STORE.tagdb.tag_series(path, requestContext=_requestContext(request, queryParams))
 
 @jsonResponse
 def tagMultiSeries(request, queryParams):
@@ -32,7 +33,7 @@ def tagMultiSeries(request, queryParams):
   else:
     raise HttpError('no paths specified',status=400)
 
-  return STORE.tagdb.tag_multi_series(paths, requestContext=_requestContext(request))
+  return STORE.tagdb.tag_multi_series(paths, requestContext=_requestContext(request, queryParams))
 
 @jsonResponse
 def delSeries(request, queryParams):
@@ -49,7 +50,7 @@ def delSeries(request, queryParams):
   else:
     raise HttpError('no path specified', status=400)
 
-  return STORE.tagdb.del_multi_series(paths, requestContext=_requestContext(request))
+  return STORE.tagdb.del_multi_series(paths, requestContext=_requestContext(request, queryParams))
 
 @jsonResponse
 def findSeries(request, queryParams):
@@ -67,7 +68,7 @@ def findSeries(request, queryParams):
   if not exprs:
     raise HttpError('no tag expressions specified', status=400)
 
-  return STORE.tagdb.find_series(exprs, requestContext=_requestContext(request))
+  return STORE.tagdb.find_series(exprs, requestContext=_requestContext(request, queryParams))
 
 @jsonResponse
 def tagList(request, queryParams):
@@ -77,7 +78,7 @@ def tagList(request, queryParams):
   return STORE.tagdb.list_tags(
     tagFilter=request.GET.get('filter'),
     limit=request.GET.get('limit'),
-    requestContext=_requestContext(request),
+    requestContext=_requestContext(request, queryParams),
   )
 
 @jsonResponse
@@ -89,7 +90,7 @@ def tagDetails(request, queryParams, tag):
     tag,
     valueFilter=queryParams.get('filter'),
     limit=queryParams.get('limit'),
-    requestContext=_requestContext(request),
+    requestContext=_requestContext(request, queryParams),
   )
 
 @jsonResponse
@@ -109,7 +110,7 @@ def autoCompleteTags(request, queryParams):
     exprs,
     tagPrefix=queryParams.get('tagPrefix'),
     limit=queryParams.get('limit'),
-    requestContext=_requestContext(request)
+    requestContext=_requestContext(request, queryParams)
   )
 
 @jsonResponse
@@ -134,5 +135,5 @@ def autoCompleteValues(request, queryParams):
     tag,
     valuePrefix=queryParams.get('valuePrefix'),
     limit=queryParams.get('limit'),
-    requestContext=_requestContext(request)
+    requestContext=_requestContext(request, queryParams)
   )
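For reference, a self-contained rendition of the encode() helper as changed above, showing the two filename variants the whisper and ceres finders now probe for (the example metric name is arbitrary, and the resulting hash prefixes are simply whatever sha256 yields for it):

from hashlib import sha256

def encode(metric, sep='.', hash_only=False):
    if ';' in metric:
        metric_hash = sha256(metric.encode('utf8')).hexdigest()
        return sep.join([
            '_tagged',
            metric_hash[0:3],
            metric_hash[3:6],
            metric_hash if hash_only else metric.replace('.', '_DOT_'),
        ])
    return metric.replace('.', sep).lstrip(sep)

print(encode('disk.used;datacenter=dc1'))                  # readable name, dots become _DOT_
print(encode('disk.used;datacenter=dc1', hash_only=True))  # fixed-length hash filename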
diff --git a/webapp/tests/test_finders.py b/webapp/tests/test_finders.py
index 9de92d254..40ea699a3 100644
--- a/webapp/tests/test_finders.py
+++ b/webapp/tests/test_finders.py
@@ -137,6 +137,15 @@ def test_standard_finder(self,scandir_mock):
         self.create_whisper('foo.wsp')
         self.create_whisper(join('foo', 'bar', 'baz.wsp'))
         self.create_whisper(join('bar', 'baz', 'foo.wsp'))
+        self.create_whisper(join('_tagged', '9c6', '79b', 'foo;bar=baz.wsp'))
+        self.create_whisper(join(
+            '_tagged',
+            'b34',
+            '2de',
+            # foo;bar=baz2
+            'b342defa10cb579981c63ef78be5ac248f681f4bd2c35bc0209d3a7b9eb99346.wsp'
+        ))
+
         finder = get_finders('graphite.finders.standard.StandardFinder')[0]
 
         scandir_mock.call_count = 0
@@ -152,52 +161,62 @@ def test_standard_finder(self,scandir_mock):
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('*.ba?.{baz,foo}', None, None))
         self.assertEqual(len(list(nodes)), 2)
-        self.assertEqual(scandir_mock.call_count, 5)
+        self.assertEqual(scandir_mock.call_count, 4)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{foo,bar}.{baz,bar}.{baz,foo}', None, None))
         self.assertEqual(len(list(nodes)), 2)
-        self.assertEqual(scandir_mock.call_count, 5)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{foo}.bar.*', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 2)
+        self.assertEqual(scandir_mock.call_count, 1)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('foo.{ba{r,z},baz}.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{foo,garbage}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{fo{o}}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('foo{}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{fo,ba}{o}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{fo,ba}{o,o}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         scandir_mock.call_count = 0
         nodes = finder.find_nodes(FindQuery('{fo,ba}{o,z}.bar.baz', None, None))
         self.assertEqual(len(list(nodes)), 1)
-        self.assertEqual(scandir_mock.call_count, 1)
+        self.assertEqual(scandir_mock.call_count, 0)
+
+        scandir_mock.call_count = 0
+        nodes = finder.find_nodes(FindQuery('foo;bar=baz', None, None))
+        self.assertEqual(len(list(nodes)), 1)
+        self.assertEqual(scandir_mock.call_count, 0)
+
+        scandir_mock.call_count = 0
+        nodes = finder.find_nodes(FindQuery('foo;bar=baz2', None, None))
+        self.assertEqual(len(list(nodes)), 1)
+        self.assertEqual(scandir_mock.call_count, 0)
 
         results = finder.fetch(['foo'], 0, 1)
         self.assertEqual(results, [])
@@ -222,7 +241,7 @@ def test_standard_finder_gzipped_whisper(self, scandir_mock):
             scandir_mock.call_count = 0
             nodes = finder.find_nodes(FindQuery('foo{}.bar.baz', None, None))
             self.assertEqual(len(list(nodes)), 1)
-            self.assertEqual(scandir_mock.call_count, 1)
+            self.assertEqual(scandir_mock.call_count, 0)
 
         finally:
             scandir_mock.call_count = 0
@@ -325,6 +344,14 @@ def wipe_ceres():
         create_ceres('foo')
         create_ceres('foo.bar.baz')
         create_ceres('bar.baz.foo')
+        create_ceres(
+            # foo;bar=baz
+            '_tagged.9c6.79b.foo;bar=baz'
+        )
+        create_ceres(
+            # foo;bar=baz2
+            '_tagged.b34.2de.b342defa10cb579981c63ef78be5ac248f681f4bd2c35bc0209d3a7b9eb99346'
+        )
 
         finder = get_finders('graphite.finders.ceres.CeresFinder')[0]
 
@@ -344,6 +371,12 @@ def wipe_ceres():
         nodes = finder.find_nodes(FindQuery('*.ba?.{baz,foo}', None, None))
         self.assertEqual(len(list(nodes)), 2)
 
+        nodes = finder.find_nodes(FindQuery('foo;bar=baz', None, None))
+        self.assertEqual(len(list(nodes)), 1)
+
+        nodes = finder.find_nodes(FindQuery('foo;bar=baz2', None, None))
+        self.assertEqual(len(list(nodes)), 1)
+
         # Search for something that isn't valid Ceres content
         fh = open(join(test_dir, 'foo', 'blah'), 'wb')
         fh.close()
@@ -352,4 +385,10 @@ def wipe_ceres():
 
         # get index
         result = finder.get_index({})
-        self.assertEqual(result, ['bar.baz.foo', 'foo', 'foo.bar.baz'])
+        self.assertEqual(result, [
+            '_tagged.9c6.79b.foo;bar=baz',
+            '_tagged.b34.2de.b342defa10cb579981c63ef78be5ac248f681f4bd2c35bc0209d3a7b9eb99346',
+            'bar.baz.foo',
+            'foo',
+            'foo.bar.baz',
+        ])
diff --git a/webapp/tests/test_finders_remote.py b/webapp/tests/test_finders_remote.py
index c7505b44c..361eae0f4 100644
--- a/webapp/tests/test_finders_remote.py
+++ b/webapp/tests/test_finders_remote.py
@@ -339,6 +339,7 @@ def test_auto_complete_tags(self, http_request):
             'fields': [
                 ('tagPrefix', 'tag'),
                 ('limit', '100'),
+                ('local', '1'),
                 ('expr', 'name=test'),
             ],
             'headers': None,
@@ -367,6 +368,7 @@ def test_auto_complete_tags(self, http_request):
             'fields': [
                 ('tagPrefix', 'tag'),
                 ('limit', '5'),
+                ('local', '1'),
                 ('expr', 'name=test'),
                 ('expr', 'tag3=value3'),
             ],
@@ -417,6 +419,7 @@ def test_auto_complete_values(self, http_request):
                 ('tag', 'tag1'),
                 ('valuePrefix', 'value'),
                 ('limit', '100'),
+                ('local', '1'),
                 ('expr', 'name=test'),
             ],
             'headers': None,
@@ -446,6 +449,7 @@ def test_auto_complete_values(self, http_request):
                 ('tag', 'tag1'),
                 ('valuePrefix', 'value'),
                 ('limit', '5'),
+                ('local', '1'),
                 ('expr', 'name=test'),
                 ('expr', 'tag3=value3'),
             ],
diff --git a/webapp/tests/test_functions.py b/webapp/tests/test_functions.py
index b5017f7d1..e76f78e04 100644
--- a/webapp/tests/test_functions.py
+++ b/webapp/tests/test_functions.py
@@ -3577,10 +3577,10 @@ def test_aggregateLine_avg(self):
         )
 
         expectedResult = [
-            TimeSeries('aggregateLine(collectd.test-db1.load.value, 4)', 3600, 3600, 0, [4.0, 4.0, 4.0]),
-            TimeSeries('aggregateLine(collectd.test-db2.load.value, None)', 3600, 3600, 0, [None, None, None]),
-            TimeSeries('aggregateLine(collectd.test-db3.load.value, 1.85714)', 3600, 3600, 0, [1.8571428571428572, 1.8571428571428572, 1.8571428571428572]),
-            TimeSeries('aggregateLine(collectd.test-db4.load.value, 8.22222)', 3600, 3600, 0, [8.222222222222221, 8.222222222222221, 8.222222222222221]),
+            TimeSeries('aggregateLine(collectd.test-db1.load.value, 4)', 0, 600, 60, [4.0] * 10),
+            TimeSeries('aggregateLine(collectd.test-db2.load.value, None)', 0, 600, 60, [None] * 10),
+            TimeSeries('aggregateLine(collectd.test-db3.load.value, 1.85714)', 0, 600, 60, [1.8571428571428572] * 10),
+            TimeSeries('aggregateLine(collectd.test-db4.load.value, 8.22222)', 0, 600, 60, [8.222222222222221] * 10),
         ]
         result = functions.aggregateLine(
             self._build_requestContext(
@@ -3588,7 +3588,8 @@ def test_aggregateLine_avg(self):
                 endTime=datetime(1970,1,1,1,0,0,0,pytz.timezone(settings.TIME_ZONE))
             ),
             seriesList,
-            'avg'
+            'avg',
+            keepStep=True
         )
         self.assertEqual(result, expectedResult)
@@ -3635,10 +3636,10 @@ def test_aggregateLine_max(self):
         )
 
         expectedResult = [
-            TimeSeries('aggregateLine(collectd.test-db1.load.value, 7)', 3600, 3600, 0, [7.0, 7.0, 7.0]),
-            TimeSeries('aggregateLine(collectd.test-db2.load.value, None)', 3600, 3600, 0, [None, None, None]),
-            TimeSeries('aggregateLine(collectd.test-db3.load.value, 4)', 3600, 3600, 0, [4.0, 4.0, 4.0]),
-            TimeSeries('aggregateLine(collectd.test-db4.load.value, 10)', 3600, 3600, 0, [10.0, 10.0, 10.0]),
+            TimeSeries('aggregateLine(collectd.test-db1.load.value, 7)', 0, 600, 60, [7.0] * 10),
+            TimeSeries('aggregateLine(collectd.test-db2.load.value, None)', 0, 600, 60, [None] * 10),
+            TimeSeries('aggregateLine(collectd.test-db3.load.value, 4)', 0, 600, 60, [4.0] * 10),
+            TimeSeries('aggregateLine(collectd.test-db4.load.value, 10)', 0, 600, 60, [10.0] * 10),
         ]
         result = functions.aggregateLine(
             self._build_requestContext(
@@ -3646,7 +3647,8 @@ def test_aggregateLine_max(self):
                 endTime=datetime(1970,1,1,1,0,0,0,pytz.timezone(settings.TIME_ZONE))
             ),
             seriesList,
-            'max'
+            'max',
+            keepStep=True
         )
         self.assertEqual(result, expectedResult)
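A rough sketch of what the keepStep flag changes for aggregateLine, mirroring the updated test expectations above with plain lists instead of TimeSeries objects:

# Source series: start=0, end=600, step=60 -> 10 points averaging 4.0.
source_values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 3.0, 4.0, 5.0]
value = sum(source_values) / len(source_values)   # 4.0

# keepStep=False (default): a constantLine()-style series, a handful of points
# spread across the requested time range.
default_result = [value] * 3

# keepStep=True: same start/end/step as the source series, one point per step.
keep_step_result = [value] * len(source_values)

print(default_result)    # [4.0, 4.0, 4.0]
print(keep_step_result)  # ten 4.0 values, aligned with the source series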