From a5269eadc631b53776b74e36a319d80140b217eb Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 2 Feb 2021 14:54:22 -0500 Subject: [PATCH 01/25] Make histogram allocations more efficient, general cleanup --- glide.lock | 16 +- histogram.go | 107 +++++++--- .../buffered_read_transport_test.go | 1 + m3/example/local_server.go | 5 +- m3/reporter.go | 196 ++++++++++++------ m3/reporter_benchmark_test.go | 10 - m3/reporter_test.go | 9 +- m3/resource_pool.go | 2 + m3/scope_test.go | 1 + m3/thriftudp/transport_test.go | 10 +- stats.go | 111 +++++++--- 11 files changed, 313 insertions(+), 155 deletions(-) diff --git a/glide.lock b/glide.lock index bb90fb08..1fcc69d0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ hash: 6c5a37d4f995175d7ab310d09b5866057c683536b0ae3d8f478f87943aa03be4 -updated: 2019-11-07T15:02:21.080076-05:00 +updated: 2021-02-05T09:45:14.308188-05:00 imports: - name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 subpackages: - quantile - name: github.com/cactus/go-statsd-client @@ -10,7 +10,7 @@ imports: subpackages: - statsd - name: github.com/golang/protobuf - version: 14aad3d5ea4c323bcd7a2137e735da24a76e814c + version: 6c65a5562fc06764971b7c5d05c76c75e84bdbf7 subpackages: - proto - name: github.com/m3db/prometheus_client_golang @@ -35,13 +35,15 @@ imports: subpackages: - pbutil - name: github.com/pkg/errors - version: ba968bfe8b2f7e042a574c888954fccecfa385b4 + version: 614d223910a179a466c1767a985424175c39b465 +- name: github.com/pkg/profile + version: 5b67d428864e92711fcbd2f8629456121a56d91f - name: go.uber.org/atomic - version: 9dc4df04d0d1c39369750a9f6c32c39560672089 + version: 12f27ba2637fa0e13772a4f05fa46a5d18d53182 - name: gopkg.in/validator.v2 - version: 135c24b11c19e52befcae2ec3fca5d9b78c4e98e + version: 2b28d334fa054977b7c725c7639a1ca4efc18bad - name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 + version: 7649d4548cb53a614db133b2a8ac1f31859dda8c testImports: - name: github.com/axw/gocov version: 54b98cfcac0c63fb3f9bd8e7ad241b724d4e985b diff --git a/histogram.go b/histogram.go index de22cedd..9b4bf67b 100644 --- a/histogram.go +++ b/histogram.go @@ -35,6 +35,13 @@ var ( errBucketsCountNeedsGreaterThanZero = errors.New("n needs to be > 0") errBucketsStartNeedsGreaterThanZero = errors.New("start needs to be > 0") errBucketsFactorNeedsGreaterThanOne = errors.New("factor needs to be > 1") + + _singleBucket = bucketPair{ + lowerBoundDuration: time.Duration(math.MinInt64), + upperBoundDuration: time.Duration(math.MaxInt64), + lowerBoundValue: -math.MaxFloat64, + upperBoundValue: math.MaxFloat64, + } ) // ValueBuckets is a set of float64 values that implements Buckets. @@ -119,54 +126,90 @@ func (v DurationBuckets) AsDurations() []time.Duration { return []time.Duration(v) } +func newBucketPair( + htype histogramType, + durations []time.Duration, + prevDuration time.Duration, + values []float64, + prevValue float64, + upperBoundIndex int, +) bucketPair { + var pair bucketPair + + switch htype { + case durationHistogramType: + pair = bucketPair{ + lowerBoundDuration: prevDuration, + upperBoundDuration: durations[upperBoundIndex], + } + case valueHistogramType: + pair = bucketPair{ + lowerBoundValue: prevValue, + upperBoundValue: values[upperBoundIndex], + } + default: + // nop + } + + return pair +} + // BucketPairs creates a set of bucket pairs from a set // of buckets describing the lower and upper bounds for // each derived bucket. 
func BucketPairs(buckets Buckets) []BucketPair { + htype := valueHistogramType + if _, ok := buckets.(DurationBuckets); ok { + htype = durationHistogramType + } + if buckets == nil || buckets.Len() < 1 { - return []BucketPair{ - - bucketPair{ - lowerBoundValue: -math.MaxFloat64, - upperBoundValue: math.MaxFloat64, - lowerBoundDuration: time.Duration(math.MinInt64), - upperBoundDuration: time.Duration(math.MaxInt64), - }, - } + return []BucketPair{_singleBucket} } var ( - asValueBuckets = copyAndSortValues(buckets.AsValues()) - asDurationBuckets = copyAndSortDurations(buckets.AsDurations()) - pairs = make([]BucketPair, 0, buckets.Len()+2) + values []float64 + durations []time.Duration + prevDuration = _singleBucket.lowerBoundDuration + prevValue = _singleBucket.lowerBoundValue + pairs = make([]BucketPair, 0, buckets.Len()+2) + pair bucketPair ) - pairs = append(pairs, bucketPair{ - lowerBoundValue: -math.MaxFloat64, - upperBoundValue: asValueBuckets[0], - lowerBoundDuration: time.Duration(math.MinInt64), - upperBoundDuration: asDurationBuckets[0], - }) + switch htype { + case durationHistogramType: + durations = copyAndSortDurations(buckets.AsDurations()) + pair.lowerBoundDuration = prevDuration + pair.upperBoundDuration = durations[0] + case valueHistogramType: + values = copyAndSortValues(buckets.AsValues()) + pair.lowerBoundValue = prevValue + pair.upperBoundValue = values[0] + default: + // n.b. This branch will never be executed because htype is only ever + // one of two values. + panic("unsupported histogram type") + } - prevValueBucket, prevDurationBucket := - asValueBuckets[0], asDurationBuckets[0] + pairs = append(pairs, pair) + prevDuration = pairs[0].UpperBoundDuration() + prevValue = pairs[0].UpperBoundValue() for i := 1; i < buckets.Len(); i++ { - pairs = append(pairs, bucketPair{ - lowerBoundValue: prevValueBucket, - upperBoundValue: asValueBuckets[i], - lowerBoundDuration: prevDurationBucket, - upperBoundDuration: asDurationBuckets[i], - }) - prevValueBucket, prevDurationBucket = - asValueBuckets[i], asDurationBuckets[i] + pairs = append( + pairs, + newBucketPair(htype, durations, prevDuration, values, prevValue, i), + ) + + prevValue = pairs[i].UpperBoundValue() + prevDuration = pairs[i].UpperBoundDuration() } pairs = append(pairs, bucketPair{ - lowerBoundValue: prevValueBucket, - upperBoundValue: math.MaxFloat64, - lowerBoundDuration: prevDurationBucket, - upperBoundDuration: time.Duration(math.MaxInt64), + lowerBoundValue: prevValue, + upperBoundValue: _singleBucket.upperBoundValue, + lowerBoundDuration: prevDuration, + upperBoundDuration: _singleBucket.upperBoundDuration, }) return pairs diff --git a/m3/customtransports/buffered_read_transport_test.go b/m3/customtransports/buffered_read_transport_test.go index ee70f671..eddf7b21 100644 --- a/m3/customtransports/buffered_read_transport_test.go +++ b/m3/customtransports/buffered_read_transport_test.go @@ -45,6 +45,7 @@ func TestTBufferedReadTransport(t *testing.T) { secondRead := make([]byte, 7) n, err = trans.Read(secondRead) require.Equal(t, 6, n) + require.NoError(t, err) require.Equal(t, []byte("String"), secondRead[0:6]) require.Equal(t, uint64(0), trans.RemainingBytes()) } diff --git a/m3/example/local_server.go b/m3/example/local_server.go index d5c84fd8..884abde2 100644 --- a/m3/example/local_server.go +++ b/m3/example/local_server.go @@ -86,7 +86,10 @@ func (f *localM3Server) Serve() error { } else { proto = thrift.NewTBinaryProtocolTransport(trans) } - f.processor.Process(proto, proto) + + if _, err = 
f.processor.Process(proto, proto); err != nil { + fmt.Println("Error processing thrift metric:", err) + } } } diff --git a/m3/reporter.go b/m3/reporter.go index 589ff0d1..d4ac05a2 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -21,15 +21,16 @@ package m3 import ( - "errors" "fmt" "io" "math" "os" + "sort" "strconv" "sync" "time" + "github.com/pkg/errors" "github.com/uber-go/tally" customtransport "github.com/uber-go/tally/m3/customtransports" m3thrift "github.com/uber-go/tally/m3/thrift" @@ -207,7 +208,7 @@ func NewReporter(opts Options) (Reporter, error) { if opts.CommonTags[HostTag] == "" { hostname, err := os.Hostname() if err != nil { - return nil, fmt.Errorf("error resolving host tag: %v", err) + return nil, errors.WithMessage(err, "error resolving host tag") } tags[createTag(resourcePool, HostTag, hostname)] = true } @@ -217,13 +218,22 @@ func NewReporter(opts Options) (Reporter, error) { batch := resourcePool.getBatch() batch.CommonTags = tags batch.Metrics = []*m3thrift.Metric{} + proto := resourcePool.getProto() - batch.Write(proto) - calc := proto.Transport().(*customtransport.TCalcTransport) - numOverheadBytes := emitMetricBatchOverhead + calc.GetCount() + if err := batch.Write(proto); err != nil { + return nil, errors.WithMessage( + err, + "failed to write to proto for size calculation", + ) + } + + var ( + calc = proto.Transport().(*customtransport.TCalcTransport) + numOverheadBytes = emitMetricBatchOverhead + calc.GetCount() + freeBytes = opts.MaxPacketSizeBytes - numOverheadBytes + ) calc.ResetCount() - freeBytes := opts.MaxPacketSizeBytes - numOverheadBytes if freeBytes <= 0 { return nil, errCommonTagSize } @@ -250,35 +260,60 @@ func NewReporter(opts Options) (Reporter, error) { // AllocateCounter implements tally.CachedStatsReporter. func (r *reporter) AllocateCounter( - name string, tags map[string]string, + name string, + tags map[string]string, ) tally.CachedCount { return r.allocateCounter(name, tags) } func (r *reporter) allocateCounter( - name string, tags map[string]string, + name string, + tags map[string]string, ) cachedMetric { - counter := r.newMetric(name, tags, counterType) - size := r.calculateSize(counter) - return cachedMetric{counter, r, size} + var ( + counter = r.newMetric(name, tags, counterType) + size = r.calculateSize(counter) + ) + + return cachedMetric{ + metric: counter, + reporter: r, + size: size, + } } // AllocateGauge implements tally.CachedStatsReporter. func (r *reporter) AllocateGauge( - name string, tags map[string]string, + name string, + tags map[string]string, ) tally.CachedGauge { - gauge := r.newMetric(name, tags, gaugeType) - size := r.calculateSize(gauge) - return cachedMetric{gauge, r, size} + var ( + gauge = r.newMetric(name, tags, gaugeType) + size = r.calculateSize(gauge) + ) + + return cachedMetric{ + metric: gauge, + reporter: r, + size: size, + } } // AllocateTimer implements tally.CachedStatsReporter. func (r *reporter) AllocateTimer( - name string, tags map[string]string, + name string, + tags map[string]string, ) tally.CachedTimer { - timer := r.newMetric(name, tags, timerType) - size := r.calculateSize(timer) - return cachedMetric{timer, r, size} + var ( + timer = r.newMetric(name, tags, timerType) + size = r.calculateSize(timer) + ) + + return cachedMetric{ + metric: timer, + reporter: r, + size: size, + } } // AllocateHistogram implements tally.CachedStatsReporter. 
@@ -288,45 +323,57 @@ func (r *reporter) AllocateHistogram( buckets tally.Buckets, ) tally.CachedHistogram { var ( + _, isDuration = buckets.(tally.DurationBuckets) + bucketIDLen = int(math.Max( + float64(len(strconv.Itoa(buckets.Len()))), + float64(minMetricBucketIDTagLength), + )) + bucketIDLenStr = strconv.Itoa(bucketIDLen) + bucketIDFmt = "%0" + bucketIDLenStr + "d" cachedValueBuckets []cachedHistogramBucket cachedDurationBuckets []cachedHistogramBucket ) - bucketIDLen := len(strconv.Itoa(buckets.Len())) - bucketIDLen = int(math.Max(float64(bucketIDLen), - float64(minMetricBucketIDTagLength))) - bucketIDLenStr := strconv.Itoa(bucketIDLen) - bucketIDFmt := "%0" + bucketIDLenStr + "d" + for i, pair := range tally.BucketPairs(buckets) { - valueTags, durationTags := - make(map[string]string), make(map[string]string) + var ( + histTags = make(map[string]string, len(tags)) + idTagValue = fmt.Sprintf(bucketIDFmt, i) + ) for k, v := range tags { - valueTags[k], durationTags[k] = v, v + histTags[k] = v } + histTags[r.bucketIDTagName] = idTagValue + + if isDuration { + histTags[r.bucketTagName] = + r.durationBucketString(pair.LowerBoundDuration()) + "-" + + r.durationBucketString(pair.UpperBoundDuration()) + + cachedDurationBuckets = append(cachedDurationBuckets, cachedHistogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + metric: r.allocateCounter(name, histTags), + }) + } else { + histTags[r.bucketTagName] = + r.valueBucketString(pair.LowerBoundValue()) + "-" + + r.valueBucketString(pair.UpperBoundValue()) + + cachedValueBuckets = append(cachedValueBuckets, cachedHistogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + metric: r.allocateCounter(name, histTags), + }) + } + } - idTagValue := fmt.Sprintf(bucketIDFmt, i) - - valueTags[r.bucketIDTagName] = idTagValue - valueTags[r.bucketTagName] = fmt.Sprintf("%s-%s", - r.valueBucketString(pair.LowerBoundValue()), - r.valueBucketString(pair.UpperBoundValue())) - - cachedValueBuckets = append(cachedValueBuckets, - cachedHistogramBucket{pair.UpperBoundValue(), - pair.UpperBoundDuration(), - r.allocateCounter(name, valueTags)}) - - durationTags[r.bucketIDTagName] = idTagValue - durationTags[r.bucketTagName] = fmt.Sprintf("%s-%s", - r.durationBucketString(pair.LowerBoundDuration()), - r.durationBucketString(pair.UpperBoundDuration())) - - cachedDurationBuckets = append(cachedDurationBuckets, - cachedHistogramBucket{pair.UpperBoundValue(), - pair.UpperBoundDuration(), - r.allocateCounter(name, durationTags)}) + return cachedHistogram{ + r: r, + name: name, + tags: tags, + cachedValueBuckets: cachedValueBuckets, + cachedDurationBuckets: cachedDurationBuckets, } - return cachedHistogram{r, name, tags, buckets, - cachedValueBuckets, cachedDurationBuckets} } func (r *reporter) valueBucketString(v float64) string { @@ -398,7 +445,7 @@ func (r *reporter) newMetric( func (r *reporter) calculateSize(m *m3thrift.Metric) int32 { r.calcLock.Lock() - m.Write(r.calcProto) + m.Write(r.calcProto) //nolint:errcheck size := r.calc.GetCount() r.calc.ResetCount() r.calcLock.Unlock() @@ -483,8 +530,10 @@ func (r *reporter) Tagging() bool { } func (r *reporter) process() { - mets := make([]*m3thrift.Metric, 0, (r.freeBytes / 10)) - bytes := int32(0) + var ( + mets = make([]*m3thrift.Metric, 0, (r.freeBytes / 10)) + bytes int32 + ) for smet := range r.metCh { if smet.m == nil { @@ -518,7 +567,7 @@ func (r *reporter) flush( ) []*m3thrift.Metric { r.curBatchLock.Lock() 
r.curBatch.Metrics = mets - r.client.EmitMetricBatch(r.curBatch) + r.client.EmitMetricBatch(r.curBatch) //nolint:errcheck r.curBatch.Metrics = nil r.curBatchLock.Unlock() @@ -585,7 +634,6 @@ type cachedHistogram struct { r *reporter name string tags map[string]string - buckets tally.Buckets cachedValueBuckets []cachedHistogramBucket cachedDurationBuckets []cachedHistogramBucket } @@ -597,25 +645,39 @@ type cachedHistogramBucket struct { } func (h cachedHistogram) ValueBucket( - bucketLowerBound, bucketUpperBound float64, + bucketLowerBound float64, + bucketUpperBound float64, ) tally.CachedHistogramBucket { - for _, b := range h.cachedValueBuckets { - if b.valueUpperBound >= bucketUpperBound { - return b.metric - } + var ( + n = len(h.cachedValueBuckets) + idx = sort.Search(n, func(i int) bool { + return h.cachedValueBuckets[i].valueUpperBound >= bucketUpperBound + }) + ) + + if idx == n { + return noopMetric{} } - return noopMetric{} + + return h.cachedValueBuckets[idx].metric } func (h cachedHistogram) DurationBucket( - bucketLowerBound, bucketUpperBound time.Duration, + bucketLowerBound time.Duration, + bucketUpperBound time.Duration, ) tally.CachedHistogramBucket { - for _, b := range h.cachedDurationBuckets { - if b.durationUpperBound >= bucketUpperBound { - return b.metric - } + var ( + n = len(h.cachedDurationBuckets) + idx = sort.Search(n, func(i int) bool { + return h.cachedDurationBuckets[i].durationUpperBound >= bucketUpperBound + }) + ) + + if idx == n { + return noopMetric{} } - return noopMetric{} + + return h.cachedDurationBuckets[idx].metric } type sizedMetric struct { diff --git a/m3/reporter_benchmark_test.go b/m3/reporter_benchmark_test.go index 7c7126f3..51be337f 100644 --- a/m3/reporter_benchmark_test.go +++ b/m3/reporter_benchmark_test.go @@ -28,16 +28,6 @@ import ( "github.com/uber-go/tally" ) -const ( - updaters = 10 - updates = 1000 - numIds = 10 - - testID = "stats.$dc.gauges.m3+" + - "servers.my-internal-server-$dc.network.eth0_tx_colls+" + - "dc=$dc,domain=production.$zone,env=production,pipe=$pipe,service=servers,type=gauge" -) - func BenchmarkNewMetric(b *testing.B) { r, _ := NewReporter(Options{ HostPorts: []string{"127.0.0.1:9052"}, diff --git a/m3/reporter_test.go b/m3/reporter_test.go index 4d7a5324..54a044e8 100644 --- a/m3/reporter_test.go +++ b/m3/reporter_test.go @@ -42,7 +42,6 @@ import ( ) const ( - numReaders = 10 queueSize = 1000 includeHost = true maxPacketSize = int32(1440) @@ -628,7 +627,13 @@ func (f *fakeM3Server) Serve() { } else { proto = thrift.NewTBinaryProtocolTransport(trans) } - f.processor.Process(proto, proto) + + _, err = f.processor.Process(proto, proto) + if terr, ok := err.(thrift.TTransportException); ok { + require.Equal(f.t, thrift.END_OF_FILE, terr.TypeId()) + } else { + require.NoError(f.t, err) + } } } diff --git a/m3/resource_pool.go b/m3/resource_pool.go index 1c97cd7c..10da73f4 100644 --- a/m3/resource_pool.go +++ b/m3/resource_pool.go @@ -146,6 +146,7 @@ func (r *resourcePool) getProto() thrift.TProtocol { return o.(thrift.TProtocol) } +//nolint:unused func (r *resourcePool) releaseProto(proto thrift.TProtocol) { calc := proto.Transport().(*customtransport.TCalcTransport) calc.ResetCount() @@ -180,6 +181,7 @@ func (r *resourcePool) releaseMetricValue(metVal *m3thrift.MetricValue) { r.valuePool.Put(metVal) } +//nolint:unused func (r *resourcePool) releaseMetrics(mets []*m3thrift.Metric) { for _, m := range mets { r.releaseMetric(m) diff --git a/m3/scope_test.go b/m3/scope_test.go index 46617734..2a201bb6 100644 --- 
a/m3/scope_test.go
+++ b/m3/scope_test.go
@@ -231,6 +231,7 @@ func BenchmarkScopeReportHistogram(b *testing.B) {
 	}
 
 	histogram := perEndpointScope.Histogram("inbound.latency", buckets)
+	b.ReportAllocs()
 	b.ResetTimer()
 
 	bucketsLen := len(buckets)
diff --git a/m3/thriftudp/transport_test.go b/m3/thriftudp/transport_test.go
index 60c2060f..f3ee3bc4 100644
--- a/m3/thriftudp/transport_test.go
+++ b/m3/thriftudp/transport_test.go
@@ -278,8 +278,9 @@ func TestFlushErrors(t *testing.T) {
 		require.NoError(t, err)
 
 		trans.conn.Close()
-		trans.Write([]byte{1, 2, 3, 4})
-		err = trans.Flush()
+		_, err = trans.Write([]byte{1, 2, 3, 4})
+		require.NoError(t, err)
+
 		require.Error(t, trans.Flush(), "Flush with data should fail")
 	})
 }
@@ -291,7 +292,9 @@ func TestResetInFlush(t *testing.T) {
 	trans, err := NewTUDPClientTransport(conn.LocalAddr().String(), "")
 	require.NoError(t, err)
 
-	trans.Write([]byte("some nonsense"))
+	_, err = trans.Write([]byte("some nonsense"))
+	require.NoError(t, err)
+
 	trans.conn.Close() // close the transport's connection via back door
 
 	err = trans.Flush()
diff --git a/stats.go b/stats.go
index 657b10f0..0559367a 100644
--- a/stats.go
+++ b/stats.go
@@ -280,29 +280,40 @@ func newHistogram(
 	buckets Buckets,
 	cachedHistogram CachedHistogram,
 ) *histogram {
-	htype := valueHistogramType
+	var (
+		pairs = BucketPairs(buckets)
+		htype = valueHistogramType
+	)
+
 	if _, ok := buckets.(DurationBuckets); ok {
 		htype = durationHistogramType
 	}
 
-	pairs := BucketPairs(buckets)
-
 	h := &histogram{
-		htype:            htype,
-		name:             name,
-		tags:             tags,
-		reporter:         reporter,
-		specification:    buckets,
-		buckets:          make([]histogramBucket, 0, len(pairs)),
-		lookupByValue:    make([]float64, 0, len(pairs)),
-		lookupByDuration: make([]int, 0, len(pairs)),
+		htype:         htype,
+		name:          name,
+		tags:          tags,
+		reporter:      reporter,
+		specification: buckets,
+		buckets:       make([]histogramBucket, 0, len(pairs)),
+	}
+
+	switch htype {
+	case valueHistogramType:
+		h.lookupByValue = make([]float64, 0, len(pairs))
+	case durationHistogramType:
+		h.lookupByDuration = make([]int, 0, len(pairs))
 	}
 
 	for _, pair := range pairs {
-		h.addBucket(newHistogramBucket(h,
-			pair.LowerBoundValue(), pair.UpperBoundValue(),
-			pair.LowerBoundDuration(), pair.UpperBoundDuration(),
-			cachedHistogram))
+		h.addBucket(newHistogramBucket(
+			h,
+			pair.LowerBoundValue(),
+			pair.UpperBoundValue(),
+			pair.LowerBoundDuration(),
+			pair.UpperBoundDuration(),
+			cachedHistogram,
+		))
 	}
 
 	return h
@@ -310,8 +321,15 @@ func newHistogram(
 
 func (h *histogram) addBucket(b histogramBucket) {
 	h.buckets = append(h.buckets, b)
-	h.lookupByValue = append(h.lookupByValue, b.valueUpperBound)
-	h.lookupByDuration = append(h.lookupByDuration, int(b.durationUpperBound))
+
+	switch h.htype {
+	case durationHistogramType:
+		h.lookupByDuration = append(h.lookupByDuration, int(b.durationUpperBound))
+	case valueHistogramType:
+		h.lookupByValue = append(h.lookupByValue, b.valueUpperBound)
+	default:
+		// nop
+	}
 }
 
 func (h *histogram) report(name string, tags map[string]string, r StatsReporter) {
@@ -320,15 +338,26 @@ func (h *histogram) report(name string, tags map[string]string, r StatsReporter) {
 		if samples == 0 {
 			continue
 		}
+
 		switch h.htype {
 		case valueHistogramType:
-			r.ReportHistogramValueSamples(name, tags, h.specification,
-				h.buckets[i].valueLowerBound, h.buckets[i].valueUpperBound,
-				samples)
+			r.ReportHistogramValueSamples(
+				name,
+				tags,
+				h.specification,
+				h.buckets[i].valueLowerBound,
+				h.buckets[i].valueUpperBound,
+				samples,
+			)
 		case durationHistogramType:
-			
r.ReportHistogramDurationSamples(name, tags, h.specification, - h.buckets[i].durationLowerBound, h.buckets[i].durationUpperBound, - samples) + r.ReportHistogramDurationSamples( + name, + tags, + h.specification, + h.buckets[i].durationLowerBound, + h.buckets[i].durationUpperBound, + samples, + ) } } } @@ -339,6 +368,7 @@ func (h *histogram) cachedReport() { if samples == 0 { continue } + switch h.htype { case valueHistogramType: h.buckets[i].cachedValueBucket.ReportSamples(samples) @@ -349,6 +379,10 @@ func (h *histogram) cachedReport() { } func (h *histogram) RecordValue(value float64) { + if h.htype != valueHistogramType { + return + } + // Find the highest inclusive of the bucket upper bound // and emit directly to it. Since we use BucketPairs to derive // buckets there will always be an inclusive bucket as @@ -358,6 +392,10 @@ func (h *histogram) RecordValue(value float64) { } func (h *histogram) RecordDuration(value time.Duration) { + if h.htype != durationHistogramType { + return + } + // Find the highest inclusive of the bucket upper bound // and emit directly to it. Since we use BucketPairs to derive // buckets there will always be an inclusive bucket as @@ -376,7 +414,7 @@ func (h *histogram) RecordStopwatch(stopwatchStart time.Time) { } func (h *histogram) snapshotValues() map[float64]int64 { - if h.htype == durationHistogramType { + if h.htype != valueHistogramType { return nil } @@ -389,7 +427,7 @@ func (h *histogram) snapshotValues() map[float64]int64 { } func (h *histogram) snapshotDurations() map[time.Duration]int64 { - if h.htype == valueHistogramType { + if h.htype != durationHistogramType { return nil } @@ -414,9 +452,9 @@ type histogramBucket struct { func newHistogramBucket( h *histogram, - valueLowerBound, + valueLowerBound float64, valueUpperBound float64, - durationLowerBound, + durationLowerBound time.Duration, durationUpperBound time.Duration, cachedHistogram CachedHistogram, ) histogramBucket { @@ -427,14 +465,21 @@ func newHistogramBucket( durationLowerBound: durationLowerBound, durationUpperBound: durationUpperBound, } + if cachedHistogram != nil { - bucket.cachedValueBucket = cachedHistogram.ValueBucket( - bucket.valueLowerBound, bucket.valueUpperBound, - ) - bucket.cachedDurationBucket = cachedHistogram.DurationBucket( - bucket.durationLowerBound, bucket.durationUpperBound, - ) + if h.htype == valueHistogramType { + bucket.cachedValueBucket = cachedHistogram.ValueBucket( + bucket.valueLowerBound, bucket.valueUpperBound, + ) + } + + if h.htype == durationHistogramType { + bucket.cachedDurationBucket = cachedHistogram.DurationBucket( + bucket.durationLowerBound, bucket.durationUpperBound, + ) + } } + return bucket } From 2289f613c08cf2ea2a5321e5e70d4ed8f6e6664d Mon Sep 17 00:00:00 2001 From: Matt Way Date: Thu, 11 Feb 2021 16:17:50 -0500 Subject: [PATCH 02/25] Reduce Thrift pointers and share most histogram storage --- glide.lock | 8 +- glide.yaml | 2 + histogram.go | 8 +- internal/identity/accumulator.go | 63 ++ m3/example/local_server.go | 2 +- m3/example/m3_main.go | 2 +- m3/reporter.go | 298 +++---- m3/reporter_benchmark_test.go | 8 +- m3/reporter_test.go | 106 +-- m3/resource_pool.go | 181 +--- m3/resource_pool_test.go | 89 +- m3/thrift/Makefile | 12 +- m3/thrift/constants.go | 39 - m3/thrift/v1/constants.go | 18 + m3/thrift/{ => v1}/m3.go | 80 +- m3/thrift/{ => v1}/ttypes.go | 461 ++++++---- m3/thrift/{v1.0.0/m3.thrift => v1/v1.thrift} | 0 m3/thrift/v2/constants.go | 18 + m3/thrift/v2/m3.go | 242 ++++++ m3/thrift/v2/ttypes.go | 849 +++++++++++++++++++ 
scope.go | 37 +- scope_registry.go | 1 + scope_test.go | 14 +- stats.go | 147 ++-- stats_test.go | 6 +- 25 files changed, 1873 insertions(+), 818 deletions(-) create mode 100644 internal/identity/accumulator.go delete mode 100644 m3/thrift/constants.go create mode 100644 m3/thrift/v1/constants.go rename m3/thrift/{ => v1}/m3.go (64%) rename m3/thrift/{ => v1}/ttypes.go (58%) rename m3/thrift/{v1.0.0/m3.thrift => v1/v1.thrift} (100%) create mode 100644 m3/thrift/v2/constants.go create mode 100644 m3/thrift/v2/m3.go create mode 100644 m3/thrift/v2/ttypes.go diff --git a/glide.lock b/glide.lock index 1fcc69d0..05853b28 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 6c5a37d4f995175d7ab310d09b5866057c683536b0ae3d8f478f87943aa03be4 -updated: 2021-02-05T09:45:14.308188-05:00 +hash: bfadfc85c3b6bf803090b34bbbcc4b9e16165226e8b4c194658b8ba105c3438b +updated: 2021-02-11T15:18:03.188401-05:00 imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 @@ -36,8 +36,8 @@ imports: - pbutil - name: github.com/pkg/errors version: 614d223910a179a466c1767a985424175c39b465 -- name: github.com/pkg/profile - version: 5b67d428864e92711fcbd2f8629456121a56d91f +- name: github.com/twmb/murmur3 + version: 610077ff6fd864908e4604f64b6c0fc000aa6232 - name: go.uber.org/atomic version: 12f27ba2637fa0e13772a4f05fa46a5d18d53182 - name: gopkg.in/validator.v2 diff --git a/glide.yaml b/glide.yaml index 8dd97e92..3b31589a 100644 --- a/glide.yaml +++ b/glide.yaml @@ -18,6 +18,8 @@ import: version: ^1 - package: github.com/pkg/errors version: ^0.8.1 +- package: github.com/twmb/murmur3 + version: ^1.1.5 testImport: - package: github.com/axw/gocov version: 54b98cfcac0c63fb3f9bd8e7ad241b724d4e985b diff --git a/histogram.go b/histogram.go index 9b4bf67b..caefed2c 100644 --- a/histogram.go +++ b/histogram.go @@ -217,18 +217,14 @@ func BucketPairs(buckets Buckets) []BucketPair { func copyAndSortValues(values []float64) []float64 { valuesCopy := make([]float64, len(values)) - for i := range values { - valuesCopy[i] = values[i] - } + copy(valuesCopy, values) sort.Sort(ValueBuckets(valuesCopy)) return valuesCopy } func copyAndSortDurations(durations []time.Duration) []time.Duration { durationsCopy := make([]time.Duration, len(durations)) - for i := range durations { - durationsCopy[i] = durations[i] - } + copy(durationsCopy, durations) sort.Sort(DurationBuckets(durationsCopy)) return durationsCopy } diff --git a/internal/identity/accumulator.go b/internal/identity/accumulator.go new file mode 100644 index 00000000..7a8194cd --- /dev/null +++ b/internal/identity/accumulator.go @@ -0,0 +1,63 @@ +package identity + +import ( + "github.com/twmb/murmur3" +) + +const ( + _hashSeed uint64 = 23 + _hashFold uint64 = 31 +) + +// Accumulator is a commutative folding accumulator. +type Accumulator uint64 + +// NewAccumulator creates a new Accumulator with a default seed value. +//go:nosplit +func NewAccumulator() Accumulator { + return Accumulator(_hashSeed) +} + +// NewAccumulatorWithSeed creates a new Accumulator with the provided seed value. +//go:nosplit +func NewAccumulatorWithSeed(seed uint64) Accumulator { + return Accumulator(seed) +} + +// AddString hashes str and folds it into the accumulator. +//go:nosplit +func (a Accumulator) AddString(str string) Accumulator { + return a + (Accumulator(murmur3.StringSum64(str)) * Accumulator(_hashFold)) +} + +// AddStrings serially hashes and folds each of strs into the accumulator. 
+//go:nosplit +func (a Accumulator) AddStrings(strs ...string) Accumulator { + for _, str := range strs { + a += (Accumulator(murmur3.StringSum64(str)) * Accumulator(_hashFold)) + } + + return a +} + +// AddUint64 folds u64 into the accumulator. +//go:nosplit +func (a Accumulator) AddUint64(u64 uint64) Accumulator { + return a + Accumulator(u64*_hashFold) +} + +// AddUint64s serially folds each of u64s into the accumulator. +//go:nosplit +func (a Accumulator) AddUint64s(u64s ...uint64) Accumulator { + for _, u64 := range u64s { + a += Accumulator(u64 * _hashFold) + } + + return a +} + +// Value returns the accumulated value. +//go:nosplit +func (a Accumulator) Value() uint64 { + return uint64(a) +} diff --git a/m3/example/local_server.go b/m3/example/local_server.go index 884abde2..5e197d6f 100644 --- a/m3/example/local_server.go +++ b/m3/example/local_server.go @@ -28,7 +28,7 @@ import ( "github.com/uber-go/tally/m3" customtransport "github.com/uber-go/tally/m3/customtransports" - m3thrift "github.com/uber-go/tally/m3/thrift" + m3thrift "github.com/uber-go/tally/m3/thrift/v1" "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) diff --git a/m3/example/m3_main.go b/m3/example/m3_main.go index 0e1f05ea..1170e87b 100644 --- a/m3/example/m3_main.go +++ b/m3/example/m3_main.go @@ -31,7 +31,7 @@ import ( "github.com/uber-go/tally" "github.com/uber-go/tally/m3" - m3thrift "github.com/uber-go/tally/m3/thrift" + m3thrift "github.com/uber-go/tally/m3/thrift/v1" validator "gopkg.in/validator.v2" yaml "gopkg.in/yaml.v2" diff --git a/m3/reporter.go b/m3/reporter.go index d4ac05a2..c13db93f 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -33,11 +33,15 @@ import ( "github.com/pkg/errors" "github.com/uber-go/tally" customtransport "github.com/uber-go/tally/m3/customtransports" - m3thrift "github.com/uber-go/tally/m3/thrift" + m3thrift "github.com/uber-go/tally/m3/thrift/v2" "github.com/uber-go/tally/m3/thriftudp" "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) +// need to pool: +// []MetricTag +// []Metric + // Protocol describes a M3 thrift transport protocol. type Protocol int @@ -72,17 +76,11 @@ const ( minMetricBucketIDTagLength = 4 ) -// Initialize max vars in init function to avoid lint error. var ( - maxInt64 int64 - maxFloat64 float64 + _maxInt64 = int64(math.MaxInt64) + _maxFloat64 = math.MaxFloat64 ) -func init() { - maxInt64 = math.MaxInt64 - maxFloat64 = math.MaxFloat64 -} - type metricType int const ( @@ -107,13 +105,14 @@ type Reporter interface { // remote M3 collector, metrics are batched together and emitted // via either thrift compact or binary protocol in batch UDP packets. 
type reporter struct { - client *m3thrift.M3Client - curBatch *m3thrift.MetricBatch - curBatchLock sync.Mutex - calc *customtransport.TCalcTransport - calcProto thrift.TProtocol - calcLock sync.Mutex - commonTags map[*m3thrift.MetricTag]bool + client *m3thrift.M3Client + // curBatch *m3thrift.MetricBatch + // curBatchLock sync.Mutex + calc *customtransport.TCalcTransport + calcProto thrift.TProtocol + calcLock sync.Mutex + // commonTags map[*m3thrift.MetricTag]bool + commonTags []m3thrift.MetricTag freeBytes int32 processors sync.WaitGroup resourcePool *resourcePool @@ -184,25 +183,32 @@ func NewReporter(opts Options) (Reporter, error) { protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() } - client := m3thrift.NewM3ClientFactory(trans, protocolFactory) - resourcePool := newResourcePool(protocolFactory) + var ( + client = m3thrift.NewM3ClientFactory(trans, protocolFactory) + resourcePool = newResourcePool(protocolFactory) + tagm = make(map[string]string) + tags = resourcePool.getMetricTagSlice() + ) // Create common tags - tags := resourcePool.getTagList() + // tags := resourcePool.getTagList() for k, v := range opts.CommonTags { - tags[createTag(resourcePool, k, v)] = true + tagm[k] = v + // tags[createTag(resourcePool, k, v)] = true } if opts.CommonTags[ServiceTag] == "" { if opts.Service == "" { return nil, fmt.Errorf("%s common tag is required", ServiceTag) } - tags[createTag(resourcePool, ServiceTag, opts.Service)] = true + // tags[createTag(resourcePool, ServiceTag, opts.Service)] = true + tagm[ServiceTag] = opts.Service } if opts.CommonTags[EnvTag] == "" { if opts.Env == "" { return nil, fmt.Errorf("%s common tag is required", EnvTag) } - tags[createTag(resourcePool, EnvTag, opts.Env)] = true + // tags[createTag(resourcePool, EnvTag, opts.Env)] = true + tagm[EnvTag] = opts.Env } if opts.IncludeHost { if opts.CommonTags[HostTag] == "" { @@ -210,16 +216,27 @@ func NewReporter(opts Options) (Reporter, error) { if err != nil { return nil, errors.WithMessage(err, "error resolving host tag") } - tags[createTag(resourcePool, HostTag, hostname)] = true + // tags[createTag(resourcePool, HostTag, hostname)] = true + tagm[HostTag] = hostname } } + for k, v := range tagm { + tags = append(tags, m3thrift.MetricTag{ + Name: k, + Value: v, + }) + } + // Calculate size of common tags - batch := resourcePool.getBatch() - batch.CommonTags = tags - batch.Metrics = []*m3thrift.Metric{} + var ( + batch = m3thrift.MetricBatch{ + Metrics: resourcePool.getMetricSlice(), + CommonTags: tags, + } + proto = resourcePool.getProto() + ) - proto := resourcePool.getProto() if err := batch.Write(proto); err != nil { return nil, errors.WithMessage( err, @@ -227,6 +244,8 @@ func NewReporter(opts Options) (Reporter, error) { ) } + resourcePool.releaseMetricSlice(batch.Metrics) + var ( calc = proto.Transport().(*customtransport.TCalcTransport) numOverheadBytes = emitMetricBatchOverhead + calc.GetCount() @@ -240,7 +259,6 @@ func NewReporter(opts Options) (Reporter, error) { r := &reporter{ client: client, - curBatch: batch, calc: calc, calcProto: proto, commonTags: tags, @@ -330,40 +348,71 @@ func (r *reporter) AllocateHistogram( )) bucketIDLenStr = strconv.Itoa(bucketIDLen) bucketIDFmt = "%0" + bucketIDLenStr + "d" + htags = make(map[string]string, len(tags)) cachedValueBuckets []cachedHistogramBucket cachedDurationBuckets []cachedHistogramBucket ) + for k, v := range tags { + htags[k] = v + } + for i, pair := range tally.BucketPairs(buckets) { - var ( - histTags = make(map[string]string, len(tags)) - 
idTagValue = fmt.Sprintf(bucketIDFmt, i) - ) - for k, v := range tags { - histTags[k] = v - } - histTags[r.bucketIDTagName] = idTagValue + idTagValue := fmt.Sprintf(bucketIDFmt, i) if isDuration { - histTags[r.bucketTagName] = - r.durationBucketString(pair.LowerBoundDuration()) + "-" + - r.durationBucketString(pair.UpperBoundDuration()) - - cachedDurationBuckets = append(cachedDurationBuckets, cachedHistogramBucket{ - valueUpperBound: pair.UpperBoundValue(), - durationUpperBound: pair.UpperBoundDuration(), - metric: r.allocateCounter(name, histTags), - }) + var ( + counter = r.allocateCounter(name, htags) + bucket = r.durationBucketString(pair.LowerBoundDuration()) + + "-" + r.durationBucketString(pair.UpperBoundDuration()) + ) + counter.metric.Tags = append( + counter.metric.Tags, + m3thrift.MetricTag{ + Name: r.bucketIDTagName, + Value: idTagValue, + }, + m3thrift.MetricTag{ + Name: r.bucketTagName, + Value: bucket, + }, + ) + + cachedDurationBuckets = append( + cachedDurationBuckets, + cachedHistogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + metric: counter, + }, + ) } else { - histTags[r.bucketTagName] = - r.valueBucketString(pair.LowerBoundValue()) + "-" + - r.valueBucketString(pair.UpperBoundValue()) - - cachedValueBuckets = append(cachedValueBuckets, cachedHistogramBucket{ - valueUpperBound: pair.UpperBoundValue(), - durationUpperBound: pair.UpperBoundDuration(), - metric: r.allocateCounter(name, histTags), - }) + var ( + counter = r.allocateCounter(name, htags) + bucket = r.valueBucketString(pair.LowerBoundValue()) + + "-" + r.valueBucketString(pair.UpperBoundValue()) + ) + + counter.metric.Tags = append( + counter.metric.Tags, + m3thrift.MetricTag{ + Name: r.bucketIDTagName, + Value: idTagValue, + }, + m3thrift.MetricTag{ + Name: r.bucketTagName, + Value: bucket, + }, + ) + + cachedValueBuckets = append( + cachedValueBuckets, + cachedHistogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + metric: counter, + }, + ) } } @@ -403,47 +452,39 @@ func (r *reporter) newMetric( name string, tags map[string]string, t metricType, -) *m3thrift.Metric { - var ( - m = r.resourcePool.getMetric() - metVal = r.resourcePool.getValue() - ) - m.Name = name - if tags != nil { - metTags := r.resourcePool.getTagList() - for k, v := range tags { - val := v - metTag := r.resourcePool.getTag() - metTag.TagName = k - metTag.TagValue = &val - metTags[metTag] = true - } - m.Tags = metTags - } else { - m.Tags = nil +) m3thrift.Metric { + m := m3thrift.Metric{ + Name: name, + Timestamp: _maxInt64, } - m.Timestamp = &maxInt64 switch t { case counterType: - c := r.resourcePool.getCount() - c.I64Value = &maxInt64 - metVal.Count = c + m.Value.MetricType = m3thrift.MetricType_COUNTER + m.Value.Count = _maxInt64 case gaugeType: - g := r.resourcePool.getGauge() - g.DValue = &maxFloat64 - metVal.Gauge = g + m.Value.MetricType = m3thrift.MetricType_GAUGE + m.Value.Gauge = _maxFloat64 case timerType: - t := r.resourcePool.getTimer() - t.I64Value = &maxInt64 - metVal.Timer = t + m.Value.MetricType = m3thrift.MetricType_TIMER + m.Value.Timer = _maxInt64 + } + + if len(tags) > 0 { + m.Tags = r.resourcePool.getMetricTagSlice() + + for k, v := range tags { + m.Tags = append(m.Tags, m3thrift.MetricTag{ + Name: k, + Value: v, + }) + } } - m.MetricValue = metVal return m } -func (r *reporter) calculateSize(m *m3thrift.Metric) int32 { +func (r *reporter) calculateSize(m m3thrift.Metric) int32 { r.calcLock.Lock() 
m.Write(r.calcProto) //nolint:errcheck size := r.calc.GetCount() @@ -452,40 +493,21 @@ func (r *reporter) calculateSize(m *m3thrift.Metric) int32 { return size } -func (r *reporter) reportCopyMetric( - m *m3thrift.Metric, - size int32, - t metricType, - iValue int64, - dValue float64, -) { - copy := r.resourcePool.getMetric() - copy.Name = m.Name - copy.Tags = m.Tags - timestampNano := time.Now().UnixNano() - copy.Timestamp = ×tampNano - copy.MetricValue = r.resourcePool.getValue() +func (r *reporter) reportCopyMetric(m m3thrift.Metric, size int32) { + m.Timestamp = time.Now().UnixNano() - switch t { - case counterType: - c := r.resourcePool.getCount() - c.I64Value = &iValue - copy.MetricValue.Count = c - case gaugeType: - g := r.resourcePool.getGauge() - g.DValue = &dValue - copy.MetricValue.Gauge = g - case timerType: - t := r.resourcePool.getTimer() - t.I64Value = &iValue - copy.MetricValue.Timer = t + sm := sizedMetric{ + m: m, + size: size, + set: true, } r.status.RLock() if !r.status.closed { select { - case r.metCh <- sizedMetric{copy, size}: + case r.metCh <- sm: default: + // TODO } } r.status.RUnlock() @@ -531,12 +553,12 @@ func (r *reporter) Tagging() bool { func (r *reporter) process() { var ( - mets = make([]*m3thrift.Metric, 0, (r.freeBytes / 10)) + mets = make([]m3thrift.Metric, 0, (r.freeBytes / 10)) bytes int32 ) for smet := range r.metCh { - if smet.m == nil { + if !smet.set { // Explicit flush requested if len(mets) > 0 { mets = r.flush(mets) @@ -563,56 +585,41 @@ func (r *reporter) process() { } func (r *reporter) flush( - mets []*m3thrift.Metric, -) []*m3thrift.Metric { - r.curBatchLock.Lock() - r.curBatch.Metrics = mets - r.client.EmitMetricBatch(r.curBatch) //nolint:errcheck - r.curBatch.Metrics = nil - r.curBatchLock.Unlock() - - r.resourcePool.releaseShallowMetrics(mets) + mets []m3thrift.Metric, +) []m3thrift.Metric { + //nolint:errcheck + r.client.EmitMetricBatch(m3thrift.MetricBatch{ + Metrics: mets, + CommonTags: r.commonTags, + }) - for i := range mets { - mets[i] = nil - } return mets[:0] } -func createTag( - pool *resourcePool, - tagName, tagValue string, -) *m3thrift.MetricTag { - tag := pool.getTag() - tag.TagName = tagName - if tagValue != "" { - tag.TagValue = &tagValue - } - - return tag -} - type cachedMetric struct { - metric *m3thrift.Metric + metric m3thrift.Metric reporter *reporter size int32 } func (c cachedMetric) ReportCount(value int64) { - c.reporter.reportCopyMetric(c.metric, c.size, counterType, value, 0) + c.metric.Value.Count = value + c.reporter.reportCopyMetric(c.metric, c.size) } func (c cachedMetric) ReportGauge(value float64) { - c.reporter.reportCopyMetric(c.metric, c.size, gaugeType, 0, value) + c.metric.Value.Gauge = value + c.reporter.reportCopyMetric(c.metric, c.size) } func (c cachedMetric) ReportTimer(interval time.Duration) { - val := int64(interval) - c.reporter.reportCopyMetric(c.metric, c.size, timerType, val, 0) + c.metric.Value.Timer = int64(interval) + c.reporter.reportCopyMetric(c.metric, c.size) } func (c cachedMetric) ReportSamples(value int64) { - c.reporter.reportCopyMetric(c.metric, c.size, counterType, value, 0) + c.metric.Value.Count = value + c.reporter.reportCopyMetric(c.metric, c.size) } type noopMetric struct { @@ -681,6 +688,7 @@ func (h cachedHistogram) DurationBucket( } type sizedMetric struct { - m *m3thrift.Metric + m m3thrift.Metric size int32 + set bool } diff --git a/m3/reporter_benchmark_test.go b/m3/reporter_benchmark_test.go index 51be337f..05ecb838 100644 --- a/m3/reporter_benchmark_test.go +++ 
b/m3/reporter_benchmark_test.go
@@ -76,9 +76,8 @@ func BenchmarkCalulateSize(b *testing.B) {
 	defer r.Close()
 	benchReporter := r.(*reporter)
 
-	val := int64(123456)
 	met := benchReporter.newMetric("foo", map[string]string{"domain": "foo"}, counterType)
-	met.MetricValue.Count.I64Value = &val
+	met.Value.Count = 123456
 
 	b.ResetTimer()
 
@@ -100,10 +99,9 @@ func BenchmarkTimer(b *testing.B) {
 	benchReporter := r.(*reporter)
 
 	go func() {
-		resourcePool := benchReporter.resourcePool
 		// Blindly consume metrics
-		for met := range benchReporter.metCh {
-			resourcePool.releaseShallowMetric(met.m)
+		for range benchReporter.metCh {
+			// nop
 		}
 	}()
 
diff --git a/m3/reporter_test.go b/m3/reporter_test.go
index 54a044e8..9e55a097 100644
--- a/m3/reporter_test.go
+++ b/m3/reporter_test.go
@@ -33,7 +33,7 @@ import (
 
 	"github.com/uber-go/tally"
 	customtransport "github.com/uber-go/tally/m3/customtransports"
-	m3thrift "github.com/uber-go/tally/m3/thrift"
+	m3thrift "github.com/uber-go/tally/m3/thrift/v2"
 	"github.com/uber-go/tally/m3/thriftudp"
 
 	"github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift"
@@ -102,11 +102,11 @@ func TestReporter(t *testing.T) {
 			require.NotNil(t, batch)
 			require.True(t, batch.IsSetCommonTags())
 			require.Equal(t, len(commonTags)+1, len(batch.GetCommonTags()))
-			for tag := range batch.GetCommonTags() {
-				if tag.GetTagName() == ServiceTag {
-					require.Equal(t, "test-service", tag.GetTagValue())
+			for _, tag := range batch.GetCommonTags() {
+				if tag.GetName() == ServiceTag {
+					require.Equal(t, "test-service", tag.GetValue())
 				} else {
-					require.Equal(t, commonTags[tag.GetTagName()], tag.GetTagValue())
+					require.Equal(t, commonTags[tag.GetName()], tag.GetValue())
 				}
 			}
 		}
@@ -125,26 +125,18 @@ func TestReporter(t *testing.T) {
 		require.Equal(t, "my-counter", emittedCounter.GetName())
 		require.True(t, emittedCounter.IsSetTags())
 		require.Equal(t, len(tags), len(emittedCounter.GetTags()))
-		for tag := range emittedCounter.GetTags() {
-			require.Equal(t, tags[tag.GetTagName()], tag.GetTagValue())
+		for _, tag := range emittedCounter.GetTags() {
+			require.Equal(t, tags[tag.GetName()], tag.GetValue())
 		}
-		require.True(t, emittedCounter.IsSetMetricValue())
-		emittedVal := emittedCounter.GetMetricValue()
-		require.True(t, emittedVal.IsSetCount())
-		require.False(t, emittedVal.IsSetGauge())
-		require.False(t, emittedVal.IsSetTimer())
+		require.True(t, emittedCounter.IsSetValue())
+		emittedVal := emittedCounter.GetValue()
 		emittedCount := emittedVal.GetCount()
-		require.True(t, emittedCount.IsSetI64Value())
-		require.EqualValues(t, int64(10), emittedCount.GetI64Value())
-
-		require.True(t, emittedTimer.IsSetMetricValue())
-		emittedVal = emittedTimer.GetMetricValue()
-		require.False(t, emittedVal.IsSetCount())
-		require.False(t, emittedVal.IsSetGauge())
-		require.True(t, emittedVal.IsSetTimer())
+		require.EqualValues(t, int64(10), emittedCount)
+
+		require.True(t, emittedTimer.IsSetValue())
+		emittedVal = emittedTimer.GetValue()
 		emittedTimerVal := emittedVal.GetTimer()
-		require.True(t, emittedTimerVal.IsSetI64Value())
-		require.EqualValues(t, int64(5*1000*1000), emittedTimerVal.GetI64Value())
+		require.EqualValues(t, int64(5*1000*1000), emittedTimerVal)
 	}
 }
@@ -327,43 +319,35 @@ func TestReporterHistogram(t *testing.T) {
 	counter := server.Service.getBatches()[0].GetMetrics()[0]
 	require.Equal(t, "my-histogram", counter.GetName())
 	require.True(t, counter.IsSetTags())
 	require.Equal(t, 3, len(counter.GetTags()))
-	for tag := range counter.GetTags() {
+	for _, tag := range counter.GetTags() {
 		require.Equal(t, map[string]string{
 			"foo":      "bar",
 			"bucketid": "0001",
 			"bucket":   "0-25ms",
-		}[tag.GetTagName()], tag.GetTagValue())
+		}[tag.GetName()], tag.GetValue())
 	}
-	require.True(t, counter.IsSetMetricValue())
-	val := counter.GetMetricValue()
-	require.True(t, val.IsSetCount())
-	require.False(t, val.IsSetGauge())
-	require.False(t, val.IsSetTimer())
+	require.True(t, counter.IsSetValue())
+	val := counter.GetValue()
 	count := val.GetCount()
-	require.True(t, count.IsSetI64Value())
-	require.Equal(t, int64(7), count.GetI64Value())
+	require.Equal(t, int64(7), count)
 
 	// Verify second bucket
 	counter = server.Service.getBatches()[0].GetMetrics()[1]
 	require.Equal(t, "my-histogram", counter.GetName())
 	require.True(t, counter.IsSetTags())
 	require.Equal(t, 3, len(counter.GetTags()))
-	for tag := range counter.GetTags() {
+	for _, tag := range counter.GetTags() {
 		require.Equal(t, map[string]string{
 			"foo":      "bar",
 			"bucketid": "0003",
 			"bucket":   "50ms-75ms",
-		}[tag.GetTagName()], tag.GetTagValue())
+		}[tag.GetName()], tag.GetValue())
 	}
-	require.True(t, counter.IsSetMetricValue())
-	val = counter.GetMetricValue()
-	require.True(t, val.IsSetCount())
-	require.False(t, val.IsSetGauge())
-	require.False(t, val.IsSetTimer())
+	require.True(t, counter.IsSetValue())
+	val = counter.GetValue()
 	count = val.GetCount()
-	require.True(t, count.IsSetI64Value())
-	require.Equal(t, int64(3), count.GetI64Value())
+	require.Equal(t, int64(3), count)
 }
 
 func TestBatchSizes(t *testing.T) {
@@ -455,14 +439,14 @@ func TestReporterSpecifyService(t *testing.T) {
 	reporter, ok := r.(*reporter)
 	require.True(t, ok)
 	assert.Equal(t, 3, len(reporter.commonTags))
-	for tag := range reporter.commonTags {
-		switch tag.GetTagName() {
+	for _, tag := range reporter.commonTags {
+		switch tag.GetName() {
 		case ServiceTag:
-			assert.Equal(t, "overrideService", tag.GetTagValue())
+			assert.Equal(t, "overrideService", tag.GetValue())
 		case EnvTag:
-			assert.Equal(t, "test", tag.GetTagValue())
+			assert.Equal(t, "test", tag.GetValue())
 		case HostTag:
-			assert.Equal(t, "overrideHost", tag.GetTagValue())
+			assert.Equal(t, "overrideHost", tag.GetValue())
 		}
 	}
 }
@@ -555,8 +539,8 @@ func TestReporterResetTagsAfterReturnToPool(t *testing.T) {
 	metrics := server.Service.getMetrics()
 	require.Equal(t, 2, len(metrics))
 	require.Equal(t, len(tags), len(metrics[0].GetTags()))
-	for tag := range metrics[0].GetTags() {
-		require.Equal(t, tags[tag.GetTagName()], tag.GetTagValue())
+	for _, tag := range metrics[0].GetTags() {
+		require.Equal(t, tags[tag.GetName()], tag.GetValue())
 	}
 	require.Equal(t, 0, len(metrics[1].GetTags()))
 }
@@ -660,25 +644,25 @@ func newFakeM3Service(wg *sync.WaitGroup, countBatches bool) *fakeM3Service {
 
 type fakeM3Service struct {
 	lock         sync.RWMutex
-	batches      []*m3thrift.MetricBatch
-	metrics      []*m3thrift.Metric
+	batches      []m3thrift.MetricBatch
+	metrics      []m3thrift.Metric
 	wg           *sync.WaitGroup
 	countBatches bool
 }
 
-func (m *fakeM3Service) getBatches() []*m3thrift.MetricBatch {
+func (m *fakeM3Service) getBatches() []m3thrift.MetricBatch {
 	m.lock.RLock()
 	defer m.lock.RUnlock()
 	return m.batches
}
 
-func (m *fakeM3Service) getMetrics() []*m3thrift.Metric {
+func (m *fakeM3Service) getMetrics() []m3thrift.Metric {
 	m.lock.RLock()
 	defer m.lock.RUnlock()
 	return m.metrics
 }
 
-func (m *fakeM3Service) EmitMetricBatch(batch *m3thrift.MetricBatch) (err error) {
+func (m 
*fakeM3Service) EmitMetricBatch(batch m3thrift.MetricBatch) (err error) { m.lock.Lock() m.batches = append(m.batches, batch) if m.wg != nil && m.countBatches { @@ -704,19 +690,19 @@ func hostname() string { return host } -func tagIncluded(tags map[*m3thrift.MetricTag]bool, tagName string) bool { - for k, v := range tags { - if v && k.TagName == tagName { +func tagIncluded(tags []m3thrift.MetricTag, tagName string) bool { + for _, tag := range tags { + if tag.Name == tagName { return true } } return false } -func tagEquals(tags map[*m3thrift.MetricTag]bool, tagName, tagValue string) bool { - for k, v := range tags { - if v && k.GetTagName() == tagName { - return k.GetTagValue() == tagValue +func tagEquals(tags []m3thrift.MetricTag, tagName string, tagValue string) bool { + for _, tag := range tags { + if tag.GetName() == tagName && tag.GetValue() == tagValue { + return true } } return false diff --git a/m3/resource_pool.go b/m3/resource_pool.go index 10da73f4..fbf7a127 100644 --- a/m3/resource_pool.go +++ b/m3/resource_pool.go @@ -23,66 +23,31 @@ package m3 import ( "github.com/uber-go/tally" customtransport "github.com/uber-go/tally/m3/customtransports" - m3thrift "github.com/uber-go/tally/m3/thrift" + m3thrift "github.com/uber-go/tally/m3/thrift/v2" "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) const ( - batchPoolSize = 10 - metricPoolSize = DefaultMaxQueueSize - valuePoolSize = DefaultMaxQueueSize - timerPoolSize = DefaultMaxQueueSize - tagPoolSize = DefaultMaxQueueSize - counterPoolSize = DefaultMaxQueueSize - gaugePoolSize = DefaultMaxQueueSize - protoPoolSize = 10 + batchPoolSize = 10 + metricPoolSize = DefaultMaxQueueSize + protoPoolSize = 10 ) type resourcePool struct { - batchPool *tally.ObjectPool - metricPool *tally.ObjectPool - tagPool *tally.ObjectPool - valuePool *tally.ObjectPool - counterPool *tally.ObjectPool - gaugePool *tally.ObjectPool - timerPool *tally.ObjectPool - protoPool *tally.ObjectPool + metricSlicePool *tally.ObjectPool + metricTagSlicePool *tally.ObjectPool + protoPool *tally.ObjectPool } func newResourcePool(protoFac thrift.TProtocolFactory) *resourcePool { - batchPool := tally.NewObjectPool(batchPoolSize) - batchPool.Init(func() interface{} { - return m3thrift.NewMetricBatch() + metricSlicePool := tally.NewObjectPool(batchPoolSize) + metricSlicePool.Init(func() interface{} { + return make([]m3thrift.Metric, 0, batchPoolSize) }) - metricPool := tally.NewObjectPool(metricPoolSize) - metricPool.Init(func() interface{} { - return m3thrift.NewMetric() - }) - - tagPool := tally.NewObjectPool(tagPoolSize) - tagPool.Init(func() interface{} { - return m3thrift.NewMetricTag() - }) - - valuePool := tally.NewObjectPool(valuePoolSize) - valuePool.Init(func() interface{} { - return m3thrift.NewMetricValue() - }) - - counterPool := tally.NewObjectPool(counterPoolSize) - counterPool.Init(func() interface{} { - return m3thrift.NewCountValue() - }) - - gaugePool := tally.NewObjectPool(gaugePoolSize) - gaugePool.Init(func() interface{} { - return m3thrift.NewGaugeValue() - }) - - timerPool := tally.NewObjectPool(timerPoolSize) - timerPool.Init(func() interface{} { - return m3thrift.NewTimerValue() + metricTagSlicePool := tally.NewObjectPool(DefaultMaxQueueSize) + metricTagSlicePool.Init(func() interface{} { + return make([]m3thrift.MetricTag, 0, batchPoolSize) }) protoPool := tally.NewObjectPool(protoPoolSize) @@ -91,54 +56,18 @@ func newResourcePool(protoFac thrift.TProtocolFactory) *resourcePool { }) return &resourcePool{ - batchPool: 
batchPool,
-		metricPool:  metricPool,
-		tagPool:     tagPool,
-		valuePool:   valuePool,
-		counterPool: counterPool,
-		gaugePool:   gaugePool,
-		timerPool:   timerPool,
-		protoPool:   protoPool,
+		metricSlicePool:    metricSlicePool,
+		metricTagSlicePool: metricTagSlicePool,
+		protoPool:          protoPool,
 	}
 }
 
-func (r *resourcePool) getBatch() *m3thrift.MetricBatch {
-	o := r.batchPool.Get()
-	return o.(*m3thrift.MetricBatch)
-}
-
-func (r *resourcePool) getMetric() *m3thrift.Metric {
-	o := r.metricPool.Get()
-	return o.(*m3thrift.Metric)
-}
-
-func (r *resourcePool) getTagList() map[*m3thrift.MetricTag]bool {
-	return map[*m3thrift.MetricTag]bool{}
-}
-
-func (r *resourcePool) getTag() *m3thrift.MetricTag {
-	o := r.tagPool.Get()
-	return o.(*m3thrift.MetricTag)
-}
-
-func (r *resourcePool) getValue() *m3thrift.MetricValue {
-	o := r.valuePool.Get()
-	return o.(*m3thrift.MetricValue)
-}
-
-func (r *resourcePool) getCount() *m3thrift.CountValue {
-	o := r.counterPool.Get()
-	return o.(*m3thrift.CountValue)
-}
-
-func (r *resourcePool) getGauge() *m3thrift.GaugeValue {
-	o := r.gaugePool.Get()
-	return o.(*m3thrift.GaugeValue)
+func (r *resourcePool) getMetricSlice() []m3thrift.Metric {
+	return r.metricSlicePool.Get().([]m3thrift.Metric)
 }
 
-func (r *resourcePool) getTimer() *m3thrift.TimerValue {
-	o := r.timerPool.Get()
-	return o.(*m3thrift.TimerValue)
+func (r *resourcePool) getMetricTagSlice() []m3thrift.MetricTag {
+	return r.metricTagSlicePool.Get().([]m3thrift.MetricTag)
 }
 
 func (r *resourcePool) getProto() thrift.TProtocol {
@@ -153,73 +82,15 @@ func (r *resourcePool) releaseProto(proto thrift.TProtocol) {
 	r.protoPool.Put(proto)
 }
 
-func (r *resourcePool) releaseBatch(batch *m3thrift.MetricBatch) {
-	batch.CommonTags = nil
-	for _, metric := range batch.Metrics {
-		r.releaseMetric(metric)
+func (r *resourcePool) releaseMetricSlice(metrics []m3thrift.Metric) {
+	for i := 0; i < len(metrics); i++ {
+		metrics[i].Tags = nil
 	}
-	batch.Metrics = nil
-	r.batchPool.Put(batch)
-}
-
-func (r *resourcePool) releaseMetricValue(metVal *m3thrift.MetricValue) {
-	if metVal.IsSetCount() {
-		metVal.Count.I64Value = nil
-		r.counterPool.Put(metVal.Count)
-		metVal.Count = nil
-	} else if metVal.IsSetGauge() {
-		metVal.Gauge.I64Value = nil
-		metVal.Gauge.DValue = nil
-		r.gaugePool.Put(metVal.Gauge)
-		metVal.Gauge = nil
-	} else if metVal.IsSetTimer() {
-		metVal.Timer.I64Value = nil
-		metVal.Timer.DValue = nil
-		r.timerPool.Put(metVal.Timer)
-		metVal.Timer = nil
-	}
-	r.valuePool.Put(metVal)
+
+	r.metricSlicePool.Put(metrics[:0])
 }
 
 //nolint:unused
-func (r *resourcePool) releaseMetrics(mets []*m3thrift.Metric) {
-	for _, m := range mets {
-		r.releaseMetric(m)
-	}
+func (r *resourcePool) releaseMetricTagSlice(tags []m3thrift.MetricTag) {
+	r.metricTagSlicePool.Put(tags[:0])
 }
diff --git a/m3/resource_pool_test.go b/m3/resource_pool_test.go
index 7f6c2916..528b30ab 100644
--- a/m3/resource_pool_test.go
+++ b/m3/resource_pool_test.go
@@ -23,7 +23,7 @@ package m3
 import (
 	"testing"
 
-	m3thrift "github.com/uber-go/tally/m3/thrift"
+	m3thrift "github.com/uber-go/tally/m3/thrift/v2"
 	"github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift"
 
 	"github.com/stretchr/testify/require"
@@ -32,78 +32,17 @@ import (
 func TestM3ResourcePoolMetric(t *testing.T) {
 	p := newResourcePool(thrift.NewTCompactProtocolFactory())
 
-	var v int64
-	cm := p.getMetric()
-	cmv := p.getValue()
-	cm.MetricValue = cmv
-	cv := p.getCount()
-	cmv.Count = cv
-	cv.I64Value = &v
-	cm.Tags = map[*m3thrift.MetricTag]bool{createTag(p, "t1", "v1"): true}
-
-	gm := p.getMetric()
-	gmv := p.getValue()
-	gm.MetricValue = gmv
-	gv := p.getGauge()
-	gmv.Gauge = gv
-	gv.I64Value = &v
-
-	tm := p.getMetric()
-	tmv := p.getValue()
-	tm.MetricValue = tmv
-	tv := p.getTimer()
-	tmv.Timer = tv
-	tv.I64Value = &v
-
-	p.releaseMetric(tm)
-	p.releaseMetric(gm)
-	p.releaseMetric(cm)
-
-	cm2 := p.getMetric()
-	gm2 := p.getMetric()
-	tm2 := p.getMetric()
-
-	require.Nil(t, cm2.MetricValue)
-	require.Nil(t, gm2.MetricValue)
-	require.Nil(t, tm2.MetricValue)
-}
-
-func TestM3ResourcePoolMetricValue(t *testing.T) {
-	p := newResourcePool(thrift.NewTCompactProtocolFactory())
-	var v int64
-	cmv := p.getValue()
-	cv := p.getCount()
-	cmv.Count = cv
-	cv.I64Value = &v
-
-	gmv := p.getValue()
-	gv := p.getGauge()
-	gmv.Gauge = gv
-	gv.I64Value = &v
-
-	tmv := p.getValue()
-	tv := p.getTimer()
-	tmv.Timer = tv
-	tv.I64Value = &v
-
-	p.releaseMetricValue(tmv)
-	p.releaseMetricValue(gmv)
-	p.releaseMetricValue(cmv)
-
-	cmv2 := p.getValue()
-	gmv2 := p.getValue()
-	tmv2 := p.getValue()
-
-	require.Nil(t, cmv2.Count)
-	require.Nil(t, gmv2.Gauge)
-	require.Nil(t, tmv2.Timer)
-}
-
-func TestM3ResourcePoolBatch(t *testing.T) {
-	p := newResourcePool(thrift.NewTCompactProtocolFactory())
-	b := p.getBatch()
-	b.Metrics = append(b.Metrics, p.getMetric())
-	p.releaseBatch(b)
-	b2 := p.getBatch()
-	require.Equal(t, 0, len(b2.Metrics))
+	metrics := p.getMetricSlice()
+	metrics = append(metrics, m3thrift.Metric{})
+	require.Equal(t, 1, len(metrics))
+	p.releaseMetricSlice(metrics)
+	metrics = p.getMetricSlice()
+	require.Equal(t, 0, len(metrics))
+
+	tags := p.getMetricTagSlice()
+	tags = append(tags, m3thrift.MetricTag{})
+	require.Equal(t, 1, len(tags))
+	p.releaseMetricTagSlice(tags)
+	tags = p.getMetricTagSlice()
+	require.Equal(t, 0, len(tags))
 }
diff --git a/m3/thrift/Makefile b/m3/thrift/Makefile
index 56023b20..34905045 100644
--- a/m3/thrift/Makefile
+++ b/m3/thrift/Makefile
@@ -1,9 +1,6 @@
-thrift_version := v1.0.0
 
 gen-thrift:
-	@thrift --gen go:thrift_import="github.com/apache/thrift/lib/go/thrift" -out . $(thrift_version)/m3.thrift
-	@rm -rf m3/m3-remote
-	@mv m3/* .
-	@rm -rf m3
-	@echo Generated thrift go files in metrics/m3/thrift/
-
+	@thrift --gen go:thrift_import="github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" -out . v1/v1.thrift && rm -rf v1/m3-remote
+	@echo Generated v1 Go Thrift in metrics/m3/thrift/v1.
+	@thrift --gen go:thrift_import="github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" -out . v2/v2.thrift && rm -rf v2/m3-remote
+	@echo Generated v2 Go Thrift in metrics/m3/thrift/v2.
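The regenerated code under v1/ keeps the original pointer-heavy shapes, while the v2 types used by the reporter above are plain values. As a rough illustration of what the v2 surface looks like to calling code (a sketch, not part of the patch; it only uses names visible in the diffs above, and assumes the generated v2 package compiles as shown):

package main

import (
	"fmt"
	"time"

	m3thrift "github.com/uber-go/tally/m3/thrift/v2"
)

func main() {
	// v2: tags and the value live inline in the Metric, so constructing one
	// costs a single allocation for the tag slice and nothing per tag. The v1
	// types needed separate allocations for each *MetricTag, the *MetricValue
	// wrapper, and the boxed *int64/*float64 payloads.
	m := m3thrift.Metric{
		Name:      "requests",
		Timestamp: time.Now().UnixNano(),
		Tags: []m3thrift.MetricTag{
			{Name: "service", Value: "example"},
		},
		Value: m3thrift.MetricValue{
			MetricType: m3thrift.MetricType_COUNTER,
			Count:      1,
		},
	}

	fmt.Println(m.GetName(), m.GetValue().GetCount())
}
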
diff --git a/m3/thrift/constants.go b/m3/thrift/constants.go deleted file mode 100644 index de804618..00000000 --- a/m3/thrift/constants.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Autogenerated by Thrift Compiler (0.9.2) @generated -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package m3 - -import ( - "bytes" - "fmt" - - "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/m3/thrift/v1/constants.go b/m3/thrift/v1/constants.go new file mode 100644 index 00000000..d50c0b96 --- /dev/null +++ b/m3/thrift/v1/constants.go @@ -0,0 +1,18 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package v1 + +import ( + "bytes" + "fmt" + "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +func init() { +} diff --git a/m3/thrift/m3.go b/m3/thrift/v1/m3.go similarity index 64% rename from m3/thrift/m3.go rename to m3/thrift/v1/m3.go index 5b75d29a..3339d84b 100644 --- a/m3/thrift/m3.go +++ b/m3/thrift/v1/m3.go @@ -1,32 +1,11 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Autogenerated by Thrift Compiler (0.9.2) @generated +// Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -package m3 +package v1 import ( "bytes" "fmt" - "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) @@ -92,7 +71,7 @@ func (p *M3Client) sendEmitMetricBatch(batch *MetricBatch) (err error) { if err = oprot.WriteMessageBegin("emitMetricBatch", thrift.ONEWAY, p.SeqId); err != nil { return } - args := EmitMetricBatchArgs{ + args := M3EmitMetricBatchArgs{ Batch: batch, } if err = args.Write(oprot); err != nil { @@ -153,7 +132,7 @@ type m3ProcessorEmitMetricBatch struct { } func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := EmitMetricBatchArgs{} + args := M3EmitMetricBatchArgs{} if err = args.Read(iprot); err != nil { iprot.ReadMessageEnd() return false, err @@ -169,41 +148,44 @@ func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TP // HELPER FUNCTIONS AND STRUCTURES -type EmitMetricBatchArgs struct { +// Attributes: +// - Batch +type M3EmitMetricBatchArgs struct { Batch *MetricBatch `thrift:"batch,1" json:"batch"` } -func NewEmitMetricBatchArgs() *EmitMetricBatchArgs { - return &EmitMetricBatchArgs{} +func NewM3EmitMetricBatchArgs() *M3EmitMetricBatchArgs { + return &M3EmitMetricBatchArgs{} } -var EmitMetricBatchArgs_Batch_DEFAULT *MetricBatch +var M3EmitMetricBatchArgs_Batch_DEFAULT *MetricBatch -func (p *EmitMetricBatchArgs) GetBatch() *MetricBatch { +func (p *M3EmitMetricBatchArgs) GetBatch() *MetricBatch { if !p.IsSetBatch() { - return EmitMetricBatchArgs_Batch_DEFAULT + return M3EmitMetricBatchArgs_Batch_DEFAULT } return p.Batch } -func (p *EmitMetricBatchArgs) IsSetBatch() bool { +func (p *M3EmitMetricBatchArgs) IsSetBatch() bool { return p.Batch != nil } -func (p *EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { +func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } default: @@ -216,51 +198,51 @@ func (p *EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *EmitMetricBatchArgs) ReadField1(iprot thrift.TProtocol) error { +func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error { p.Batch = &MetricBatch{} if err := p.Batch.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", p.Batch, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), 
err) } return nil } -func (p *EmitMetricBatchArgs) Write(oprot thrift.TProtocol) error { +func (p *M3EmitMetricBatchArgs) Write(oprot thrift.TProtocol) error { if err := oprot.WriteStructBegin("emitMetricBatch_args"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } -func (p *EmitMetricBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *M3EmitMetricBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:batch: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) } if err := p.Batch.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", p.Batch, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:batch: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) } return err } -func (p *EmitMetricBatchArgs) String() string { +func (p *M3EmitMetricBatchArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("EmitMetricBatchArgs(%+v)", *p) + return fmt.Sprintf("M3EmitMetricBatchArgs(%+v)", *p) } diff --git a/m3/thrift/ttypes.go b/m3/thrift/v1/ttypes.go similarity index 58% rename from m3/thrift/ttypes.go rename to m3/thrift/v1/ttypes.go index ef883e29..a5888114 100644 --- a/m3/thrift/ttypes.go +++ b/m3/thrift/v1/ttypes.go @@ -1,32 +1,11 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -// Autogenerated by Thrift Compiler (0.9.2) @generated +// Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING -package m3 +package v1 import ( "bytes" "fmt" - "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) @@ -37,10 +16,17 @@ var _ = bytes.Equal var GoUnusedProtection__ int +// Different types of values that m3 emits. Each metric +// must contain one of these values +// +// Attributes: +// - Count +// - Gauge +// - Timer type MetricValue struct { - Count *CountValue `thrift:"count,1" json:"count"` - Gauge *GaugeValue `thrift:"gauge,2" json:"gauge"` - Timer *TimerValue `thrift:"timer,3" json:"timer"` + Count *CountValue `thrift:"count,1" json:"count,omitempty"` + Gauge *GaugeValue `thrift:"gauge,2" json:"gauge,omitempty"` + Timer *TimerValue `thrift:"timer,3" json:"timer,omitempty"` } func NewMetricValue() *MetricValue { @@ -73,6 +59,21 @@ func (p *MetricValue) GetTimer() TimerValue { } return *p.Timer } +func (p *MetricValue) CountSetFieldsMetricValue() int { + count := 0 + if p.IsSetCount() { + count++ + } + if p.IsSetGauge() { + count++ + } + if p.IsSetTimer() { + count++ + } + return count + +} + func (p *MetricValue) IsSetCount() bool { return p.Count != nil } @@ -87,27 +88,28 @@ func (p *MetricValue) IsSetTimer() bool { func (p *MetricValue) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } case 3: - if err := p.ReadField3(iprot); err != nil { + if err := p.readField3(iprot); err != nil { return err } default: @@ -120,38 +122,41 @@ func (p *MetricValue) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *MetricValue) ReadField1(iprot thrift.TProtocol) error { +func (p *MetricValue) readField1(iprot thrift.TProtocol) error { p.Count = &CountValue{} if err := p.Count.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", p.Count, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Count), err) } return nil } -func (p *MetricValue) ReadField2(iprot thrift.TProtocol) error { +func (p *MetricValue) readField2(iprot thrift.TProtocol) error { p.Gauge = &GaugeValue{} if err := p.Gauge.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", p.Gauge, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Gauge), err) } return nil } -func (p *MetricValue) ReadField3(iprot thrift.TProtocol) error { +func (p *MetricValue) readField3(iprot thrift.TProtocol) error { p.Timer = &TimerValue{} if err := p.Timer.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", p.Timer, err) + return thrift.PrependError(fmt.Sprintf("%T error reading 
struct: ", p.Timer), err) } return nil } func (p *MetricValue) Write(oprot thrift.TProtocol) error { + if c := p.CountSetFieldsMetricValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) + } if err := oprot.WriteStructBegin("MetricValue"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -163,10 +168,10 @@ func (p *MetricValue) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } @@ -174,13 +179,13 @@ func (p *MetricValue) Write(oprot thrift.TProtocol) error { func (p *MetricValue) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetCount() { if err := oprot.WriteFieldBegin("count", thrift.STRUCT, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:count: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:count: ", p), err) } if err := p.Count.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", p.Count, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Count), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:count: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:count: ", p), err) } } return err @@ -189,13 +194,13 @@ func (p *MetricValue) writeField1(oprot thrift.TProtocol) (err error) { func (p *MetricValue) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetGauge() { if err := oprot.WriteFieldBegin("gauge", thrift.STRUCT, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:gauge: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:gauge: ", p), err) } if err := p.Gauge.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", p.Gauge, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Gauge), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:gauge: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:gauge: ", p), err) } } return err @@ -204,13 +209,13 @@ func (p *MetricValue) writeField2(oprot thrift.TProtocol) (err error) { func (p *MetricValue) writeField3(oprot thrift.TProtocol) (err error) { if p.IsSetTimer() { if err := oprot.WriteFieldBegin("timer", thrift.STRUCT, 3); err != nil { - return fmt.Errorf("%T write field begin error 3:timer: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timer: ", p), err) } if err := p.Timer.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", p.Timer, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Timer), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 3:timer: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timer: ", p), err) } } return err @@ -223,8 +228,12 @@ func (p *MetricValue) String() string { return fmt.Sprintf("MetricValue(%+v)", *p) } +// 
Different types of count values +// +// Attributes: +// - I64Value type CountValue struct { - I64Value *int64 `thrift:"i64Value,1" json:"i64Value"` + I64Value *int64 `thrift:"i64Value,1" json:"i64Value,omitempty"` } func NewCountValue() *CountValue { @@ -239,25 +248,35 @@ func (p *CountValue) GetI64Value() int64 { } return *p.I64Value } +func (p *CountValue) CountSetFieldsCountValue() int { + count := 0 + if p.IsSetI64Value() { + count++ + } + return count + +} + func (p *CountValue) IsSetI64Value() bool { return p.I64Value != nil } func (p *CountValue) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } default: @@ -270,14 +289,14 @@ func (p *CountValue) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *CountValue) ReadField1(iprot thrift.TProtocol) error { +func (p *CountValue) readField1(iprot thrift.TProtocol) error { if v, err := iprot.ReadI64(); err != nil { - return fmt.Errorf("error reading field 1: %s", err) + return thrift.PrependError("error reading field 1: ", err) } else { p.I64Value = &v } @@ -285,17 +304,20 @@ func (p *CountValue) ReadField1(iprot thrift.TProtocol) error { } func (p *CountValue) Write(oprot thrift.TProtocol) error { + if c := p.CountSetFieldsCountValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) + } if err := oprot.WriteStructBegin("CountValue"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } @@ -303,13 +325,13 @@ func (p *CountValue) Write(oprot thrift.TProtocol) error { func (p *CountValue) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetI64Value() { if err := oprot.WriteFieldBegin("i64Value", thrift.I64, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:i64Value: ", p), err) } if err := oprot.WriteI64(int64(*p.I64Value)); err != nil { - return fmt.Errorf("%T.i64Value (1) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.i64Value (1) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:i64Value: ", p), err) } } return err @@ -322,9 
+344,14 @@ func (p *CountValue) String() string { return fmt.Sprintf("CountValue(%+v)", *p) } +// Different types of gauge values +// +// Attributes: +// - I64Value +// - DValue type GaugeValue struct { - I64Value *int64 `thrift:"i64Value,1" json:"i64Value"` - DValue *float64 `thrift:"dValue,2" json:"dValue"` + I64Value *int64 `thrift:"i64Value,1" json:"i64Value,omitempty"` + DValue *float64 `thrift:"dValue,2" json:"dValue,omitempty"` } func NewGaugeValue() *GaugeValue { @@ -348,6 +375,18 @@ func (p *GaugeValue) GetDValue() float64 { } return *p.DValue } +func (p *GaugeValue) CountSetFieldsGaugeValue() int { + count := 0 + if p.IsSetI64Value() { + count++ + } + if p.IsSetDValue() { + count++ + } + return count + +} + func (p *GaugeValue) IsSetI64Value() bool { return p.I64Value != nil } @@ -358,23 +397,24 @@ func (p *GaugeValue) IsSetDValue() bool { func (p *GaugeValue) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } default: @@ -387,23 +427,23 @@ func (p *GaugeValue) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *GaugeValue) ReadField1(iprot thrift.TProtocol) error { +func (p *GaugeValue) readField1(iprot thrift.TProtocol) error { if v, err := iprot.ReadI64(); err != nil { - return fmt.Errorf("error reading field 1: %s", err) + return thrift.PrependError("error reading field 1: ", err) } else { p.I64Value = &v } return nil } -func (p *GaugeValue) ReadField2(iprot thrift.TProtocol) error { +func (p *GaugeValue) readField2(iprot thrift.TProtocol) error { if v, err := iprot.ReadDouble(); err != nil { - return fmt.Errorf("error reading field 2: %s", err) + return thrift.PrependError("error reading field 2: ", err) } else { p.DValue = &v } @@ -411,8 +451,11 @@ func (p *GaugeValue) ReadField2(iprot thrift.TProtocol) error { } func (p *GaugeValue) Write(oprot thrift.TProtocol) error { + if c := p.CountSetFieldsGaugeValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) + } if err := oprot.WriteStructBegin("GaugeValue"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -421,10 +464,10 @@ func (p *GaugeValue) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } @@ -432,13 
+475,13 @@ func (p *GaugeValue) Write(oprot thrift.TProtocol) error { func (p *GaugeValue) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetI64Value() { if err := oprot.WriteFieldBegin("i64Value", thrift.I64, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:i64Value: ", p), err) } if err := oprot.WriteI64(int64(*p.I64Value)); err != nil { - return fmt.Errorf("%T.i64Value (1) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.i64Value (1) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:i64Value: ", p), err) } } return err @@ -447,13 +490,13 @@ func (p *GaugeValue) writeField1(oprot thrift.TProtocol) (err error) { func (p *GaugeValue) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetDValue() { if err := oprot.WriteFieldBegin("dValue", thrift.DOUBLE, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:dValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dValue: ", p), err) } if err := oprot.WriteDouble(float64(*p.DValue)); err != nil { - return fmt.Errorf("%T.dValue (2) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.dValue (2) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:dValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dValue: ", p), err) } } return err @@ -466,9 +509,14 @@ func (p *GaugeValue) String() string { return fmt.Sprintf("GaugeValue(%+v)", *p) } +// Different types of timer values +// +// Attributes: +// - I64Value +// - DValue type TimerValue struct { - I64Value *int64 `thrift:"i64Value,1" json:"i64Value"` - DValue *float64 `thrift:"dValue,2" json:"dValue"` + I64Value *int64 `thrift:"i64Value,1" json:"i64Value,omitempty"` + DValue *float64 `thrift:"dValue,2" json:"dValue,omitempty"` } func NewTimerValue() *TimerValue { @@ -492,6 +540,18 @@ func (p *TimerValue) GetDValue() float64 { } return *p.DValue } +func (p *TimerValue) CountSetFieldsTimerValue() int { + count := 0 + if p.IsSetI64Value() { + count++ + } + if p.IsSetDValue() { + count++ + } + return count + +} + func (p *TimerValue) IsSetI64Value() bool { return p.I64Value != nil } @@ -502,23 +562,24 @@ func (p *TimerValue) IsSetDValue() bool { func (p *TimerValue) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } default: @@ -531,23 +592,23 @@ func (p *TimerValue) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *TimerValue) ReadField1(iprot thrift.TProtocol) error { +func (p *TimerValue) readField1(iprot thrift.TProtocol) error { if v, err := iprot.ReadI64(); err != nil { - return fmt.Errorf("error reading field 1: %s", err) + return thrift.PrependError("error reading field 1: ", err) } else { p.I64Value = &v } return nil } -func (p *TimerValue) ReadField2(iprot thrift.TProtocol) error { +func (p *TimerValue) readField2(iprot thrift.TProtocol) error { if v, err := iprot.ReadDouble(); err != nil { - return fmt.Errorf("error reading field 2: %s", err) + return thrift.PrependError("error reading field 2: ", err) } else { p.DValue = &v } @@ -555,8 +616,11 @@ func (p *TimerValue) ReadField2(iprot thrift.TProtocol) error { } func (p *TimerValue) Write(oprot thrift.TProtocol) error { + if c := p.CountSetFieldsTimerValue(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) + } if err := oprot.WriteStructBegin("TimerValue"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -565,10 +629,10 @@ func (p *TimerValue) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } @@ -576,13 +640,13 @@ func (p *TimerValue) Write(oprot thrift.TProtocol) error { func (p *TimerValue) writeField1(oprot thrift.TProtocol) (err error) { if p.IsSetI64Value() { if err := oprot.WriteFieldBegin("i64Value", thrift.I64, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:i64Value: ", p), err) } if err := oprot.WriteI64(int64(*p.I64Value)); err != nil { - return fmt.Errorf("%T.i64Value (1) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.i64Value (1) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:i64Value: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:i64Value: ", p), err) } } return err @@ -591,13 +655,13 @@ func (p *TimerValue) writeField1(oprot thrift.TProtocol) (err error) { func (p *TimerValue) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetDValue() { if err := oprot.WriteFieldBegin("dValue", thrift.DOUBLE, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:dValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dValue: ", p), err) } if err := oprot.WriteDouble(float64(*p.DValue)); err != nil { - return fmt.Errorf("%T.dValue (2) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.dValue (2) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:dValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dValue: ", p), err) } } return err @@ -610,9 +674,14 @@ func (p *TimerValue) String() string { return fmt.Sprintf("TimerValue(%+v)", *p) } +// Tags 
that can be applied to a metric +// +// Attributes: +// - TagName +// - TagValue type MetricTag struct { TagName string `thrift:"tagName,1" json:"tagName"` - TagValue *string `thrift:"tagValue,2" json:"tagValue"` + TagValue *string `thrift:"tagValue,2" json:"tagValue,omitempty"` } func NewMetricTag() *MetricTag { @@ -637,23 +706,24 @@ func (p *MetricTag) IsSetTagValue() bool { func (p *MetricTag) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } default: @@ -666,23 +736,23 @@ func (p *MetricTag) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *MetricTag) ReadField1(iprot thrift.TProtocol) error { +func (p *MetricTag) readField1(iprot thrift.TProtocol) error { if v, err := iprot.ReadString(); err != nil { - return fmt.Errorf("error reading field 1: %s", err) + return thrift.PrependError("error reading field 1: ", err) } else { p.TagName = v } return nil } -func (p *MetricTag) ReadField2(iprot thrift.TProtocol) error { +func (p *MetricTag) readField2(iprot thrift.TProtocol) error { if v, err := iprot.ReadString(); err != nil { - return fmt.Errorf("error reading field 2: %s", err) + return thrift.PrependError("error reading field 2: ", err) } else { p.TagValue = &v } @@ -691,7 +761,7 @@ func (p *MetricTag) ReadField2(iprot thrift.TProtocol) error { func (p *MetricTag) Write(oprot thrift.TProtocol) error { if err := oprot.WriteStructBegin("MetricTag"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -700,23 +770,23 @@ func (p *MetricTag) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } func (p *MetricTag) writeField1(oprot thrift.TProtocol) (err error) { if err := oprot.WriteFieldBegin("tagName", thrift.STRING, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:tagName: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:tagName: ", p), err) } if err := oprot.WriteString(string(p.TagName)); err != nil { - return fmt.Errorf("%T.tagName (1) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.tagName (1) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:tagName: %s", p, err) + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:tagName: ", p), err) } return err } @@ -724,13 +794,13 @@ func (p *MetricTag) writeField1(oprot thrift.TProtocol) (err error) { func (p *MetricTag) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetTagValue() { if err := oprot.WriteFieldBegin("tagValue", thrift.STRING, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:tagValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tagValue: ", p), err) } if err := oprot.WriteString(string(*p.TagValue)); err != nil { - return fmt.Errorf("%T.tagValue (2) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.tagValue (2) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:tagValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tagValue: ", p), err) } } return err @@ -743,11 +813,18 @@ func (p *MetricTag) String() string { return fmt.Sprintf("MetricTag(%+v)", *p) } +// The metric that is being emitted +// +// Attributes: +// - Name +// - MetricValue +// - Timestamp +// - Tags type Metric struct { Name string `thrift:"name,1" json:"name"` - MetricValue *MetricValue `thrift:"metricValue,2" json:"metricValue"` - Timestamp *int64 `thrift:"timestamp,3" json:"timestamp"` - Tags map[*MetricTag]bool `thrift:"tags,4" json:"tags"` + MetricValue *MetricValue `thrift:"metricValue,2" json:"metricValue,omitempty"` + Timestamp *int64 `thrift:"timestamp,3" json:"timestamp,omitempty"` + Tags map[*MetricTag]bool `thrift:"tags,4" json:"tags,omitempty"` } func NewMetric() *Metric { @@ -795,31 +872,32 @@ func (p *Metric) IsSetTags() bool { func (p *Metric) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } case 3: - if err := p.ReadField3(iprot); err != nil { + if err := p.readField3(iprot); err != nil { return err } case 4: - if err := p.ReadField4(iprot); err != nil { + if err := p.readField4(iprot); err != nil { return err } default: @@ -832,60 +910,60 @@ func (p *Metric) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *Metric) ReadField1(iprot thrift.TProtocol) error { +func (p *Metric) readField1(iprot thrift.TProtocol) error { if v, err := iprot.ReadString(); err != nil { - return fmt.Errorf("error reading field 1: %s", err) + return thrift.PrependError("error reading field 1: ", err) } else { p.Name = v } return nil } -func (p *Metric) ReadField2(iprot thrift.TProtocol) error { +func (p *Metric) readField2(iprot thrift.TProtocol) error { p.MetricValue = &MetricValue{} if err := p.MetricValue.Read(iprot); err != nil { - return fmt.Errorf("%T 
error reading struct: %s", p.MetricValue, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetricValue), err) } return nil } -func (p *Metric) ReadField3(iprot thrift.TProtocol) error { +func (p *Metric) readField3(iprot thrift.TProtocol) error { if v, err := iprot.ReadI64(); err != nil { - return fmt.Errorf("error reading field 3: %s", err) + return thrift.PrependError("error reading field 3: ", err) } else { p.Timestamp = &v } return nil } -func (p *Metric) ReadField4(iprot thrift.TProtocol) error { +func (p *Metric) readField4(iprot thrift.TProtocol) error { _, size, err := iprot.ReadSetBegin() if err != nil { - return fmt.Errorf("error reading set begin: %s", err) + return thrift.PrependError("error reading set begin: ", err) } tSet := make(map[*MetricTag]bool, size) p.Tags = tSet for i := 0; i < size; i++ { _elem0 := &MetricTag{} if err := _elem0.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", _elem0, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) } p.Tags[_elem0] = true } if err := iprot.ReadSetEnd(); err != nil { - return fmt.Errorf("error reading set end: %s", err) + return thrift.PrependError("error reading set end: ", err) } return nil } func (p *Metric) Write(oprot thrift.TProtocol) error { if err := oprot.WriteStructBegin("Metric"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -900,23 +978,23 @@ func (p *Metric) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } func (p *Metric) writeField1(oprot thrift.TProtocol) (err error) { if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:name: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) } if err := oprot.WriteString(string(p.Name)); err != nil { - return fmt.Errorf("%T.name (1) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:name: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) } return err } @@ -924,13 +1002,13 @@ func (p *Metric) writeField1(oprot thrift.TProtocol) (err error) { func (p *Metric) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetMetricValue() { if err := oprot.WriteFieldBegin("metricValue", thrift.STRUCT, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:metricValue: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:metricValue: ", p), err) } if err := p.MetricValue.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", p.MetricValue, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MetricValue), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:metricValue: %s", p, err) + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:metricValue: ", p), err) } } return err @@ -939,13 +1017,13 @@ func (p *Metric) writeField2(oprot thrift.TProtocol) (err error) { func (p *Metric) writeField3(oprot thrift.TProtocol) (err error) { if p.IsSetTimestamp() { if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { - return fmt.Errorf("%T write field begin error 3:timestamp: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) } if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return fmt.Errorf("%T.timestamp (3) field write error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 3:timestamp: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timestamp: ", p), err) } } return err @@ -954,21 +1032,21 @@ func (p *Metric) writeField3(oprot thrift.TProtocol) (err error) { func (p *Metric) writeField4(oprot thrift.TProtocol) (err error) { if p.IsSetTags() { if err := oprot.WriteFieldBegin("tags", thrift.SET, 4); err != nil { - return fmt.Errorf("%T write field begin error 4:tags: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:tags: ", p), err) } if err := oprot.WriteSetBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return fmt.Errorf("error writing set begin: %s", err) + return thrift.PrependError("error writing set begin: ", err) } - for v := range p.Tags { + for v, _ := range p.Tags { if err := v.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", v, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } if err := oprot.WriteSetEnd(); err != nil { - return fmt.Errorf("error writing set end: %s", err) + return thrift.PrependError("error writing set end: ", err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 4:tags: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:tags: ", p), err) } } return err @@ -981,9 +1059,15 @@ func (p *Metric) String() string { return fmt.Sprintf("Metric(%+v)", *p) } +// Structure that holds a group of metrics which share +// common properties like the cluster and service. 
+// +// Attributes: +// - Metrics +// - CommonTags type MetricBatch struct { Metrics []*Metric `thrift:"metrics,1" json:"metrics"` - CommonTags map[*MetricTag]bool `thrift:"commonTags,2" json:"commonTags"` + CommonTags map[*MetricTag]bool `thrift:"commonTags,2" json:"commonTags,omitempty"` } func NewMetricBatch() *MetricBatch { @@ -1005,23 +1089,24 @@ func (p *MetricBatch) IsSetCommonTags() bool { func (p *MetricBatch) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { - return fmt.Errorf("%T read error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } + for { _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() if err != nil { - return fmt.Errorf("%T field %d read error: %s", p, fieldId, err) + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) } if fieldTypeId == thrift.STOP { break } switch fieldId { case 1: - if err := p.ReadField1(iprot); err != nil { + if err := p.readField1(iprot); err != nil { return err } case 2: - if err := p.ReadField2(iprot); err != nil { + if err := p.readField2(iprot); err != nil { return err } default: @@ -1034,54 +1119,54 @@ func (p *MetricBatch) Read(iprot thrift.TProtocol) error { } } if err := iprot.ReadStructEnd(); err != nil { - return fmt.Errorf("%T read struct end error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } return nil } -func (p *MetricBatch) ReadField1(iprot thrift.TProtocol) error { +func (p *MetricBatch) readField1(iprot thrift.TProtocol) error { _, size, err := iprot.ReadListBegin() if err != nil { - return fmt.Errorf("error reading list begin: %s", err) + return thrift.PrependError("error reading list begin: ", err) } tSlice := make([]*Metric, 0, size) p.Metrics = tSlice for i := 0; i < size; i++ { _elem1 := &Metric{} if err := _elem1.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", _elem1, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) } p.Metrics = append(p.Metrics, _elem1) } if err := iprot.ReadListEnd(); err != nil { - return fmt.Errorf("error reading list end: %s", err) + return thrift.PrependError("error reading list end: ", err) } return nil } -func (p *MetricBatch) ReadField2(iprot thrift.TProtocol) error { +func (p *MetricBatch) readField2(iprot thrift.TProtocol) error { _, size, err := iprot.ReadSetBegin() if err != nil { - return fmt.Errorf("error reading set begin: %s", err) + return thrift.PrependError("error reading set begin: ", err) } tSet := make(map[*MetricTag]bool, size) p.CommonTags = tSet for i := 0; i < size; i++ { _elem2 := &MetricTag{} if err := _elem2.Read(iprot); err != nil { - return fmt.Errorf("%T error reading struct: %s", _elem2, err) + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) } p.CommonTags[_elem2] = true } if err := iprot.ReadSetEnd(); err != nil { - return fmt.Errorf("error reading set end: %s", err) + return thrift.PrependError("error reading set end: ", err) } return nil } func (p *MetricBatch) Write(oprot thrift.TProtocol) error { if err := oprot.WriteStructBegin("MetricBatch"); err != nil { - return fmt.Errorf("%T write struct begin error: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { return err @@ -1090,31 +1175,31 @@ func (p *MetricBatch) Write(oprot thrift.TProtocol) error { return err } if err := oprot.WriteFieldStop(); err != nil { - 
return fmt.Errorf("write field stop error: %s", err) + return thrift.PrependError("write field stop error: ", err) } if err := oprot.WriteStructEnd(); err != nil { - return fmt.Errorf("write struct stop error: %s", err) + return thrift.PrependError("write struct stop error: ", err) } return nil } func (p *MetricBatch) writeField1(oprot thrift.TProtocol) (err error) { if err := oprot.WriteFieldBegin("metrics", thrift.LIST, 1); err != nil { - return fmt.Errorf("%T write field begin error 1:metrics: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:metrics: ", p), err) } if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Metrics)); err != nil { - return fmt.Errorf("error writing list begin: %s", err) + return thrift.PrependError("error writing list begin: ", err) } for _, v := range p.Metrics { if err := v.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", v, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } if err := oprot.WriteListEnd(); err != nil { - return fmt.Errorf("error writing list end: %s", err) + return thrift.PrependError("error writing list end: ", err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 1:metrics: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:metrics: ", p), err) } return err } @@ -1122,21 +1207,21 @@ func (p *MetricBatch) writeField1(oprot thrift.TProtocol) (err error) { func (p *MetricBatch) writeField2(oprot thrift.TProtocol) (err error) { if p.IsSetCommonTags() { if err := oprot.WriteFieldBegin("commonTags", thrift.SET, 2); err != nil { - return fmt.Errorf("%T write field begin error 2:commonTags: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:commonTags: ", p), err) } if err := oprot.WriteSetBegin(thrift.STRUCT, len(p.CommonTags)); err != nil { - return fmt.Errorf("error writing set begin: %s", err) + return thrift.PrependError("error writing set begin: ", err) } - for v := range p.CommonTags { + for v, _ := range p.CommonTags { if err := v.Write(oprot); err != nil { - return fmt.Errorf("%T error writing struct: %s", v, err) + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) } } if err := oprot.WriteSetEnd(); err != nil { - return fmt.Errorf("error writing set end: %s", err) + return thrift.PrependError("error writing set end: ", err) } if err := oprot.WriteFieldEnd(); err != nil { - return fmt.Errorf("%T write field end error 2:commonTags: %s", p, err) + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:commonTags: ", p), err) } } return err diff --git a/m3/thrift/v1.0.0/m3.thrift b/m3/thrift/v1/v1.thrift similarity index 100% rename from m3/thrift/v1.0.0/m3.thrift rename to m3/thrift/v1/v1.thrift diff --git a/m3/thrift/v2/constants.go b/m3/thrift/v2/constants.go new file mode 100644 index 00000000..1e7ad2d6 --- /dev/null +++ b/m3/thrift/v2/constants.go @@ -0,0 +1,18 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package v2 + +import ( + "bytes" + "fmt" + "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +func init() { +} diff --git a/m3/thrift/v2/m3.go b/m3/thrift/v2/m3.go new file mode 100644 index 00000000..f0a288ca --- /dev/null +++ b/m3/thrift/v2/m3.go @@ -0,0 +1,242 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package v2 + +import ( + "bytes" + "fmt" + "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type M3 interface { + // Parameters: + // - Batch + EmitMetricBatch(batch MetricBatch) (err error) +} + +type M3Client struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewM3ClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *M3Client { + return &M3Client{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewM3ClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *M3Client { + return &M3Client{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - Batch +func (p *M3Client) EmitMetricBatch(batch MetricBatch) (err error) { + if err = p.sendEmitMetricBatch(batch); err != nil { + return + } + return +} + +func (p *M3Client) sendEmitMetricBatch(batch MetricBatch) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("emitMetricBatch", thrift.ONEWAY, p.SeqId); err != nil { + return + } + args := M3EmitMetricBatchArgs{ + Batch: batch, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush() +} + +type M3Processor struct { + processorMap map[string]thrift.TProcessorFunction + handler M3 +} + +func (p *M3Processor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *M3Processor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *M3Processor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewM3Processor(handler M3) *M3Processor { + + self3 := &M3Processor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self3.processorMap["emitMetricBatch"] = &m3ProcessorEmitMetricBatch{handler: handler} + return self3 +} + +func (p *M3Processor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x4.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x4 + +} + +type m3ProcessorEmitMetricBatch struct { + handler M3 +} + +func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot 
thrift.TProtocol) (success bool, err thrift.TException) {
+	args := M3EmitMetricBatchArgs{}
+	if err = args.Read(iprot); err != nil {
+		iprot.ReadMessageEnd()
+		return false, err
+	}
+
+	iprot.ReadMessageEnd()
+	var err2 error
+	if err2 = p.handler.EmitMetricBatch(args.Batch); err2 != nil {
+		return true, err2
+	}
+	return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Batch
+type M3EmitMetricBatchArgs struct {
+	Batch MetricBatch `thrift:"batch,1" json:"batch"`
+}
+
+func NewM3EmitMetricBatchArgs() *M3EmitMetricBatchArgs {
+	return &M3EmitMetricBatchArgs{}
+}
+
+var M3EmitMetricBatchArgs_Batch_DEFAULT MetricBatch
+
+func (p *M3EmitMetricBatchArgs) GetBatch() MetricBatch {
+	if !p.IsSetBatch() {
+		return M3EmitMetricBatchArgs_Batch_DEFAULT
+	}
+	return p.Batch
+}
+func (p *M3EmitMetricBatchArgs) IsSetBatch() bool {
+	return p.Batch.Metrics != nil || p.Batch.CommonTags != nil
+}
+
+func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error {
+	p.Batch = MetricBatch{}
+	if err := p.Batch.Read(iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+	}
+	return nil
+}
+
+func (p *M3EmitMetricBatchArgs) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("emitMetricBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *M3EmitMetricBatchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+	}
+	if err := p.Batch.Write(oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+	}
+	return err
+}
+
+func (p *M3EmitMetricBatchArgs) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("M3EmitMetricBatchArgs(%+v)", *p)
+}
diff --git a/m3/thrift/v2/ttypes.go b/m3/thrift/v2/ttypes.go
new file mode 100644
index 00000000..3e6c33b8
--- /dev/null
+++ b/m3/thrift/v2/ttypes.go
@@ -0,0 +1,849 @@
+// Autogenerated by Thrift Compiler (0.9.3)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package v2
+
+import (
+	"bytes"
+	"fmt"
+	"github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = bytes.Equal
+
+var GoUnusedProtection__ int
+
+type MetricType int64
+
+const (
+	MetricType_INVALID MetricType = 0
+	MetricType_COUNTER MetricType = 1
+	MetricType_GAUGE MetricType = 2
+	MetricType_TIMER MetricType = 3
+)
+
+func (p MetricType) String() string {
+	switch p {
+	case MetricType_INVALID:
+		return "INVALID"
+	case MetricType_COUNTER:
+		return "COUNTER"
+	case MetricType_GAUGE:
+		return "GAUGE"
+	case MetricType_TIMER:
+		return "TIMER"
+	}
+	return "<UNSET>"
+}
+
+func MetricTypeFromString(s string) (MetricType, error) {
+	switch s {
+	case "INVALID":
+		return MetricType_INVALID, nil
+	case "COUNTER":
+		return MetricType_COUNTER, nil
+	case "GAUGE":
+		return MetricType_GAUGE, nil
+	case "TIMER":
+		return MetricType_TIMER, nil
+	}
+	return MetricType(0), fmt.Errorf("not a valid MetricType string")
+}
+
+func MetricTypePtr(v MetricType) *MetricType { return &v }
+
+func (p MetricType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *MetricType) UnmarshalText(text []byte) error {
+	q, err := MetricTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+// Attributes:
+// - MetricType
+// - Count
+// - Gauge
+// - Timer
+type MetricValue struct {
+	MetricType MetricType `thrift:"metricType,1,required" json:"metricType"`
+	Count int64 `thrift:"count,2,required" json:"count"`
+	Gauge float64 `thrift:"gauge,3,required" json:"gauge"`
+	Timer int64 `thrift:"timer,4,required" json:"timer"`
+}
+
+func NewMetricValue() *MetricValue {
+	return &MetricValue{}
+}
+
+func (p *MetricValue) GetMetricType() MetricType {
+	return p.MetricType
+}
+
+func (p *MetricValue) GetCount() int64 {
+	return p.Count
+}
+
+func (p *MetricValue) GetGauge() float64 {
+	return p.Gauge
+}
+
+func (p *MetricValue) GetTimer() int64 {
+	return p.Timer
+}
+func (p *MetricValue) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetMetricType bool = false
+	var issetCount bool = false
+	var issetGauge bool = false
+	var issetTimer bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetMetricType = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetCount = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetGauge = true
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+			issetTimer = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetMetricType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MetricType is not set"))
+	}
+	if !issetCount {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Count is not set"))
+	}
+	if !issetGauge {
+		return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Gauge is not set")) + } + if !issetTimer { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timer is not set")) + } + return nil +} + +func (p *MetricValue) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := MetricType(v) + p.MetricType = temp + } + return nil +} + +func (p *MetricValue) readField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Count = v + } + return nil +} + +func (p *MetricValue) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Gauge = v + } + return nil +} + +func (p *MetricValue) readField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Timer = v + } + return nil +} + +func (p *MetricValue) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("MetricValue"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MetricValue) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("metricType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:metricType: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetricType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.metricType (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:metricType: ", p), err) + } + return err +} + +func (p *MetricValue) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err) + } + if err := oprot.WriteI64(int64(p.Count)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err) + } + return err +} + +func (p *MetricValue) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("gauge", thrift.DOUBLE, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:gauge: ", p), err) + } + if err := oprot.WriteDouble(float64(p.Gauge)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.gauge (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:gauge: ", p), err) + } + return err +} 
+
+func (p *MetricValue) writeField4(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("timer", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:timer: ", p), err)
+	}
+	if err := oprot.WriteI64(int64(p.Timer)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.timer (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:timer: ", p), err)
+	}
+	return err
+}
+
+func (p *MetricValue) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("MetricValue(%+v)", *p)
+}
+
+// Attributes:
+// - Name
+// - Value
+type MetricTag struct {
+	Name string `thrift:"name,1,required" json:"name"`
+	Value string `thrift:"value,2,required" json:"value"`
+}
+
+func NewMetricTag() *MetricTag {
+	return &MetricTag{}
+}
+
+func (p *MetricTag) GetName() string {
+	return p.Name
+}
+
+func (p *MetricTag) GetValue() string {
+	return p.Value
+}
+func (p *MetricTag) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetName bool = false
+	var issetValue bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetName = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetValue = true
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set"))
+	}
+	if !issetValue {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Value is not set"))
+	}
+	return nil
+}
+
+func (p *MetricTag) readField1(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Name = v
+	}
+	return nil
+}
+
+func (p *MetricTag) readField2(iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Value = v
+	}
+	return nil
+}
+
+func (p *MetricTag) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("MetricTag"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *MetricTag) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Name)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err)
+	}
+	return err
+}
+
+func (p *MetricTag) writeField2(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+	}
+	if err := oprot.WriteString(string(p.Value)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+	}
+	return err
+}
+
+func (p *MetricTag) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("MetricTag(%+v)", *p)
+}
+
+// Attributes:
+// - Name
+// - Value
+// - Timestamp
+// - Tags
+type Metric struct {
+	Name string `thrift:"name,1,required" json:"name"`
+	Value MetricValue `thrift:"value,2,required" json:"value"`
+	Timestamp int64 `thrift:"timestamp,3,required" json:"timestamp"`
+	Tags []MetricTag `thrift:"tags,4" json:"tags,omitempty"`
+}
+
+func NewMetric() *Metric {
+	return &Metric{}
+}
+
+func (p *Metric) GetName() string {
+	return p.Name
+}
+
+var Metric_Value_DEFAULT MetricValue
+
+func (p *Metric) GetValue() MetricValue {
+	if !p.IsSetValue() {
+		return Metric_Value_DEFAULT
+	}
+	return p.Value
+}
+
+func (p *Metric) GetTimestamp() int64 {
+	return p.Timestamp
+}
+
+var Metric_Tags_DEFAULT []MetricTag
+
+func (p *Metric) GetTags() []MetricTag {
+	return p.Tags
+}
+func (p *Metric) IsSetValue() bool {
+	return p.Value.GetMetricType() != MetricType_INVALID || p.Value.Count != 0 || p.Value.Gauge != 0 || p.Value.Timer != 0
+}
+
+func (p *Metric) IsSetTags() bool {
+	return p.Tags != nil
+}
+
+func (p *Metric) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetName bool = false
+	var issetValue bool = false
+	var issetTimestamp bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetName = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+			issetValue = true
+		case 3:
+			if err := p.readField3(iprot); err != nil {
+				return err
+			}
+			issetTimestamp = true
+		case 4:
+			if err := p.readField4(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set"))
+	}
+	if !issetValue {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Value is not set"))
+	}
+	if !issetTimestamp {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+	}
+	return nil
+}
+
+func (p *Metric) readField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Metric) readField2(iprot thrift.TProtocol) error { + p.Value = MetricValue{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *Metric) readField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Metric) readField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]MetricTag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem0 := MetricTag{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Tags = append(p.Tags, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Metric) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Metric"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Metric) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) + } + if err := oprot.WriteString(string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) + } + return err +} + +func (p *Metric) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Metric) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) + } + if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field 
end error 3:timestamp: ", p), err)
+	}
+	return err
+}
+
+func (p *Metric) writeField4(oprot thrift.TProtocol) (err error) {
+	if p.IsSetTags() {
+		if err := oprot.WriteFieldBegin("tags", thrift.LIST, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:tags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Tags {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:tags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Metric) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Metric(%+v)", *p)
+}
+
+// Attributes:
+// - Metrics
+// - CommonTags
+type MetricBatch struct {
+	Metrics []Metric `thrift:"metrics,1,required" json:"metrics"`
+	CommonTags []MetricTag `thrift:"commonTags,2" json:"commonTags,omitempty"`
+}
+
+func NewMetricBatch() *MetricBatch {
+	return &MetricBatch{}
+}
+
+func (p *MetricBatch) GetMetrics() []Metric {
+	return p.Metrics
+}
+
+var MetricBatch_CommonTags_DEFAULT []MetricTag
+
+func (p *MetricBatch) GetCommonTags() []MetricTag {
+	return p.CommonTags
+}
+func (p *MetricBatch) IsSetCommonTags() bool {
+	return p.CommonTags != nil
+}
+
+func (p *MetricBatch) Read(iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetMetrics bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if err := p.readField1(iprot); err != nil {
+				return err
+			}
+			issetMetrics = true
+		case 2:
+			if err := p.readField2(iprot); err != nil {
+				return err
+			}
+		default:
+			if err := iprot.Skip(fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetMetrics {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Metrics is not set"))
+	}
+	return nil
+}
+
+func (p *MetricBatch) readField1(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]Metric, 0, size)
+	p.Metrics = tSlice
+	for i := 0; i < size; i++ {
+		_elem1 := Metric{}
+		if err := _elem1.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+		}
+		p.Metrics = append(p.Metrics, _elem1)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *MetricBatch) readField2(iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin()
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]MetricTag, 0, size)
+	p.CommonTags = tSlice
+	for i := 0; i < size; i++ {
+		_elem2 := MetricTag{}
+		if err := _elem2.Read(iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+		}
+		p.CommonTags = append(p.CommonTags, _elem2)
+	}
+	if err := iprot.ReadListEnd(); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *MetricBatch) Write(oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin("MetricBatch"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if err := p.writeField1(oprot); err != nil {
+		return err
+	}
+	if err := p.writeField2(oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteFieldStop(); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *MetricBatch) writeField1(oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin("metrics", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:metrics: ", p), err)
+	}
+	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Metrics)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Metrics {
+		if err := v.Write(oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:metrics: ", p), err)
+	}
+	return err
+}
+
+func (p *MetricBatch) writeField2(oprot thrift.TProtocol) (err error) {
+	if p.IsSetCommonTags() {
+		if err := oprot.WriteFieldBegin("commonTags", thrift.LIST, 2); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:commonTags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(thrift.STRUCT, len(p.CommonTags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.CommonTags {
+			if err := v.Write(oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:commonTags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *MetricBatch) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("MetricBatch(%+v)", *p)
+}
diff --git a/scope.go b/scope.go
index 943c8d0e..950c8d5f 100644
--- a/scope.go
+++ b/scope.go
@@ -22,8 +22,11 @@ package tally
 
 import (
 	"io"
+	"math"
 	"sync"
 	"time"
+
+	"github.com/uber-go/tally/internal/identity"
 )
 
 const (
@@ -84,6 +87,25 @@ type scope struct {
 	timers map[string]*timer
 	// nb: deliberately skipping timersSlice as we report timers immediately,
 	// no buffering is involved.
+ + bucketCache map[uint64]bucketStorage +} + +func getBucketsIdentity(buckets Buckets) uint64 { + acc := identity.NewAccumulator() + + if dbuckets, ok := buckets.(DurationBuckets); ok { + for _, dur := range dbuckets { + acc = acc.AddUint64(uint64(dur)) + } + } else { + vbuckets := buckets.(ValueBuckets) + for _, val := range vbuckets { + acc = acc.AddUint64(math.Float64bits(val)) + } + } + + return acc.Value() } type scopeStatus struct { @@ -165,6 +187,7 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope { histograms: make(map[string]*histogram), histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), timers: make(map[string]*timer), + bucketCache: make(map[uint64]bucketStorage), } // NB(r): Take a copy of the tags on creation @@ -382,6 +405,11 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { b = s.defaultBuckets } + htype := valueHistogramType + if _, ok := b.(DurationBuckets); ok { + htype = durationHistogramType + } + s.hm.Lock() defer s.hm.Unlock() @@ -396,8 +424,15 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { ) } + bid := getBucketsIdentity(b) + storage, ok := s.bucketCache[bid] + if !ok { + storage = newBucketStorage(htype, b, cachedHistogram) + s.bucketCache[bid] = storage + } + h := newHistogram( - s.fullyQualifiedName(name), s.tags, s.reporter, b, cachedHistogram, + htype, s.fullyQualifiedName(name), s.tags, s.reporter, storage, ) s.histograms[name] = h s.histogramsSlice = append(s.histogramsSlice, h) diff --git a/scope_registry.go b/scope_registry.go index 847a2327..915a65ee 100644 --- a/scope_registry.go +++ b/scope_registry.go @@ -102,6 +102,7 @@ func (r *scopeRegistry) Subscope(parent *scope, prefix string, tags map[string]s histograms: make(map[string]*histogram), histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), timers: make(map[string]*timer), + bucketCache: parent.bucketCache, } r.subscopes[key] = subscope return subscope diff --git a/scope_test.go b/scope_test.go index 484f9b27..28c65a13 100644 --- a/scope_test.go +++ b/scope_test.go @@ -388,21 +388,31 @@ func TestWriteOnce(t *testing.T) { r.hg.Add(1) s.Histogram("baz", MustMakeLinearValueBuckets(0, 10, 10)). 
RecordValue(42.42) + r.hg.Add(1) + s.Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(2.1) + r.hg.Add(1) + s.SubScope("test").Histogram("qux", MustMakeLinearValueBuckets(100, 10, 3)).RecordValue(135.0) + + s.reportLoopRun() - s.report(r) r.WaitAll() assert.EqualValues(t, 1, r.counters["bar"].val) assert.EqualValues(t, 1, r.gauges["zed"].val) assert.EqualValues(t, time.Millisecond*175, r.timers["ticky"].val) assert.EqualValues(t, 1, r.histograms["baz"].valueSamples[50.0]) + assert.EqualValues(t, 1, r.histograms["bat"].valueSamples[3.0]) + assert.EqualValues(t, 1, r.histograms["test.qux"].valueSamples[math.MaxFloat64]) r = newTestStatsReporter() - s.report(r) + s.reportLoopRun() assert.Nil(t, r.counters["bar"]) assert.Nil(t, r.gauges["zed"]) assert.Nil(t, r.timers["ticky"]) + assert.Nil(t, r.histograms["baz"]) + assert.Nil(t, r.histograms["bat"]) + assert.Nil(t, r.histograms["test.qux"]) } func TestCounterSanitized(t *testing.T) { diff --git a/stats.go b/stats.go index 0559367a..9f36af1c 100644 --- a/stats.go +++ b/stats.go @@ -262,6 +262,7 @@ type histogram struct { reporter StatsReporter specification Buckets buckets []histogramBucket + samples []*counter lookupByValue []float64 lookupByDuration []int } @@ -273,68 +274,35 @@ const ( durationHistogramType ) -func newHistogram( +func newHistogram( // need to be able to reuse internal histogram buckets and lookups + htype histogramType, name string, tags map[string]string, reporter StatsReporter, - buckets Buckets, - cachedHistogram CachedHistogram, + storage bucketStorage, ) *histogram { - var ( - pairs = BucketPairs(buckets) - htype = valueHistogramType - ) - - if _, ok := buckets.(DurationBuckets); ok { - htype = durationHistogramType - } - h := &histogram{ - htype: htype, - name: name, - tags: tags, - reporter: reporter, - specification: buckets, - buckets: make([]histogramBucket, 0, len(pairs)), + htype: htype, + name: name, + tags: tags, + reporter: reporter, + specification: storage.buckets, + buckets: storage.hbuckets, + samples: make([]*counter, len(storage.hbuckets)), + lookupByValue: storage.lookupByValue, + lookupByDuration: storage.lookupByDuration, } - switch htype { - case valueHistogramType: - h.lookupByValue = make([]float64, 0, len(pairs)) - case durationHistogramType: - h.lookupByDuration = make([]int, 0, len(pairs)) - } - - for _, pair := range pairs { - h.addBucket(newHistogramBucket( - h, - pair.LowerBoundValue(), - pair.UpperBoundValue(), - pair.LowerBoundDuration(), - pair.UpperBoundDuration(), - cachedHistogram, - )) + for i := range h.samples { + h.samples[i] = newCounter(nil) } return h } -func (h *histogram) addBucket(b histogramBucket) { - h.buckets = append(h.buckets, b) - - switch h.htype { - case durationHistogramType: - h.lookupByDuration = append(h.lookupByDuration, int(b.durationUpperBound)) - case valueHistogramType: - h.lookupByValue = append(h.lookupByValue, b.valueUpperBound) - default: - // nop - } -} - func (h *histogram) report(name string, tags map[string]string, r StatsReporter) { for i := range h.buckets { - samples := h.buckets[i].samples.value() + samples := h.samples[i].value() if samples == 0 { continue } @@ -364,7 +332,7 @@ func (h *histogram) report(name string, tags map[string]string, r StatsReporter) func (h *histogram) cachedReport() { for i := range h.buckets { - samples := h.buckets[i].samples.value() + samples := h.samples[i].value() if samples == 0 { continue } @@ -388,7 +356,7 @@ func (h *histogram) RecordValue(value float64) { // buckets there will always be an 
inclusive bucket as // we always have a math.MaxFloat64 bucket. idx := sort.SearchFloat64s(h.lookupByValue, value) - h.buckets[idx].samples.Inc(1) + h.samples[idx].Inc(1) } func (h *histogram) RecordDuration(value time.Duration) { @@ -401,7 +369,7 @@ func (h *histogram) RecordDuration(value time.Duration) { // buckets there will always be an inclusive bucket as // we always have a math.MaxInt64 bucket. idx := sort.SearchInts(h.lookupByDuration, int(value)) - h.buckets[idx].samples.Inc(1) + h.samples[idx].Inc(1) } func (h *histogram) Start() Stopwatch { @@ -420,7 +388,7 @@ func (h *histogram) snapshotValues() map[float64]int64 { vals := make(map[float64]int64, len(h.buckets)) for i := range h.buckets { - vals[h.buckets[i].valueUpperBound] = h.buckets[i].samples.snapshot() + vals[h.buckets[i].valueUpperBound] = h.samples[i].snapshot() } return vals @@ -433,15 +401,13 @@ func (h *histogram) snapshotDurations() map[time.Duration]int64 { durations := make(map[time.Duration]int64, len(h.buckets)) for i := range h.buckets { - durations[h.buckets[i].durationUpperBound] = h.buckets[i].samples.snapshot() + durations[h.buckets[i].durationUpperBound] = h.samples[i].snapshot() } return durations } type histogramBucket struct { - h *histogram - samples *counter valueLowerBound float64 valueUpperBound float64 durationLowerBound time.Duration @@ -450,37 +416,62 @@ type histogramBucket struct { cachedDurationBucket CachedHistogramBucket } -func newHistogramBucket( - h *histogram, - valueLowerBound float64, - valueUpperBound float64, - durationLowerBound time.Duration, - durationUpperBound time.Duration, +type bucketStorage struct { + buckets Buckets + hbuckets []histogramBucket + lookupByValue []float64 + lookupByDuration []int +} + +func newBucketStorage( + htype histogramType, + buckets Buckets, cachedHistogram CachedHistogram, -) histogramBucket { - bucket := histogramBucket{ - samples: newCounter(nil), - valueLowerBound: valueLowerBound, - valueUpperBound: valueUpperBound, - durationLowerBound: durationLowerBound, - durationUpperBound: durationUpperBound, +) bucketStorage { + var ( + pairs = BucketPairs(buckets) + storage bucketStorage + ) + + storage.buckets = buckets + storage.hbuckets = make([]histogramBucket, 0, len(pairs)) + + switch htype { + case valueHistogramType: + storage.lookupByValue = make([]float64, 0, len(pairs)) + case durationHistogramType: + storage.lookupByDuration = make([]int, 0, len(pairs)) } - if cachedHistogram != nil { - if h.htype == valueHistogramType { - bucket.cachedValueBucket = cachedHistogram.ValueBucket( - bucket.valueLowerBound, bucket.valueUpperBound, - ) + for _, pair := range pairs { + bucket := histogramBucket{ + valueLowerBound: pair.LowerBoundValue(), + valueUpperBound: pair.UpperBoundValue(), + durationLowerBound: pair.LowerBoundDuration(), + durationUpperBound: pair.UpperBoundDuration(), } - if h.htype == durationHistogramType { - bucket.cachedDurationBucket = cachedHistogram.DurationBucket( - bucket.durationLowerBound, bucket.durationUpperBound, - ) + switch htype { + case valueHistogramType: + if cachedHistogram != nil { + bucket.cachedValueBucket = cachedHistogram.ValueBucket( + bucket.valueLowerBound, bucket.valueUpperBound, + ) + } + storage.lookupByValue = append(storage.lookupByValue, bucket.valueUpperBound) + case durationHistogramType: + if cachedHistogram != nil { + bucket.cachedDurationBucket = cachedHistogram.DurationBucket( + bucket.durationLowerBound, bucket.durationUpperBound, + ) + } + storage.lookupByDuration = append(storage.lookupByDuration, 
int(bucket.durationUpperBound)) } + + storage.hbuckets = append(storage.hbuckets, bucket) } - return bucket + return storage } // NullStatsReporter is an implementation of StatsReporter than simply does nothing. diff --git a/stats_test.go b/stats_test.go index e1108d74..82059768 100644 --- a/stats_test.go +++ b/stats_test.go @@ -128,7 +128,8 @@ func TestTimer(t *testing.T) { func TestHistogramValueSamples(t *testing.T) { r := newStatsTestReporter() buckets := MustMakeLinearValueBuckets(0, 10, 10) - h := newHistogram("h1", nil, r, buckets, nil) + storage := newBucketStorage(valueHistogramType, buckets, nil) + h := newHistogram(valueHistogramType, "h1", nil, r, storage) var offset float64 for i := 0; i < 3; i++ { @@ -149,7 +150,8 @@ func TestHistogramValueSamples(t *testing.T) { func TestHistogramDurationSamples(t *testing.T) { r := newStatsTestReporter() buckets := MustMakeLinearDurationBuckets(0, 10*time.Millisecond, 10) - h := newHistogram("h1", nil, r, buckets, nil) + storage := newBucketStorage(durationHistogramType, buckets, nil) + h := newHistogram(durationHistogramType, "h1", nil, r, storage) var offset time.Duration for i := 0; i < 3; i++ { From 63e0ac52ff178db8f8065a75af18d9fb348f002b Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 12:53:41 -0500 Subject: [PATCH 03/25] Tests and lint --- Makefile | 2 +- internal/identity/accumulator.go | 20 ++++++++++++++++++++ m3/reporter_test.go | 2 -- m3/thrift/v1/constants.go | 20 ++++++++++++++++++++ m3/thrift/v1/m3.go | 20 ++++++++++++++++++++ m3/thrift/v1/ttypes.go | 20 ++++++++++++++++++++ m3/thrift/v2/constants.go | 20 ++++++++++++++++++++ m3/thrift/v2/m3.go | 20 ++++++++++++++++++++ m3/thrift/v2/ttypes.go | 20 ++++++++++++++++++++ scope_test.go | 9 +++++++++ 10 files changed, 150 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 6b268caf..4f3134e8 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ export GO15VENDOREXPERIMENT=1 BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem PKGS ?= $(shell glide novendor) -PKG_FILES ?= *.go example/*.go m3 +PKG_FILES ?= *.go example/*.go m3/*.go m3/customtransports m3/thriftudp LINT_IGNORE = m3/thrift\|thirdparty LICENSE_IGNORE = thirdparty diff --git a/internal/identity/accumulator.go b/internal/identity/accumulator.go index 7a8194cd..09764226 100644 --- a/internal/identity/accumulator.go +++ b/internal/identity/accumulator.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + package identity import ( diff --git a/m3/reporter_test.go b/m3/reporter_test.go index 9e55a097..ad631a97 100644 --- a/m3/reporter_test.go +++ b/m3/reporter_test.go @@ -22,7 +22,6 @@ package m3 import ( "bytes" - "fmt" "math/rand" "net" "os" @@ -321,7 +320,6 @@ func TestReporterHistogram(t *testing.T) { require.Equal(t, "my-histogram", counter.GetName()) require.True(t, counter.IsSetTags()) for _, tag := range counter.GetTags() { - fmt.Println("Checking tag", tag.GetName(), tag.GetValue()) require.Equal(t, map[string]string{ "foo": "bar", "bucketid": "0001", diff --git a/m3/thrift/v1/constants.go b/m3/thrift/v1/constants.go index d50c0b96..8fff86e7 100644 --- a/m3/thrift/v1/constants.go +++ b/m3/thrift/v1/constants.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/m3/thrift/v1/m3.go b/m3/thrift/v1/m3.go index 3339d84b..51aa9542 100644 --- a/m3/thrift/v1/m3.go +++ b/m3/thrift/v1/m3.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/m3/thrift/v1/ttypes.go b/m3/thrift/v1/ttypes.go index a5888114..2c3991ac 100644 --- a/m3/thrift/v1/ttypes.go +++ b/m3/thrift/v1/ttypes.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/m3/thrift/v2/constants.go b/m3/thrift/v2/constants.go index 1e7ad2d6..dc44663f 100644 --- a/m3/thrift/v2/constants.go +++ b/m3/thrift/v2/constants.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/m3/thrift/v2/m3.go b/m3/thrift/v2/m3.go index f0a288ca..cb8e5f74 100644 --- a/m3/thrift/v2/m3.go +++ b/m3/thrift/v2/m3.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/m3/thrift/v2/ttypes.go b/m3/thrift/v2/ttypes.go index 3e6c33b8..02e8d23e 100644 --- a/m3/thrift/v2/ttypes.go +++ b/m3/thrift/v2/ttypes.go @@ -1,3 +1,23 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ // Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING diff --git a/scope_test.go b/scope_test.go index 28c65a13..72a7dc18 100644 --- a/scope_test.go +++ b/scope_test.go @@ -391,7 +391,13 @@ func TestWriteOnce(t *testing.T) { r.hg.Add(1) s.Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(2.1) r.hg.Add(1) + s.SubScope("test").Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(1.1) + r.hg.Add(1) + s.SubScope("test").Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(2.1) + r.hg.Add(1) s.SubScope("test").Histogram("qux", MustMakeLinearValueBuckets(100, 10, 3)).RecordValue(135.0) + r.hg.Add(1) + s.SubScope("test").Histogram("quux", MustMakeLinearValueBuckets(100, 10, 3)).RecordValue(101.0) s.reportLoopRun() @@ -402,7 +408,10 @@ func TestWriteOnce(t *testing.T) { assert.EqualValues(t, time.Millisecond*175, r.timers["ticky"].val) assert.EqualValues(t, 1, r.histograms["baz"].valueSamples[50.0]) assert.EqualValues(t, 1, r.histograms["bat"].valueSamples[3.0]) + assert.EqualValues(t, 1, r.histograms["test.bat"].valueSamples[2.0]) + assert.EqualValues(t, 1, r.histograms["test.bat"].valueSamples[3.0]) assert.EqualValues(t, 1, r.histograms["test.qux"].valueSamples[math.MaxFloat64]) + assert.EqualValues(t, 1, r.histograms["test.quux"].valueSamples[110.0]) r = newTestStatsReporter() s.reportLoopRun() From a0e46f574c5807cbb1b241b72695442efe7f2c90 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 13:50:13 -0500 Subject: [PATCH 04/25] Build 1.15.x, remove tip build --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1b1e8c16..4b0ee7f0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: false go: - 1.13.x - 1.14.x - - tip + - 1.15.xgi env: global: - GO15VENDOREXPERIMENT=1 From 70f45551d0a7d6b8b389509670ac328b668efb39 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 13:51:35 -0500 Subject: [PATCH 05/25] Fix 1.15.x build version --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4b0ee7f0..a2ea531a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: false go: - 1.13.x - 1.14.x - - 1.15.xgi + - 1.15.x env: global: - GO15VENDOREXPERIMENT=1 From 9c54efe672bc22d6692823ff53f4b9fca969498e Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 14:10:47 -0500 Subject: [PATCH 06/25] Upstream patches for 1.15.x --- thirdparty/github.com/apache/thrift/lib/go/thrift/field.go | 4 +++- .../apache/thrift/lib/go/thrift/json_protocol_test.go | 2 +- thirdparty/github.com/apache/thrift/lib/go/thrift/numeric.go | 4 ++-- .../apache/thrift/lib/go/thrift/simple_json_protocol_test.go | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/thirdparty/github.com/apache/thrift/lib/go/thrift/field.go b/thirdparty/github.com/apache/thrift/lib/go/thrift/field.go index 9d665255..98c5aa12 100755 --- a/thirdparty/github.com/apache/thrift/lib/go/thrift/field.go +++ b/thirdparty/github.com/apache/thrift/lib/go/thrift/field.go @@ -19,6 +19,8 @@ package thrift +import "strconv" + // Helper class that encapsulates field metadata. 
 type field struct {
 	name   string
@@ -55,7 +57,7 @@ func (p *field) String() string {
 	if p == nil {
 		return "<nil>"
 	}
-	return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
+	return "<TField name:'" + p.name + "' type:" + strconv.Itoa(int(p.typeId)) + " field-id:" + strconv.Itoa(int(p.id)) + ">"
 }
 
 var ANONYMOUS_FIELD *field
diff --git a/thirdparty/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go b/thirdparty/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
index 4dcdeede..690a1c2e 100755
--- a/thirdparty/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
+++ b/thirdparty/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
@@ -608,7 +608,7 @@ func TestWriteJSONProtocolMap(t *testing.T) {
 	for k, value := range DOUBLE_VALUES {
 		ik, err := p.ReadI32()
 		if err != nil {
-			t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, ik, string(k), err.Error())
+			t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, ik, strconv.Itoa(k), err.Error())
 		}
 		if int(ik) != k {
 			t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v", thetype, k, ik, k)
diff --git a/thirdparty/github.com/apache/thrift/lib/go/thrift/numeric.go b/thirdparty/github.com/apache/thrift/lib/go/thrift/numeric.go
index aa8daa9b..e4512d20 100755
--- a/thirdparty/github.com/apache/thrift/lib/go/thrift/numeric.go
+++ b/thirdparty/github.com/apache/thrift/lib/go/thrift/numeric.go
@@ -69,14 +69,14 @@ func NewNumericFromDouble(dValue float64) Numeric {
 
 func NewNumericFromI64(iValue int64) Numeric {
 	dValue := float64(iValue)
-	sValue := string(iValue)
+	sValue := strconv.FormatInt(iValue, 10)
 	isNil := false
 	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
 }
 
 func NewNumericFromI32(iValue int32) Numeric {
 	dValue := float64(iValue)
-	sValue := string(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10)
 	isNil := false
 	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
 }
diff --git a/thirdparty/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go b/thirdparty/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go
index 35d39c60..5d2f3226 100755
--- a/thirdparty/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go
+++ b/thirdparty/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go
@@ -681,7 +681,7 @@ func TestWriteSimpleJSONProtocolMap(t *testing.T) {
 		strv := l[k*2+4]
 		ik, err := strconv.Atoi(strk)
 		if err != nil {
-			t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, strk, string(k), err.Error())
+			t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, strk, strconv.Itoa(k), err.Error())
 		}
 		if ik != k {
 			t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v", thetype, k, strk, k)
From b199e32fb0befca7e6d9a9feb9ed5bfd737ae406 Mon Sep 17 00:00:00 2001
From: Matt Way
Date: Mon, 22 Feb 2021 14:28:17 -0500
Subject: [PATCH 07/25] Remove 1.13.x build, add 1.16.x

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index a2ea531a..990e93bd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,9 @@
 language: go
 sudo: false
 go:
-  - 1.13.x
   - 1.14.x
   - 1.15.x
+  - 1.16.x
 env:
   global:
     - GO15VENDOREXPERIMENT=1
From b0e4b2707dc3fe60200cd7d35096edcc9e1010bf Mon Sep 17 00:00:00 2001
From: Matt Way
Date: Mon, 22 Feb 2021 14:38:16 -0500
Subject: [PATCH 08/25] More cleanup

---
 m3/reporter.go                 | 46 +++++++++++++---------------------
 m3/thriftudp/transport_test.go |  2 --
 scope.go                       |  4 +++
 3 files changed, 22 insertions(+), 30 deletions(-)

diff --git a/m3/reporter.go b/m3/reporter.go
index 
c13db93f..52c3c405 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -38,10 +38,6 @@ import ( "github.com/uber-go/tally/thirdparty/github.com/apache/thrift/lib/go/thrift" ) -// need to pool: -// []MetricTag -// []Metric - // Protocol describes a M3 thrift transport protocol. type Protocol int @@ -105,23 +101,19 @@ type Reporter interface { // remote M3 collector, metrics are batched together and emitted // via either thrift compact or binary protocol in batch UDP packets. type reporter struct { - client *m3thrift.M3Client - // curBatch *m3thrift.MetricBatch - // curBatchLock sync.Mutex - calc *customtransport.TCalcTransport - calcProto thrift.TProtocol - calcLock sync.Mutex - // commonTags map[*m3thrift.MetricTag]bool + bucketIDTagName string + bucketTagName string + bucketValFmt string + calc *customtransport.TCalcTransport + calcLock sync.Mutex + calcProto thrift.TProtocol + client *m3thrift.M3Client commonTags []m3thrift.MetricTag freeBytes int32 + metCh chan sizedMetric processors sync.WaitGroup resourcePool *resourcePool - bucketIDTagName string - bucketTagName string - bucketValFmt string - - status reporterStatus - metCh chan sizedMetric + status reporterStatus } type reporterStatus struct { @@ -191,32 +183,30 @@ func NewReporter(opts Options) (Reporter, error) { ) // Create common tags - // tags := resourcePool.getTagList() for k, v := range opts.CommonTags { tagm[k] = v - // tags[createTag(resourcePool, k, v)] = true } + if opts.CommonTags[ServiceTag] == "" { if opts.Service == "" { return nil, fmt.Errorf("%s common tag is required", ServiceTag) } - // tags[createTag(resourcePool, ServiceTag, opts.Service)] = true tagm[ServiceTag] = opts.Service } + if opts.CommonTags[EnvTag] == "" { if opts.Env == "" { return nil, fmt.Errorf("%s common tag is required", EnvTag) } - // tags[createTag(resourcePool, EnvTag, opts.Env)] = true tagm[EnvTag] = opts.Env } + if opts.IncludeHost { if opts.CommonTags[HostTag] == "" { hostname, err := os.Hostname() if err != nil { return nil, errors.WithMessage(err, "error resolving host tag") } - // tags[createTag(resourcePool, HostTag, hostname)] = true tagm[HostTag] = hostname } } @@ -258,16 +248,16 @@ func NewReporter(opts Options) (Reporter, error) { } r := &reporter{ - client: client, + bucketIDTagName: opts.HistogramBucketIDName, + bucketTagName: opts.HistogramBucketName, + bucketValFmt: "%." + strconv.Itoa(int(opts.HistogramBucketTagPrecision)) + "f", calc: calc, calcProto: proto, + client: client, commonTags: tags, freeBytes: freeBytes, - resourcePool: resourcePool, - bucketIDTagName: opts.HistogramBucketIDName, - bucketTagName: opts.HistogramBucketName, - bucketValFmt: "%." 
+ strconv.Itoa(int(opts.HistogramBucketTagPrecision)) + "f", metCh: make(chan sizedMetric, opts.MaxQueueSize), + resourcePool: resourcePool, } r.processors.Add(1) @@ -507,7 +497,7 @@ func (r *reporter) reportCopyMetric(m m3thrift.Metric, size int32) { select { case r.metCh <- sm: default: - // TODO + // TODO: don't drop when full, or add metric to track } } r.status.RUnlock() diff --git a/m3/thriftudp/transport_test.go b/m3/thriftudp/transport_test.go index f3ee3bc4..ccf02a24 100644 --- a/m3/thriftudp/transport_test.go +++ b/m3/thriftudp/transport_test.go @@ -280,8 +280,6 @@ func TestFlushErrors(t *testing.T) { _, err = trans.Write([]byte{1, 2, 3, 4}) require.NoError(t, err) - - // err = trans.Flush() require.Error(t, trans.Flush(), "Flush with data should fail") }) } diff --git a/scope.go b/scope.go index 950c8d5f..3b5f803e 100644 --- a/scope.go +++ b/scope.go @@ -91,6 +91,10 @@ type scope struct { bucketCache map[uint64]bucketStorage } +// n.b. This function is used to uniquely identify a given set of buckets +// commutatively through hash folding, in order to do cache lookups and +// avoid allocating additional storage for data that is shared among all +// instances of a particular set of buckets. func getBucketsIdentity(buckets Buckets) uint64 { acc := identity.NewAccumulator() From 31c8870e842f69b6474b1d212e8c19971341e1d0 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 15:59:14 -0500 Subject: [PATCH 09/25] Re-add v2.thrift --- m3/thrift/thrift.diff | 190 +++++++++++++++++++++++++++++++++++++++++ m3/thrift/v2/ttypes.go | 2 +- m3/thrift/v2/v2.thrift | 34 ++++++++ 3 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 m3/thrift/thrift.diff create mode 100644 m3/thrift/v2/v2.thrift diff --git a/m3/thrift/thrift.diff b/m3/thrift/thrift.diff new file mode 100644 index 00000000..92087e3f --- /dev/null +++ b/m3/thrift/thrift.diff @@ -0,0 +1,190 @@ +diff --git a/m3/thrift/v2/m3.go b/m3/thrift/v2/m3.go +index fc46a31..f0a288c 100644 +--- a/m3/thrift/v2/m3.go ++++ b/m3/thrift/v2/m3.go +@@ -17,7 +17,7 @@ var _ = bytes.Equal + type M3 interface { + // Parameters: + // - Batch +- EmitMetricBatch(batch *MetricBatch) (err error) ++ EmitMetricBatch(batch MetricBatch) (err error) + } + + type M3Client struct { +@@ -48,14 +48,14 @@ func NewM3ClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thri + + // Parameters: + // - Batch +-func (p *M3Client) EmitMetricBatch(batch *MetricBatch) (err error) { ++func (p *M3Client) EmitMetricBatch(batch MetricBatch) (err error) { + if err = p.sendEmitMetricBatch(batch); err != nil { + return + } + return + } + +-func (p *M3Client) sendEmitMetricBatch(batch *MetricBatch) (err error) { ++func (p *M3Client) sendEmitMetricBatch(batch MetricBatch) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) +@@ -145,23 +145,23 @@ func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TP + // Attributes: + // - Batch + type M3EmitMetricBatchArgs struct { +- Batch *MetricBatch `thrift:"batch,1" json:"batch"` ++ Batch MetricBatch `thrift:"batch,1" json:"batch"` + } + + func NewM3EmitMetricBatchArgs() *M3EmitMetricBatchArgs { + return &M3EmitMetricBatchArgs{} + } + +-var M3EmitMetricBatchArgs_Batch_DEFAULT *MetricBatch ++var M3EmitMetricBatchArgs_Batch_DEFAULT MetricBatch + +-func (p *M3EmitMetricBatchArgs) GetBatch() *MetricBatch { ++func (p *M3EmitMetricBatchArgs) GetBatch() MetricBatch { + if !p.IsSetBatch() { + return M3EmitMetricBatchArgs_Batch_DEFAULT 
+ } + return p.Batch + } + func (p *M3EmitMetricBatchArgs) IsSetBatch() bool { +- return p.Batch != nil ++ return p.Batch.Metrics != nil || p.Batch.CommonTags != nil + } + + func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { +@@ -198,7 +198,7 @@ func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { + } + + func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error { +- p.Batch = &MetricBatch{} ++ p.Batch = MetricBatch{} + if err := p.Batch.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } +diff --git a/m3/thrift/v2/ttypes.go b/m3/thrift/v2/ttypes.go +index 5f83caa..3e6c33b 100644 +--- a/m3/thrift/v2/ttypes.go ++++ b/m3/thrift/v2/ttypes.go +@@ -429,10 +429,10 @@ func (p *MetricTag) String() string { + // - Timestamp + // - Tags + type Metric struct { +- Name string `thrift:"name,1,required" json:"name"` +- Value *MetricValue `thrift:"value,2,required" json:"value"` +- Timestamp int64 `thrift:"timestamp,3,required" json:"timestamp"` +- Tags []*MetricTag `thrift:"tags,4" json:"tags,omitempty"` ++ Name string `thrift:"name,1,required" json:"name"` ++ Value MetricValue `thrift:"value,2,required" json:"value"` ++ Timestamp int64 `thrift:"timestamp,3,required" json:"timestamp"` ++ Tags []MetricTag `thrift:"tags,4" json:"tags,omitempty"` + } + + func NewMetric() *Metric { +@@ -443,9 +443,9 @@ func (p *Metric) GetName() string { + return p.Name + } + +-var Metric_Value_DEFAULT *MetricValue ++var Metric_Value_DEFAULT MetricValue + +-func (p *Metric) GetValue() *MetricValue { ++func (p *Metric) GetValue() MetricValue { + if !p.IsSetValue() { + return Metric_Value_DEFAULT + } +@@ -456,13 +456,13 @@ func (p *Metric) GetTimestamp() int64 { + return p.Timestamp + } + +-var Metric_Tags_DEFAULT []*MetricTag ++var Metric_Tags_DEFAULT []MetricTag + +-func (p *Metric) GetTags() []*MetricTag { ++func (p *Metric) GetTags() []MetricTag { + return p.Tags + } + func (p *Metric) IsSetValue() bool { +- return p.Value != nil ++ return p.Value.GetMetricType() != MetricType_INVALID || p.Value.Count != 0 || p.Value.Gauge != 0 || p.Value.Timer != 0 + } + + func (p *Metric) IsSetTags() bool { +@@ -540,7 +540,7 @@ func (p *Metric) readField1(iprot thrift.TProtocol) error { + } + + func (p *Metric) readField2(iprot thrift.TProtocol) error { +- p.Value = &MetricValue{} ++ p.Value = MetricValue{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } +@@ -561,10 +561,10 @@ func (p *Metric) readField4(iprot thrift.TProtocol) error { + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } +- tSlice := make([]*MetricTag, 0, size) ++ tSlice := make([]MetricTag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { +- _elem0 := &MetricTag{} ++ _elem0 := MetricTag{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } +@@ -674,21 +674,21 @@ func (p *Metric) String() string { + // - Metrics + // - CommonTags + type MetricBatch struct { +- Metrics []*Metric `thrift:"metrics,1,required" json:"metrics"` +- CommonTags []*MetricTag `thrift:"commonTags,2" json:"commonTags,omitempty"` ++ Metrics []Metric `thrift:"metrics,1,required" json:"metrics"` ++ CommonTags []MetricTag `thrift:"commonTags,2" json:"commonTags,omitempty"` + } + + func NewMetricBatch() *MetricBatch { + return &MetricBatch{} + } + +-func (p *MetricBatch) GetMetrics() 
[]*Metric { ++func (p *MetricBatch) GetMetrics() []Metric { + return p.Metrics + } + +-var MetricBatch_CommonTags_DEFAULT []*MetricTag ++var MetricBatch_CommonTags_DEFAULT []MetricTag + +-func (p *MetricBatch) GetCommonTags() []*MetricTag { ++func (p *MetricBatch) GetCommonTags() []MetricTag { + return p.CommonTags + } + func (p *MetricBatch) IsSetCommonTags() bool { +@@ -743,10 +743,10 @@ func (p *MetricBatch) readField1(iprot thrift.TProtocol) error { + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } +- tSlice := make([]*Metric, 0, size) ++ tSlice := make([]Metric, 0, size) + p.Metrics = tSlice + for i := 0; i < size; i++ { +- _elem1 := &Metric{} ++ _elem1 := Metric{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } +@@ -763,10 +763,10 @@ func (p *MetricBatch) readField2(iprot thrift.TProtocol) error { + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } +- tSlice := make([]*MetricTag, 0, size) ++ tSlice := make([]MetricTag, 0, size) + p.CommonTags = tSlice + for i := 0; i < size; i++ { +- _elem2 := &MetricTag{} ++ _elem2 := MetricTag{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } diff --git a/m3/thrift/v2/ttypes.go b/m3/thrift/v2/ttypes.go index 02e8d23e..e1b429bf 100644 --- a/m3/thrift/v2/ttypes.go +++ b/m3/thrift/v2/ttypes.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -// Autogenerated by Thrift Compiler (0.9.3) +// autogenerated by thrift compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package v2 diff --git a/m3/thrift/v2/v2.thrift b/m3/thrift/v2/v2.thrift new file mode 100644 index 00000000..b10df82f --- /dev/null +++ b/m3/thrift/v2/v2.thrift @@ -0,0 +1,34 @@ +enum MetricType { + INVALID = 0 + COUNTER = 1 + GAUGE = 2 + TIMER = 3 +} + +struct MetricValue { + 1: required MetricType metricType + 2: required i64 count + 3: required double gauge + 4: required i64 timer +} + +struct MetricTag { + 1: required string name + 2: required string value +} + +struct Metric { + 1: required string name + 2: required MetricValue value + 3: required i64 timestamp + 4: optional list tags +} + +struct MetricBatch { + 1: required list metrics + 2: optional list commonTags +} + +service M3 { + oneway void emitMetricBatch(1: MetricBatch batch) +} \ No newline at end of file From f60b3b7889107845aef31e7a8026a5af70174f4f Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 22 Feb 2021 17:53:52 -0500 Subject: [PATCH 10/25] feedback --- internal/identity/accumulator.go | 6 ++ m3/reporter.go | 94 ++++++++++++-------------------- m3/thrift/v2/v2.thrift | 12 ++-- 3 files changed, 48 insertions(+), 64 deletions(-) diff --git a/internal/identity/accumulator.go b/internal/identity/accumulator.go index 09764226..1f4ba667 100644 --- a/internal/identity/accumulator.go +++ b/internal/identity/accumulator.go @@ -33,6 +33,12 @@ const ( type Accumulator uint64 // NewAccumulator creates a new Accumulator with a default seed value. +// +// n.b. Here and elsewhere, we use nosplit to avoid stack size checks, which +// are unnecessary as memory width is bounded to each instance of `a` (a +// uint64) and, potentially, a single stack-local loop temporary while +// iterating. 
+// //go:nosplit func NewAccumulator() Accumulator { return Accumulator(_hashSeed) diff --git a/m3/reporter.go b/m3/reporter.go index 52c3c405..beeed3c4 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -348,61 +348,38 @@ func (r *reporter) AllocateHistogram( } for i, pair := range tally.BucketPairs(buckets) { - idTagValue := fmt.Sprintf(bucketIDFmt, i) + var ( + counter = r.allocateCounter(name, htags) + idTagValue = fmt.Sprintf(bucketIDFmt, i) + hbucket = cachedHistogramBucket{ + valueUpperBound: pair.UpperBoundValue(), + durationUpperBound: pair.UpperBoundDuration(), + metric: counter, + } + ) + hbucket.metric.metric.Tags = append( + hbucket.metric.metric.Tags, + m3thrift.MetricTag{ + Name: r.bucketIDTagName, + Value: idTagValue, + }, + m3thrift.MetricTag{ + Name: r.bucketTagName, + }, + ) + + bucketIdx := len(hbucket.metric.metric.Tags) - 1 if isDuration { - var ( - counter = r.allocateCounter(name, htags) - bucket = r.durationBucketString(pair.LowerBoundDuration()) + + hbucket.metric.metric.Tags[bucketIdx].Value = + r.durationBucketString(pair.LowerBoundDuration()) + "-" + r.durationBucketString(pair.UpperBoundDuration()) - ) - counter.metric.Tags = append( - counter.metric.Tags, - m3thrift.MetricTag{ - Name: r.bucketIDTagName, - Value: idTagValue, - }, - m3thrift.MetricTag{ - Name: r.bucketTagName, - Value: bucket, - }, - ) - - cachedDurationBuckets = append( - cachedDurationBuckets, - cachedHistogramBucket{ - valueUpperBound: pair.UpperBoundValue(), - durationUpperBound: pair.UpperBoundDuration(), - metric: counter, - }, - ) + cachedDurationBuckets = append(cachedDurationBuckets, hbucket) } else { - var ( - counter = r.allocateCounter(name, htags) - bucket = r.valueBucketString(pair.LowerBoundValue()) + + hbucket.metric.metric.Tags[bucketIdx].Value = + r.valueBucketString(pair.LowerBoundValue()) + "-" + r.valueBucketString(pair.UpperBoundValue()) - ) - - counter.metric.Tags = append( - counter.metric.Tags, - m3thrift.MetricTag{ - Name: r.bucketIDTagName, - Value: idTagValue, - }, - m3thrift.MetricTag{ - Name: r.bucketTagName, - Value: bucket, - }, - ) - - cachedValueBuckets = append( - cachedValueBuckets, - cachedHistogramBucket{ - valueUpperBound: pair.UpperBoundValue(), - durationUpperBound: pair.UpperBoundDuration(), - metric: counter, - }, - ) + cachedValueBuckets = append(cachedValueBuckets, hbucket) } } @@ -460,15 +437,16 @@ func (r *reporter) newMetric( m.Value.Timer = _maxInt64 } - if len(tags) > 0 { - m.Tags = r.resourcePool.getMetricTagSlice() + if len(tags) == 0 { + return m + } - for k, v := range tags { - m.Tags = append(m.Tags, m3thrift.MetricTag{ - Name: k, - Value: v, - }) - } + m.Tags = r.resourcePool.getMetricTagSlice() + for k, v := range tags { + m.Tags = append(m.Tags, m3thrift.MetricTag{ + Name: k, + Value: v, + }) } return m diff --git a/m3/thrift/v2/v2.thrift b/m3/thrift/v2/v2.thrift index b10df82f..25c1dccc 100644 --- a/m3/thrift/v2/v2.thrift +++ b/m3/thrift/v2/v2.thrift @@ -13,22 +13,22 @@ struct MetricValue { } struct MetricTag { - 1: required string name - 2: required string value + 1: required string name + 2: required string value } struct Metric { - 1: required string name + 1: required string name 2: required MetricValue value 3: required i64 timestamp 4: optional list tags } struct MetricBatch { - 1: required list metrics - 2: optional list commonTags + 1: required list metrics + 2: optional list commonTags } service M3 { - oneway void emitMetricBatch(1: MetricBatch batch) + oneway void emitMetricBatch(1: MetricBatch batch) } \ No newline at end of 
file From 43f5419a64286536be37cc3d312deb823654931a Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 13:07:00 -0500 Subject: [PATCH 11/25] Rename API method --- m3/thrift/thrift.diff | 78 +++++++++++++++++++++--------------------- m3/thrift/v2/m3.go | 50 +++++++++++++-------------- m3/thrift/v2/ttypes.go | 2 +- m3/thrift/v2/v2.thrift | 2 +- 4 files changed, 66 insertions(+), 66 deletions(-) diff --git a/m3/thrift/thrift.diff b/m3/thrift/thrift.diff index 92087e3f..24d32c18 100644 --- a/m3/thrift/thrift.diff +++ b/m3/thrift/thrift.diff @@ -6,60 +6,60 @@ index fc46a31..f0a288c 100644 type M3 interface { // Parameters: // - Batch -- EmitMetricBatch(batch *MetricBatch) (err error) -+ EmitMetricBatch(batch MetricBatch) (err error) +- EmitMetricBatchV2(batch *MetricBatch) (err error) ++ EmitMetricBatchV2(batch MetricBatch) (err error) } - + type M3Client struct { @@ -48,14 +48,14 @@ func NewM3ClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thri - + // Parameters: // - Batch --func (p *M3Client) EmitMetricBatch(batch *MetricBatch) (err error) { -+func (p *M3Client) EmitMetricBatch(batch MetricBatch) (err error) { - if err = p.sendEmitMetricBatch(batch); err != nil { +-func (p *M3Client) EmitMetricBatchV2(batch *MetricBatch) (err error) { ++func (p *M3Client) EmitMetricBatchV2(batch MetricBatch) (err error) { + if err = p.sendEmitMetricBatchV2(batch); err != nil { return } return } - --func (p *M3Client) sendEmitMetricBatch(batch *MetricBatch) (err error) { -+func (p *M3Client) sendEmitMetricBatch(batch MetricBatch) (err error) { + +-func (p *M3Client) sendEmitMetricBatchV2(batch *MetricBatch) (err error) { ++func (p *M3Client) sendEmitMetricBatchV2(batch MetricBatch) (err error) { oprot := p.OutputProtocol if oprot == nil { oprot = p.ProtocolFactory.GetProtocol(p.Transport) -@@ -145,23 +145,23 @@ func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TP +@@ -145,23 +145,23 @@ func (p *m3ProcessorEmitMetricBatchV2) Process(seqId int32, iprot, oprot thrift.TP // Attributes: // - Batch - type M3EmitMetricBatchArgs struct { + type M3EmitMetricBatchV2Args struct { - Batch *MetricBatch `thrift:"batch,1" json:"batch"` + Batch MetricBatch `thrift:"batch,1" json:"batch"` } - - func NewM3EmitMetricBatchArgs() *M3EmitMetricBatchArgs { - return &M3EmitMetricBatchArgs{} + + func NewM3EmitMetricBatchV2Args() *M3EmitMetricBatchV2Args { + return &M3EmitMetricBatchV2Args{} } - --var M3EmitMetricBatchArgs_Batch_DEFAULT *MetricBatch -+var M3EmitMetricBatchArgs_Batch_DEFAULT MetricBatch - --func (p *M3EmitMetricBatchArgs) GetBatch() *MetricBatch { -+func (p *M3EmitMetricBatchArgs) GetBatch() MetricBatch { + +-var M3EmitMetricBatchV2Args_Batch_DEFAULT *MetricBatch ++var M3EmitMetricBatchV2Args_Batch_DEFAULT MetricBatch + +-func (p *M3EmitMetricBatchV2Args) GetBatch() *MetricBatch { ++func (p *M3EmitMetricBatchV2Args) GetBatch() MetricBatch { if !p.IsSetBatch() { - return M3EmitMetricBatchArgs_Batch_DEFAULT + return M3EmitMetricBatchV2Args_Batch_DEFAULT } return p.Batch } - func (p *M3EmitMetricBatchArgs) IsSetBatch() bool { + func (p *M3EmitMetricBatchV2Args) IsSetBatch() bool { - return p.Batch != nil + return p.Batch.Metrics != nil || p.Batch.CommonTags != nil } - - func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { -@@ -198,7 +198,7 @@ func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { + + func (p *M3EmitMetricBatchV2Args) Read(iprot thrift.TProtocol) error { +@@ -198,7 +198,7 @@ func (p *M3EmitMetricBatchV2Args) Read(iprot 
thrift.TProtocol) error { } - - func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error { + + func (p *M3EmitMetricBatchV2Args) readField1(iprot thrift.TProtocol) error { - p.Batch = &MetricBatch{} + p.Batch = MetricBatch{} if err := p.Batch.Read(iprot); err != nil { @@ -82,15 +82,15 @@ index 5f83caa..3e6c33b 100644 + Timestamp int64 `thrift:"timestamp,3,required" json:"timestamp"` + Tags []MetricTag `thrift:"tags,4" json:"tags,omitempty"` } - + func NewMetric() *Metric { @@ -443,9 +443,9 @@ func (p *Metric) GetName() string { return p.Name } - + -var Metric_Value_DEFAULT *MetricValue +var Metric_Value_DEFAULT MetricValue - + -func (p *Metric) GetValue() *MetricValue { +func (p *Metric) GetValue() MetricValue { if !p.IsSetValue() { @@ -99,10 +99,10 @@ index 5f83caa..3e6c33b 100644 @@ -456,13 +456,13 @@ func (p *Metric) GetTimestamp() int64 { return p.Timestamp } - + -var Metric_Tags_DEFAULT []*MetricTag +var Metric_Tags_DEFAULT []MetricTag - + -func (p *Metric) GetTags() []*MetricTag { +func (p *Metric) GetTags() []MetricTag { return p.Tags @@ -111,11 +111,11 @@ index 5f83caa..3e6c33b 100644 - return p.Value != nil + return p.Value.GetMetricType() != MetricType_INVALID || p.Value.Count != 0 || p.Value.Gauge != 0 || p.Value.Timer != 0 } - + func (p *Metric) IsSetTags() bool { @@ -540,7 +540,7 @@ func (p *Metric) readField1(iprot thrift.TProtocol) error { } - + func (p *Metric) readField2(iprot thrift.TProtocol) error { - p.Value = &MetricValue{} + p.Value = MetricValue{} @@ -144,19 +144,19 @@ index 5f83caa..3e6c33b 100644 + Metrics []Metric `thrift:"metrics,1,required" json:"metrics"` + CommonTags []MetricTag `thrift:"commonTags,2" json:"commonTags,omitempty"` } - + func NewMetricBatch() *MetricBatch { return &MetricBatch{} } - + -func (p *MetricBatch) GetMetrics() []*Metric { +func (p *MetricBatch) GetMetrics() []Metric { return p.Metrics } - + -var MetricBatch_CommonTags_DEFAULT []*MetricTag +var MetricBatch_CommonTags_DEFAULT []MetricTag - + -func (p *MetricBatch) GetCommonTags() []*MetricTag { +func (p *MetricBatch) GetCommonTags() []MetricTag { return p.CommonTags diff --git a/m3/thrift/v2/m3.go b/m3/thrift/v2/m3.go index cb8e5f74..6597a924 100644 --- a/m3/thrift/v2/m3.go +++ b/m3/thrift/v2/m3.go @@ -37,7 +37,7 @@ var _ = bytes.Equal type M3 interface { // Parameters: // - Batch - EmitMetricBatch(batch MetricBatch) (err error) + EmitMetricBatchV2(batch MetricBatch) (err error) } type M3Client struct { @@ -68,24 +68,24 @@ func NewM3ClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thri // Parameters: // - Batch -func (p *M3Client) EmitMetricBatch(batch MetricBatch) (err error) { - if err = p.sendEmitMetricBatch(batch); err != nil { +func (p *M3Client) EmitMetricBatchV2(batch MetricBatch) (err error) { + if err = p.sendEmitMetricBatchV2(batch); err != nil { return } return } -func (p *M3Client) sendEmitMetricBatch(batch MetricBatch) (err error) { +func (p *M3Client) sendEmitMetricBatchV2(batch MetricBatch) (err error) { oprot := p.OutputProtocol if oprot == nil { oprot = p.ProtocolFactory.GetProtocol(p.Transport) p.OutputProtocol = oprot } p.SeqId++ - if err = oprot.WriteMessageBegin("emitMetricBatch", thrift.ONEWAY, p.SeqId); err != nil { + if err = oprot.WriteMessageBegin("emitMetricBatchV2", thrift.ONEWAY, p.SeqId); err != nil { return } - args := M3EmitMetricBatchArgs{ + args := M3EmitMetricBatchV2Args{ Batch: batch, } if err = args.Write(oprot); err != nil { @@ -118,7 +118,7 @@ func (p *M3Processor) ProcessorMap() map[string]thrift.TProcessorFunction 
{ func NewM3Processor(handler M3) *M3Processor { self3 := &M3Processor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self3.processorMap["emitMetricBatch"] = &m3ProcessorEmitMetricBatch{handler: handler} + self3.processorMap["emitMetricBatchV2"] = &m3ProcessorEmitMetricBatchV2{handler: handler} return self3 } @@ -141,12 +141,12 @@ func (p *M3Processor) Process(iprot, oprot thrift.TProtocol) (success bool, err } -type m3ProcessorEmitMetricBatch struct { +type m3ProcessorEmitMetricBatchV2 struct { handler M3 } -func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := M3EmitMetricBatchArgs{} +func (p *m3ProcessorEmitMetricBatchV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := M3EmitMetricBatchV2Args{} if err = args.Read(iprot); err != nil { iprot.ReadMessageEnd() return false, err @@ -154,7 +154,7 @@ func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TP iprot.ReadMessageEnd() var err2 error - if err2 = p.handler.EmitMetricBatch(args.Batch); err2 != nil { + if err2 = p.handler.EmitMetricBatchV2(args.Batch); err2 != nil { return true, err2 } return true, nil @@ -164,27 +164,27 @@ func (p *m3ProcessorEmitMetricBatch) Process(seqId int32, iprot, oprot thrift.TP // Attributes: // - Batch -type M3EmitMetricBatchArgs struct { +type M3EmitMetricBatchV2Args struct { Batch MetricBatch `thrift:"batch,1" json:"batch"` } -func NewM3EmitMetricBatchArgs() *M3EmitMetricBatchArgs { - return &M3EmitMetricBatchArgs{} +func NewM3EmitMetricBatchV2Args() *M3EmitMetricBatchV2Args { + return &M3EmitMetricBatchV2Args{} } -var M3EmitMetricBatchArgs_Batch_DEFAULT MetricBatch +var M3EmitMetricBatchV2Args_Batch_DEFAULT MetricBatch -func (p *M3EmitMetricBatchArgs) GetBatch() MetricBatch { +func (p *M3EmitMetricBatchV2Args) GetBatch() MetricBatch { if !p.IsSetBatch() { - return M3EmitMetricBatchArgs_Batch_DEFAULT + return M3EmitMetricBatchV2Args_Batch_DEFAULT } return p.Batch } -func (p *M3EmitMetricBatchArgs) IsSetBatch() bool { +func (p *M3EmitMetricBatchV2Args) IsSetBatch() bool { return p.Batch.Metrics != nil || p.Batch.CommonTags != nil } -func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { +func (p *M3EmitMetricBatchV2Args) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) } @@ -217,7 +217,7 @@ func (p *M3EmitMetricBatchArgs) Read(iprot thrift.TProtocol) error { return nil } -func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error { +func (p *M3EmitMetricBatchV2Args) readField1(iprot thrift.TProtocol) error { p.Batch = MetricBatch{} if err := p.Batch.Read(iprot); err != nil { return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) @@ -225,8 +225,8 @@ func (p *M3EmitMetricBatchArgs) readField1(iprot thrift.TProtocol) error { return nil } -func (p *M3EmitMetricBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitMetricBatch_args"); err != nil { +func (p *M3EmitMetricBatchV2Args) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("emitMetricBatchV2_args"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } if err := p.writeField1(oprot); err != nil { @@ -241,7 +241,7 @@ func (p *M3EmitMetricBatchArgs) Write(oprot thrift.TProtocol) error { return nil } -func (p 
*M3EmitMetricBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *M3EmitMetricBatchV2Args) writeField1(oprot thrift.TProtocol) (err error) { if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) } @@ -254,9 +254,9 @@ func (p *M3EmitMetricBatchArgs) writeField1(oprot thrift.TProtocol) (err error) return err } -func (p *M3EmitMetricBatchArgs) String() string { +func (p *M3EmitMetricBatchV2Args) String() string { if p == nil { return "" } - return fmt.Sprintf("M3EmitMetricBatchArgs(%+v)", *p) + return fmt.Sprintf("M3EmitMetricBatchV2Args(%+v)", *p) } diff --git a/m3/thrift/v2/ttypes.go b/m3/thrift/v2/ttypes.go index e1b429bf..02e8d23e 100644 --- a/m3/thrift/v2/ttypes.go +++ b/m3/thrift/v2/ttypes.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -// autogenerated by thrift compiler (0.9.3) +// Autogenerated by Thrift Compiler (0.9.3) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package v2 diff --git a/m3/thrift/v2/v2.thrift b/m3/thrift/v2/v2.thrift index 25c1dccc..ab2b12f3 100644 --- a/m3/thrift/v2/v2.thrift +++ b/m3/thrift/v2/v2.thrift @@ -30,5 +30,5 @@ struct MetricBatch { } service M3 { - oneway void emitMetricBatch(1: MetricBatch batch) + oneway void emitMetricBatchV2(1: MetricBatch batch) } \ No newline at end of file From 66660d99608bd9b6f4aaff1cac81d39128993e1e Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 13:37:45 -0500 Subject: [PATCH 12/25] Use new V2 method in M3 reporter --- m3/reporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/m3/reporter.go b/m3/reporter.go index beeed3c4..443de0f4 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -556,7 +556,7 @@ func (r *reporter) flush( mets []m3thrift.Metric, ) []m3thrift.Metric { //nolint:errcheck - r.client.EmitMetricBatch(m3thrift.MetricBatch{ + r.client.EmitMetricBatchV2(m3thrift.MetricBatch{ Metrics: mets, CommonTags: r.commonTags, }) From d119b4b6d337374ba019dd0cb59f54fb8234803c Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 14:51:14 -0500 Subject: [PATCH 13/25] Fix test --- m3/reporter_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/m3/reporter_test.go b/m3/reporter_test.go index ad631a97..1d8e5d72 100644 --- a/m3/reporter_test.go +++ b/m3/reporter_test.go @@ -662,7 +662,7 @@ func (m *fakeM3Service) getMetrics() []m3thrift.Metric { return m.metrics } -func (m *fakeM3Service) EmitMetricBatch(batch m3thrift.MetricBatch) (err error) { +func (m *fakeM3Service) EmitMetricBatchV2(batch m3thrift.MetricBatch) (err error) { m.lock.Lock() m.batches = append(m.batches, batch) if m.wg != nil && m.countBatches { From 2425ab56f37634b8ac96f6b81cd82738b6c50abd Mon Sep 17 00:00:00 2001 From: Matt Way Date: Mon, 15 Mar 2021 17:06:24 -0400 Subject: [PATCH 14/25] Fix race condition with bucket cache --- scope.go | 41 +++++++---------------------------------- stats.go | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 34 deletions(-) diff --git a/scope.go b/scope.go index 3b5f803e..7407eb70 100644 --- a/scope.go +++ b/scope.go @@ -22,11 +22,8 @@ package tally import ( "io" - "math" "sync" "time" - - "github.com/uber-go/tally/internal/identity" ) const ( @@ -88,28 +85,7 @@ type scope struct { // nb: deliberately skipping timersSlice as we report timers immediately, // no buffering is involved. 
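As the comment above notes, timers are deliberately absent from the buffered report path: a tally-style timer forwards each sample to the reporter the moment it is recorded, whereas counters accumulate locally and flush deltas on the report loop. A toy contrast of the two styles (simplified types; the real interfaces also carry tags and capabilities):

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    type reporter struct{}

    func (reporter) ReportTimer(name string, d time.Duration) {
        fmt.Println("timer", name, d)
    }

    // counter buffers: Inc only touches local state, and report flushes
    // the delta since the previous flush. This is what a report pass drains.
    type counter struct {
        prev, curr int64
    }

    func (c *counter) Inc(v int64) { atomic.AddInt64(&c.curr, v) }

    func (c *counter) report(name string) {
        curr := atomic.LoadInt64(&c.curr)
        if delta := curr - c.prev; delta != 0 {
            c.prev = curr
            fmt.Println("counter", name, delta)
        }
    }

    // timer does not buffer: Record emits immediately, so a periodic
    // report pass has nothing to flush for it.
    type timer struct{ r reporter }

    func (t timer) Record(d time.Duration) { t.r.ReportTimer("latency", d) }

    func main() {
        c := &counter{}
        c.Inc(3)
        timer{}.Record(5 * time.Millisecond) // emitted here, not on flush
        c.report("requests")                 // emitted on flush
    }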
- bucketCache map[uint64]bucketStorage -} - -// n.b. This function is used to uniquely identify a given set of buckets -// commutatively through hash folding, in order to do cache lookups and -// avoid allocating additional storage for data that is shared among all -// instances of a particular set of buckets. -func getBucketsIdentity(buckets Buckets) uint64 { - acc := identity.NewAccumulator() - - if dbuckets, ok := buckets.(DurationBuckets); ok { - for _, dur := range dbuckets { - acc = acc.AddUint64(uint64(dur)) - } - } else { - vbuckets := buckets.(ValueBuckets) - for _, val := range vbuckets { - acc = acc.AddUint64(math.Float64bits(val)) - } - } - - return acc.Value() + bucketCache *bucketCache } type scopeStatus struct { @@ -191,7 +167,7 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope { histograms: make(map[string]*histogram), histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), timers: make(map[string]*timer), - bucketCache: make(map[uint64]bucketStorage), + bucketCache: newBucketCache(), } // NB(r): Take a copy of the tags on creation @@ -428,15 +404,12 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { ) } - bid := getBucketsIdentity(b) - storage, ok := s.bucketCache[bid] - if !ok { - storage = newBucketStorage(htype, b, cachedHistogram) - s.bucketCache[bid] = storage - } - h := newHistogram( - htype, s.fullyQualifiedName(name), s.tags, s.reporter, storage, + htype, + s.fullyQualifiedName(name), + s.tags, + s.reporter, + s.bucketCache.Get(htype, b, cachedHistogram), ) s.histograms[name] = h s.histogramsSlice = append(s.histogramsSlice, h) diff --git a/stats.go b/stats.go index 9f36af1c..8ab07a00 100644 --- a/stats.go +++ b/stats.go @@ -26,6 +26,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/uber-go/tally/internal/identity" ) var ( @@ -474,6 +476,59 @@ func newBucketStorage( return storage } +type bucketCache struct { + mtx sync.RWMutex + cache map[uint64]bucketStorage +} + +func newBucketCache() *bucketCache { + return &bucketCache{ + cache: make(map[uint64]bucketStorage), + } +} + +func (c *bucketCache) Get( + htype histogramType, + buckets Buckets, + cachedHistogram CachedHistogram, +) bucketStorage { + id := getBucketsIdentity(buckets) + + c.mtx.RLock() + storage, ok := c.cache[id] + if !ok { + c.mtx.RUnlock() + c.mtx.Lock() + storage = newBucketStorage(htype, buckets, cachedHistogram) + c.cache[id] = storage + c.mtx.Unlock() + } else { + c.mtx.RUnlock() + } + return storage +} + +// n.b. This function is used to uniquely identify a given set of buckets +// commutatively through hash folding, in order to do cache lookups and +// avoid allocating additional storage for data that is shared among all +// instances of a particular set of buckets. +func getBucketsIdentity(buckets Buckets) uint64 { + acc := identity.NewAccumulator() + + if dbuckets, ok := buckets.(DurationBuckets); ok { + for _, dur := range dbuckets { + acc = acc.AddUint64(uint64(dur)) + } + } else { + vbuckets := buckets.(ValueBuckets) + for _, val := range vbuckets { + acc = acc.AddUint64(math.Float64bits(val)) + } + } + + return acc.Value() +} + // NullStatsReporter is an implementation of StatsReporter than simply does nothing. 
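The n.b. on getBucketsIdentity above carries the key property: because each bound is folded in additively, the identity is commutative, so the same set of buckets hashes to the same cache key regardless of the order in which the bounds are supplied. A standalone sketch of additive hash folding; the seed and fold constants below are illustrative stand-ins, not the library's:

    package main

    import (
        "fmt"
        "math"
    )

    const (
        seed     = 14695981039346656037 // illustrative seed value
        hashFold = 1099511628211        // illustrative fold multiplier
    )

    // foldIdentity folds each value into the accumulator with wrapping
    // uint64 arithmetic. Addition commutes, so any ordering of the same
    // bounds yields the same identity, which is what a cache key needs.
    func foldIdentity(values []float64) uint64 {
        acc := uint64(seed)
        for _, v := range values {
            acc += math.Float64bits(v) * hashFold
        }
        return acc
    }

    func main() {
        a := foldIdentity([]float64{10, 20, 30})
        b := foldIdentity([]float64{30, 10, 20})
        fmt.Println(a == b) // true: order does not affect the identity
    }

Collisions between distinct bucket sets are possible in principle; the scheme accepts that risk in exchange for an allocation-free key.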
var NullStatsReporter StatsReporter = nullStatsReporter{} From fe2f60a798db0d93814b5f9821c97499368009d0 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 16 Mar 2021 18:28:47 -0400 Subject: [PATCH 15/25] Fix cached bucket sharing --- scope.go | 1 + scope_test.go | 369 +++++++++++++++++++++++++++++++++++++++----------- stats.go | 92 ++++++++----- stats_test.go | 8 +- 4 files changed, 355 insertions(+), 115 deletions(-) diff --git a/scope.go b/scope.go index 7407eb70..61a08a64 100644 --- a/scope.go +++ b/scope.go @@ -410,6 +410,7 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { s.tags, s.reporter, s.bucketCache.Get(htype, b, cachedHistogram), + cachedHistogram, ) s.histograms[name] = h s.histogramsSlice = append(s.histogramsSlice, h) diff --git a/scope_test.go b/scope_test.go index 72a7dc18..496dc35f 100644 --- a/scope_test.go +++ b/scope_test.go @@ -23,12 +23,16 @@ package tally import ( "fmt" "math" + "math/rand" + "strconv" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -117,6 +121,74 @@ func newTestStatsReporter() *testStatsReporter { } } +func (r *testStatsReporter) getCounters() map[string]*testIntValue { + dst := make(map[string]*testIntValue, len(r.counters)) + for k, v := range r.counters { + var ( + parts = strings.Split(k, "+") + name string + ) + if len(parts) > 0 { + name = parts[0] + } + + dst[name] = v + } + + return dst +} + +func (r *testStatsReporter) getGauges() map[string]*testFloatValue { + dst := make(map[string]*testFloatValue, len(r.gauges)) + for k, v := range r.gauges { + var ( + parts = strings.Split(k, "+") + name string + ) + if len(parts) > 0 { + name = parts[0] + } + + dst[name] = v + } + + return dst +} + +func (r *testStatsReporter) getTimers() map[string]*testIntValue { + dst := make(map[string]*testIntValue, len(r.timers)) + for k, v := range r.timers { + var ( + parts = strings.Split(k, "+") + name string + ) + if len(parts) > 0 { + name = parts[0] + } + + dst[name] = v + } + + return dst +} + +func (r *testStatsReporter) getHistograms() map[string]*testHistogramValue { + dst := make(map[string]*testHistogramValue, len(r.histograms)) + for k, v := range r.histograms { + var ( + parts = strings.Split(k, "+") + name string + ) + if len(parts) > 0 { + name = parts[0] + } + + dst[name] = v + } + + return dst +} + func (r *testStatsReporter) WaitAll() { r.cg.Wait() r.gg.Wait() @@ -237,15 +309,16 @@ func (r *testStatsReporter) ReportHistogramValueSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound float64, bucketUpperBound float64, samples int64, ) { - value, ok := r.histograms[name] + key := KeyForPrefixedStringMap(name, tags) + value, ok := r.histograms[key] if !ok { value = newTestHistogramValue() value.tags = tags - r.histograms[name] = value + r.histograms[key] = value } value.valueSamples[bucketUpperBound] = int(samples) r.hg.Done() @@ -255,15 +328,16 @@ func (r *testStatsReporter) ReportHistogramDurationSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound time.Duration, bucketUpperBound time.Duration, samples int64, ) { - value, ok := r.histograms[name] + key := KeyForPrefixedStringMap(name, tags) + value, ok := r.histograms[key] if !ok { value = newTestHistogramValue() value.tags = tags - r.histograms[name] = value + r.histograms[key] = value } value.durationSamples[bucketUpperBound] = int(samples) r.hg.Done() @@ -394,34 +468,118 @@ func 
TestWriteOnce(t *testing.T) { s.SubScope("test").Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(1.1) r.hg.Add(1) s.SubScope("test").Histogram("bat", MustMakeLinearValueBuckets(1, 1, 3)).RecordValue(2.1) + + buckets := MustMakeLinearValueBuckets(100, 10, 3) r.hg.Add(1) - s.SubScope("test").Histogram("qux", MustMakeLinearValueBuckets(100, 10, 3)).RecordValue(135.0) + s.SubScope("test").Histogram("qux", buckets).RecordValue(135.0) r.hg.Add(1) - s.SubScope("test").Histogram("quux", MustMakeLinearValueBuckets(100, 10, 3)).RecordValue(101.0) + s.SubScope("test").Histogram("quux", buckets).RecordValue(101.0) + r.hg.Add(1) + s.SubScope("test2").Histogram("quux", buckets).RecordValue(101.0) s.reportLoopRun() r.WaitAll() - assert.EqualValues(t, 1, r.counters["bar"].val) - assert.EqualValues(t, 1, r.gauges["zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["ticky"].val) - assert.EqualValues(t, 1, r.histograms["baz"].valueSamples[50.0]) - assert.EqualValues(t, 1, r.histograms["bat"].valueSamples[3.0]) - assert.EqualValues(t, 1, r.histograms["test.bat"].valueSamples[2.0]) - assert.EqualValues(t, 1, r.histograms["test.bat"].valueSamples[3.0]) - assert.EqualValues(t, 1, r.histograms["test.qux"].valueSamples[math.MaxFloat64]) - assert.EqualValues(t, 1, r.histograms["test.quux"].valueSamples[110.0]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + + assert.EqualValues(t, 1, counters["bar"].val) + assert.EqualValues(t, 1, gauges["zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["ticky"].val) + assert.EqualValues(t, 1, histograms["baz"].valueSamples[50.0]) + assert.EqualValues(t, 1, histograms["bat"].valueSamples[3.0]) + assert.EqualValues(t, 1, histograms["test.bat"].valueSamples[2.0]) + assert.EqualValues(t, 1, histograms["test.bat"].valueSamples[3.0]) + assert.EqualValues(t, 1, histograms["test.qux"].valueSamples[math.MaxFloat64]) + assert.EqualValues(t, 1, histograms["test.quux"].valueSamples[110.0]) + assert.EqualValues(t, 1, histograms["test2.quux"].valueSamples[110.0]) r = newTestStatsReporter() s.reportLoopRun() - assert.Nil(t, r.counters["bar"]) - assert.Nil(t, r.gauges["zed"]) - assert.Nil(t, r.timers["ticky"]) - assert.Nil(t, r.histograms["baz"]) - assert.Nil(t, r.histograms["bat"]) - assert.Nil(t, r.histograms["test.qux"]) + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + + assert.Nil(t, counters["bar"]) + assert.Nil(t, gauges["zed"]) + assert.Nil(t, timers["ticky"]) + assert.Nil(t, histograms["baz"]) + assert.Nil(t, histograms["bat"]) + assert.Nil(t, histograms["test.qux"]) +} + +func TestHistogramSharedBucketMetrics(t *testing.T) { + var ( + r = newTestStatsReporter() + scope = newRootScope(ScopeOptions{ + Prefix: "", + Tags: nil, + CachedReporter: r, + }, 0) + builder = func(s Scope) func(map[string]string) { + buckets := MustMakeLinearValueBuckets(10, 10, 3) + return func(tags map[string]string) { + s.Tagged(tags).Histogram("hist", buckets).RecordValue(19.0) + } + } + ) + + var ( + wg = &sync.WaitGroup{} + record = builder(scope) + ) + + r.hg.Add(4) + for i := 0; i < 10000; i++ { + i := i + wg.Add(1) + go func() { + defer wg.Done() + + val := strconv.Itoa(i % 4) + record(map[string]string{ + "key": val, + }) + + time.Sleep(time.Duration(rand.Float64() * float64(time.Second))) + }() + } + + wg.Wait() + scope.status.Lock() + scope.reportRegistryWithLock() + scope.status.Unlock() + r.WaitAll() + + unseen := 
map[string]struct{}{ + "0": {}, + "1": {}, + "2": {}, + "3": {}, + } + + require.Equal(t, len(unseen), len(r.histograms)) + + for name, value := range r.histograms { + if !strings.HasPrefix(name, "hist+") { + continue + } + + count, ok := value.valueSamples[20.0] + require.True(t, ok) + require.Equal(t, 2500, count) + + delete(unseen, value.tags["key"]) + } + + require.Equal(t, 0, len(unseen), fmt.Sprintf("%v", unseen)) } func TestCounterSanitized(t *testing.T) { @@ -448,26 +606,38 @@ func TestCounterSanitized(t *testing.T) { s.report(r) r.WaitAll() - assert.Nil(t, r.counters["how?"]) - assert.EqualValues(t, 1, r.counters["how_"].val) - assert.Nil(t, r.gauges["does!"]) - assert.EqualValues(t, 1, r.gauges["does_"].val) - assert.Nil(t, r.timers["this!"]) - assert.EqualValues(t, time.Millisecond*175, r.timers["this_"].val) - assert.Nil(t, r.histograms["work1!?"]) - assert.EqualValues(t, 1, r.histograms["work1__"].valueSamples[50.0]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + + assert.Nil(t, counters["how?"]) + assert.EqualValues(t, 1, counters["how_"].val) + assert.Nil(t, gauges["does!"]) + assert.EqualValues(t, 1, gauges["does_"].val) + assert.Nil(t, timers["this!"]) + assert.EqualValues(t, time.Millisecond*175, timers["this_"].val) + assert.Nil(t, histograms["work1!?"]) + assert.EqualValues(t, 1, histograms["work1__"].valueSamples[50.0]) r = newTestStatsReporter() s.report(r) - assert.Nil(t, r.counters["how?"]) - assert.Nil(t, r.counters["how_"]) - assert.Nil(t, r.gauges["does!"]) - assert.Nil(t, r.gauges["does_"]) - assert.Nil(t, r.timers["this!"]) - assert.Nil(t, r.timers["this_"]) - assert.Nil(t, r.histograms["work1!?"]) - assert.Nil(t, r.histograms["work1__"]) + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + + assert.Nil(t, counters["how?"]) + assert.Nil(t, counters["how_"]) + assert.Nil(t, gauges["does!"]) + assert.Nil(t, gauges["does_"]) + assert.Nil(t, timers["this!"]) + assert.Nil(t, timers["this_"]) + assert.Nil(t, histograms["work1!?"]) + assert.Nil(t, histograms["work1__"]) } func TestCachedReporter(t *testing.T) { @@ -493,11 +663,18 @@ func TestCachedReporter(t *testing.T) { s.cachedReport() r.WaitAll() - assert.EqualValues(t, 1, r.counters["bar"].val) - assert.EqualValues(t, 1, r.gauges["zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["ticky"].val) - assert.EqualValues(t, 1, r.histograms["baz"].valueSamples[50.0]) - assert.EqualValues(t, 1, r.histograms["qux"].durationSamples[50*time.Millisecond]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + + assert.EqualValues(t, 1, counters["bar"].val) + assert.EqualValues(t, 1, gauges["zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["ticky"].val) + assert.EqualValues(t, 1, histograms["baz"].valueSamples[50.0]) + assert.EqualValues(t, 1, histograms["qux"].durationSamples[50*time.Millisecond]) } func TestRootScopeWithoutPrefix(t *testing.T) { @@ -521,10 +698,17 @@ func TestRootScopeWithoutPrefix(t *testing.T) { s.report(r) r.WaitAll() - assert.EqualValues(t, 21, r.counters["bar"].val) - assert.EqualValues(t, 1, r.gauges["zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["blork"].val) - assert.EqualValues(t, 1, r.histograms["baz"].valueSamples[50.0]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + 
) + + assert.EqualValues(t, 21, counters["bar"].val) + assert.EqualValues(t, 1, gauges["zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["blork"].val) + assert.EqualValues(t, 1, histograms["baz"].valueSamples[50.0]) } func TestRootScopeWithPrefix(t *testing.T) { @@ -548,10 +732,17 @@ func TestRootScopeWithPrefix(t *testing.T) { s.report(r) r.WaitAll() - assert.EqualValues(t, 21, r.counters["foo.bar"].val) - assert.EqualValues(t, 1, r.gauges["foo.zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["foo.blork"].val) - assert.EqualValues(t, 1, r.histograms["foo.baz"].valueSamples[50.0]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + + assert.EqualValues(t, 21, counters["foo.bar"].val) + assert.EqualValues(t, 1, gauges["foo.zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["foo.blork"].val) + assert.EqualValues(t, 1, histograms["foo.baz"].valueSamples[50.0]) } func TestRootScopeWithDifferentSeparator(t *testing.T) { @@ -575,10 +766,17 @@ func TestRootScopeWithDifferentSeparator(t *testing.T) { s.report(r) r.WaitAll() - assert.EqualValues(t, 21, r.counters["foo_bar"].val) - assert.EqualValues(t, 1, r.gauges["foo_zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["foo_blork"].val) - assert.EqualValues(t, 1, r.histograms["foo_baz"].valueSamples[50.0]) + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + + assert.EqualValues(t, 21, counters["foo_bar"].val) + assert.EqualValues(t, 1, gauges["foo_zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["foo_blork"].val) + assert.EqualValues(t, 1, histograms["foo_baz"].valueSamples[50.0]) } func TestSubScope(t *testing.T) { @@ -603,17 +801,24 @@ func TestSubScope(t *testing.T) { s.report(r) r.WaitAll() + var ( + counters = r.getCounters() + gauges = r.getGauges() + timers = r.getTimers() + histograms = r.getHistograms() + ) + // Assert prefixed correctly - assert.EqualValues(t, 21, r.counters["foo.mork.bar"].val) - assert.EqualValues(t, 1, r.gauges["foo.mork.zed"].val) - assert.EqualValues(t, time.Millisecond*175, r.timers["foo.mork.blork"].val) - assert.EqualValues(t, 1, r.histograms["foo.mork.baz"].valueSamples[50.0]) + assert.EqualValues(t, 21, counters["foo.mork.bar"].val) + assert.EqualValues(t, 1, gauges["foo.mork.zed"].val) + assert.EqualValues(t, time.Millisecond*175, timers["foo.mork.blork"].val) + assert.EqualValues(t, 1, histograms["foo.mork.baz"].valueSamples[50.0]) // Assert tags inherited - assert.Equal(t, tags, r.counters["foo.mork.bar"].tags) - assert.Equal(t, tags, r.gauges["foo.mork.zed"].tags) - assert.Equal(t, tags, r.timers["foo.mork.blork"].tags) - assert.Equal(t, tags, r.histograms["foo.mork.baz"].tags) + assert.Equal(t, tags, counters["foo.mork.bar"].tags) + assert.Equal(t, tags, gauges["foo.mork.zed"].tags) + assert.Equal(t, tags, timers["foo.mork.blork"].tags) + assert.Equal(t, tags, histograms["foo.mork.baz"].tags) } func TestTaggedSubScope(t *testing.T) { @@ -643,23 +848,28 @@ func TestTaggedSubScope(t *testing.T) { tscope.report(r) r.cg.Wait() - assert.EqualValues(t, 1, r.counters["foo.beep"].val) - assert.EqualValues(t, ts, r.counters["foo.beep"].tags) + var ( + counters = r.getCounters() + histograms = r.getHistograms() + ) - assert.EqualValues(t, 1, r.counters["foo.boop"].val) + assert.EqualValues(t, 1, counters["foo.beep"].val) + assert.EqualValues(t, ts, counters["foo.beep"].tags) + + 
assert.EqualValues(t, 1, counters["foo.boop"].val) assert.EqualValues(t, map[string]string{ "env": "test", "service": "test", - }, r.counters["foo.boop"].tags) + }, counters["foo.boop"].tags) - assert.EqualValues(t, 1, r.histograms["foo.baz"].valueSamples[50.0]) - assert.EqualValues(t, ts, r.histograms["foo.baz"].tags) + assert.EqualValues(t, 1, histograms["foo.baz"].valueSamples[50.0]) + assert.EqualValues(t, ts, histograms["foo.baz"].tags) - assert.EqualValues(t, 1, r.histograms["foo.bar"].valueSamples[50.0]) + assert.EqualValues(t, 1, histograms["foo.bar"].valueSamples[50.0]) assert.EqualValues(t, map[string]string{ "env": "test", "service": "test", - }, r.histograms["foo.bar"].tags) + }, histograms["foo.bar"].tags) } func TestTaggedSanitizedSubScope(t *testing.T) { @@ -685,11 +895,12 @@ func TestTaggedSanitizedSubScope(t *testing.T) { tscope.report(r) r.cg.Wait() - assert.EqualValues(t, 1, r.counters["foo_beep"].val) + counters := r.getCounters() + assert.EqualValues(t, 1, counters["foo_beep"].val) assert.EqualValues(t, map[string]string{ "env": "test_env", "service": "test_service", - }, r.counters["foo_beep"].tags) + }, counters["foo_beep"].tags) } func TestTaggedExistingReturnsSameScope(t *testing.T) { @@ -845,7 +1056,8 @@ func TestReturnByValue(t *testing.T) { s.report(r) r.cg.Wait() - assert.EqualValues(t, 3, r.counters["honk"].val) + counters := r.getCounters() + assert.EqualValues(t, 3, counters["honk"].val) } func TestScopeAvoidReportLoopRunOnClose(t *testing.T) { @@ -869,11 +1081,12 @@ func TestScopeFlushOnClose(t *testing.T) { r.cg.Add(1) root.Counter("foo").Inc(1) - assert.Nil(t, r.counters["foo"]) + counters := r.getCounters() + assert.Nil(t, counters["foo"]) assert.NoError(t, closer.Close()) - assert.EqualValues(t, 1, r.counters["foo"].val) - + counters = r.getCounters() + assert.EqualValues(t, 1, counters["foo"].val) assert.NoError(t, closer.Close()) } diff --git a/stats.go b/stats.go index 8ab07a00..96c479db 100644 --- a/stats.go +++ b/stats.go @@ -234,7 +234,7 @@ func (r *timerNoReporterSink) ReportHistogramValueSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound float64, bucketUpperBound float64, samples int64, ) { @@ -244,7 +244,7 @@ func (r *timerNoReporterSink) ReportHistogramDurationSamples( name string, tags map[string]string, buckets Buckets, - bucketLowerBound, + bucketLowerBound time.Duration, bucketUpperBound time.Duration, samples int64, ) { @@ -257,14 +257,20 @@ func (r *timerNoReporterSink) Capabilities() Capabilities { func (r *timerNoReporterSink) Flush() { } +type sampleCounter struct { + counter *counter + cachedBucket CachedHistogramBucket +} + type histogram struct { - htype histogramType - name string - tags map[string]string - reporter StatsReporter - specification Buckets - buckets []histogramBucket - samples []*counter + htype histogramType + name string + tags map[string]string + reporter StatsReporter + specification Buckets + buckets []histogramBucket + // samples []*counter + samples []sampleCounter lookupByValue []float64 lookupByDuration []int } @@ -276,12 +282,13 @@ const ( durationHistogramType ) -func newHistogram( // need to be able to reuse internal histogram buckets and lookups +func newHistogram( htype histogramType, name string, tags map[string]string, reporter StatsReporter, storage bucketStorage, + cachedHistogram CachedHistogram, ) *histogram { h := &histogram{ htype: htype, @@ -290,13 +297,28 @@ func newHistogram( // need to be able to reuse internal histogram buckets and lo 
reporter: reporter, specification: storage.buckets, buckets: storage.hbuckets, - samples: make([]*counter, len(storage.hbuckets)), + samples: make([]sampleCounter, len(storage.hbuckets)), lookupByValue: storage.lookupByValue, lookupByDuration: storage.lookupByDuration, } - for i := range h.samples { - h.samples[i] = newCounter(nil) + for i := range h.buckets { + h.samples[i].counter = newCounter(nil) + + if cachedHistogram != nil { + switch htype { + case durationHistogramType: + h.samples[i].cachedBucket = cachedHistogram.DurationBucket( + h.buckets[i].durationLowerBound, + h.buckets[i].durationUpperBound, + ) + case valueHistogramType: + h.samples[i].cachedBucket = cachedHistogram.ValueBucket( + h.buckets[i].valueLowerBound, + h.buckets[i].valueUpperBound, + ) + } + } } return h @@ -304,7 +326,7 @@ func newHistogram( // need to be able to reuse internal histogram buckets and lo func (h *histogram) report(name string, tags map[string]string, r StatsReporter) { for i := range h.buckets { - samples := h.samples[i].value() + samples := h.samples[i].counter.value() if samples == 0 { continue } @@ -334,16 +356,16 @@ func (h *histogram) report(name string, tags map[string]string, r StatsReporter) func (h *histogram) cachedReport() { for i := range h.buckets { - samples := h.samples[i].value() + samples := h.samples[i].counter.value() if samples == 0 { continue } switch h.htype { case valueHistogramType: - h.buckets[i].cachedValueBucket.ReportSamples(samples) + h.samples[i].cachedBucket.ReportSamples(samples) case durationHistogramType: - h.buckets[i].cachedDurationBucket.ReportSamples(samples) + h.samples[i].cachedBucket.ReportSamples(samples) } } } @@ -358,7 +380,7 @@ func (h *histogram) RecordValue(value float64) { // buckets there will always be an inclusive bucket as // we always have a math.MaxFloat64 bucket. idx := sort.SearchFloat64s(h.lookupByValue, value) - h.samples[idx].Inc(1) + h.samples[idx].counter.Inc(1) } func (h *histogram) RecordDuration(value time.Duration) { @@ -371,7 +393,7 @@ func (h *histogram) RecordDuration(value time.Duration) { // buckets there will always be an inclusive bucket as // we always have a math.MaxInt64 bucket. 
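Both record paths here reduce bucket selection to a binary search over the precomputed, sorted upper bounds, and the sentinel math.MaxFloat64 and math.MaxInt64 buckets guarantee the search always returns a valid index. A toy illustration of the value case:

    package main

    import (
        "fmt"
        "math"
        "sort"
    )

    func main() {
        // Upper bounds for buckets (-inf,10], (10,20], (20,30], (30,+inf);
        // the trailing MaxFloat64 is the catch-all bucket.
        upper := []float64{10, 20, 30, math.MaxFloat64}

        // SearchFloat64s returns the smallest index i with upper[i] >= v,
        // so values on a boundary land in the lower bucket (inclusive
        // upper bounds) and nothing can fall off the end.
        for _, v := range []float64{5, 10, 19.9, 31, 1e308} {
            idx := sort.SearchFloat64s(upper, v)
            fmt.Printf("value %v -> bucket %d\n", v, idx)
        }
    }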
idx := sort.SearchInts(h.lookupByDuration, int(value)) - h.samples[idx].Inc(1) + h.samples[idx].counter.Inc(1) } func (h *histogram) Start() Stopwatch { @@ -390,7 +412,7 @@ func (h *histogram) snapshotValues() map[float64]int64 { vals := make(map[float64]int64, len(h.buckets)) for i := range h.buckets { - vals[h.buckets[i].valueUpperBound] = h.samples[i].snapshot() + vals[h.buckets[i].valueUpperBound] = h.samples[i].counter.snapshot() } return vals @@ -403,7 +425,7 @@ func (h *histogram) snapshotDurations() map[time.Duration]int64 { durations := make(map[time.Duration]int64, len(h.buckets)) for i := range h.buckets { - durations[h.buckets[i].durationUpperBound] = h.samples[i].snapshot() + durations[h.buckets[i].durationUpperBound] = h.samples[i].counter.snapshot() } return durations @@ -428,7 +450,7 @@ type bucketStorage struct { func newBucketStorage( htype histogramType, buckets Buckets, - cachedHistogram CachedHistogram, + // cachedHistogram CachedHistogram, ) bucketStorage { var ( pairs = BucketPairs(buckets) @@ -455,18 +477,22 @@ func newBucketStorage( switch htype { case valueHistogramType: - if cachedHistogram != nil { - bucket.cachedValueBucket = cachedHistogram.ValueBucket( - bucket.valueLowerBound, bucket.valueUpperBound, - ) - } + /* + if cachedHistogram != nil { + bucket.cachedValueBucket = cachedHistogram.ValueBucket( + bucket.valueLowerBound, bucket.valueUpperBound, + ) + } + */ storage.lookupByValue = append(storage.lookupByValue, bucket.valueUpperBound) case durationHistogramType: - if cachedHistogram != nil { - bucket.cachedDurationBucket = cachedHistogram.DurationBucket( - bucket.durationLowerBound, bucket.durationUpperBound, - ) - } + /* + if cachedHistogram != nil { + bucket.cachedDurationBucket = cachedHistogram.DurationBucket( + bucket.durationLowerBound, bucket.durationUpperBound, + ) + } + */ storage.lookupByDuration = append(storage.lookupByDuration, int(bucket.durationUpperBound)) } @@ -499,7 +525,7 @@ func (c *bucketCache) Get( if !ok { c.mtx.RUnlock() c.mtx.Lock() - storage = newBucketStorage(htype, buckets, cachedHistogram) + storage = newBucketStorage(htype, buckets) //, cachedHistogram) c.cache[id] = storage c.mtx.Unlock() } else { diff --git a/stats_test.go b/stats_test.go index 82059768..7d29e90e 100644 --- a/stats_test.go +++ b/stats_test.go @@ -128,8 +128,8 @@ func TestTimer(t *testing.T) { func TestHistogramValueSamples(t *testing.T) { r := newStatsTestReporter() buckets := MustMakeLinearValueBuckets(0, 10, 10) - storage := newBucketStorage(valueHistogramType, buckets, nil) - h := newHistogram(valueHistogramType, "h1", nil, r, storage) + storage := newBucketStorage(valueHistogramType, buckets) + h := newHistogram(valueHistogramType, "h1", nil, r, storage, nil) var offset float64 for i := 0; i < 3; i++ { @@ -150,8 +150,8 @@ func TestHistogramValueSamples(t *testing.T) { func TestHistogramDurationSamples(t *testing.T) { r := newStatsTestReporter() buckets := MustMakeLinearDurationBuckets(0, 10*time.Millisecond, 10) - storage := newBucketStorage(durationHistogramType, buckets, nil) - h := newHistogram(durationHistogramType, "h1", nil, r, storage) + storage := newBucketStorage(durationHistogramType, buckets) + h := newHistogram(durationHistogramType, "h1", nil, r, storage, nil) var offset time.Duration for i := 0; i < 3; i++ { From 1ce667ce2db844124249b38fa1396ebeaa1e7980 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 16 Mar 2021 18:42:59 -0400 Subject: [PATCH 16/25] Cleanup --- scope.go | 2 +- stats.go | 18 +----------------- 2 files changed, 2 
insertions(+), 18 deletions(-) diff --git a/scope.go b/scope.go index 61a08a64..264b6c35 100644 --- a/scope.go +++ b/scope.go @@ -409,7 +409,7 @@ func (s *scope) Histogram(name string, b Buckets) Histogram { s.fullyQualifiedName(name), s.tags, s.reporter, - s.bucketCache.Get(htype, b, cachedHistogram), + s.bucketCache.Get(htype, b), cachedHistogram, ) s.histograms[name] = h diff --git a/stats.go b/stats.go index 96c479db..e1d7e2dd 100644 --- a/stats.go +++ b/stats.go @@ -450,7 +450,6 @@ type bucketStorage struct { func newBucketStorage( htype histogramType, buckets Buckets, - // cachedHistogram CachedHistogram, ) bucketStorage { var ( pairs = BucketPairs(buckets) @@ -477,22 +476,8 @@ func newBucketStorage( switch htype { case valueHistogramType: - /* - if cachedHistogram != nil { - bucket.cachedValueBucket = cachedHistogram.ValueBucket( - bucket.valueLowerBound, bucket.valueUpperBound, - ) - } - */ storage.lookupByValue = append(storage.lookupByValue, bucket.valueUpperBound) case durationHistogramType: - /* - if cachedHistogram != nil { - bucket.cachedDurationBucket = cachedHistogram.DurationBucket( - bucket.durationLowerBound, bucket.durationUpperBound, - ) - } - */ storage.lookupByDuration = append(storage.lookupByDuration, int(bucket.durationUpperBound)) } @@ -516,7 +501,6 @@ func newBucketCache() *bucketCache { func (c *bucketCache) Get( htype histogramType, buckets Buckets, - cachedHistogram CachedHistogram, ) bucketStorage { id := getBucketsIdentity(buckets) @@ -525,7 +509,7 @@ func (c *bucketCache) Get( if !ok { c.mtx.RUnlock() c.mtx.Lock() - storage = newBucketStorage(htype, buckets) //, cachedHistogram) + storage = newBucketStorage(htype, buckets) c.cache[id] = storage c.mtx.Unlock() } else { From 51810c7cc0a18fbd98403de6887b45b83a7ba546 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 16 Mar 2021 18:47:20 -0400 Subject: [PATCH 17/25] More cleanup --- stats.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/stats.go b/stats.go index e1d7e2dd..64048dbb 100644 --- a/stats.go +++ b/stats.go @@ -263,13 +263,12 @@ type sampleCounter struct { } type histogram struct { - htype histogramType - name string - tags map[string]string - reporter StatsReporter - specification Buckets - buckets []histogramBucket - // samples []*counter + htype histogramType + name string + tags map[string]string + reporter StatsReporter + specification Buckets + buckets []histogramBucket samples []sampleCounter lookupByValue []float64 lookupByDuration []int From 70f8c2f94d1249da582f4e279cd096c87bbb5da0 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 16 Mar 2021 19:08:39 -0400 Subject: [PATCH 18/25] Fix test --- scope_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scope_test.go b/scope_test.go index 496dc35f..71e1f77d 100644 --- a/scope_test.go +++ b/scope_test.go @@ -1028,8 +1028,9 @@ func TestScopeDefaultBuckets(t *testing.T) { s.report(r) r.WaitAll() - assert.EqualValues(t, 1, r.histograms["baz"].durationSamples[60*time.Millisecond]) - assert.EqualValues(t, 2, r.histograms["baz"].durationSamples[90*time.Millisecond]) + histograms := r.getHistograms() + assert.EqualValues(t, 1, histograms["baz"].durationSamples[60*time.Millisecond]) + assert.EqualValues(t, 2, histograms["baz"].durationSamples[90*time.Millisecond]) } type testMets struct { From c27bf68ae37cb963a423633894749a506f650bb3 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Thu, 18 Mar 2021 11:33:57 -0400 Subject: [PATCH 19/25] Remove go:nosplit for inlineable funcs --- 
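The reasoning behind this removal: go:nosplit only suppresses the stack-growth check in a function's prologue, and functions as small as these are inlined at their call sites, where no prologue exists in the first place, so the directive buys nothing on hot paths while still subjecting the functions to the linker's nosplit stack budget. A small sketch under that assumption; inlining decisions can be confirmed with go build -gcflags=-m, and the fold constant is illustrative:

    package main

    import "fmt"

    type accumulator uint64

    // addUint64 is a tiny leaf method; go build -gcflags=-m reports it as
    // inlineable, and an inlined call has no prologue for nosplit to skip.
    func (a accumulator) addUint64(u uint64) accumulator {
        return a + accumulator(u*0x9E3779B97F4A7C15)
    }

    func main() {
        a := accumulator(1469598103934665603)
        fmt.Println(a.addUint64(42))
    }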
internal/identity/accumulator.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/internal/identity/accumulator.go b/internal/identity/accumulator.go index 1f4ba667..80cccd45 100644 --- a/internal/identity/accumulator.go +++ b/internal/identity/accumulator.go @@ -38,20 +38,16 @@ type Accumulator uint64 // are unnecessary as memory width is bounded to each instance of `a` (a // uint64) and, potentially, a single stack-local loop temporary while // iterating. -// -//go:nosplit func NewAccumulator() Accumulator { return Accumulator(_hashSeed) } // NewAccumulatorWithSeed creates a new Accumulator with the provided seed value. -//go:nosplit func NewAccumulatorWithSeed(seed uint64) Accumulator { return Accumulator(seed) } // AddString hashes str and folds it into the accumulator. -//go:nosplit func (a Accumulator) AddString(str string) Accumulator { return a + (Accumulator(murmur3.StringSum64(str)) * Accumulator(_hashFold)) } @@ -67,7 +63,6 @@ func (a Accumulator) AddStrings(strs ...string) Accumulator { } // AddUint64 folds u64 into the accumulator. -//go:nosplit func (a Accumulator) AddUint64(u64 uint64) Accumulator { return a + Accumulator(u64*_hashFold) } @@ -83,7 +78,6 @@ func (a Accumulator) AddUint64s(u64s ...uint64) Accumulator { } // Value returns the accumulated value. -//go:nosplit func (a Accumulator) Value() uint64 { return uint64(a) } From 30b859b59e49f6a211fafb9b20389db6c1be2083 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 14:50:48 -0500 Subject: [PATCH 20/25] Enable TTL-based scope/metric deallocation --- m3/reporter.go | 67 +++++++++++++++++++ reporter.go | 12 ++++ scope.go | 142 ++++++++++++++++++++++++++++++++++++---- scope_benchmark_test.go | 9 +++ scope_registry.go | 49 ++++++++++++-- scope_test.go | 8 +++ stats.go | 59 +++++++++++++---- 7 files changed, 317 insertions(+), 29 deletions(-) diff --git a/m3/reporter.go b/m3/reporter.go index 443de0f4..c85eef02 100644 --- a/m3/reporter.go +++ b/m3/reporter.go @@ -290,6 +290,21 @@ func (r *reporter) allocateCounter( } } +// DeallocateCounter implements tally.CachedStatsReporter. +func (r *reporter) DeallocateCounter(counter tally.CachedCount) { + if counter == nil { + return + } + + c, ok := counter.(cachedMetric) + if !ok { + return + } + + r.resourcePool.releaseMetricTagSlice(c.metric.Tags) + c.metric.Tags = nil +} + // AllocateGauge implements tally.CachedStatsReporter. func (r *reporter) AllocateGauge( name string, @@ -307,6 +322,21 @@ func (r *reporter) AllocateGauge( } } +// DeallocateGauge implements tally.CachedStatsReporter. +func (r *reporter) DeallocateGauge(gauge tally.CachedGauge) { + if gauge == nil { + return + } + + g, ok := gauge.(cachedMetric) + if !ok { + return + } + + r.resourcePool.releaseMetricTagSlice(g.metric.Tags) + g.metric.Tags = nil +} + // AllocateTimer implements tally.CachedStatsReporter. func (r *reporter) AllocateTimer( name string, @@ -324,6 +354,21 @@ func (r *reporter) AllocateTimer( } } +// DeallocateTimer implements tally.CachedStatsReporter. +func (r *reporter) DeallocateTimer(timer tally.CachedTimer) { + if timer == nil { + return + } + + t, ok := timer.(cachedMetric) + if !ok { + return + } + + r.resourcePool.releaseMetricTagSlice(t.metric.Tags) + t.metric.Tags = nil +} + // AllocateHistogram implements tally.CachedStatsReporter. func (r *reporter) AllocateHistogram( name string, @@ -392,6 +437,28 @@ func (r *reporter) AllocateHistogram( } } +// DeallocateHistogram implements tally.CachedStatsReporter. 
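Each Deallocate* method above follows the same discipline: return the metric's tag slice to the reporter's pool, then nil the field so nothing can write through a stale reference after release. A generic sketch of that pattern using sync.Pool as a stand-in for the library's own resourcePool:

    package main

    import (
        "fmt"
        "sync"
    )

    type metricTag struct{ name, value string }

    var tagPool = sync.Pool{
        New: func() interface{} { return make([]metricTag, 0, 8) },
    }

    // getTagSlice hands out a slice truncated to zero length so stale
    // entries from a previous user are never observed.
    func getTagSlice() []metricTag {
        return tagPool.Get().([]metricTag)[:0]
    }

    // releaseTagSlice parks the slice for reuse; as in the Deallocate*
    // methods above, the caller must drop its own reference afterwards.
    func releaseTagSlice(tags []metricTag) {
        tagPool.Put(tags)
    }

    func main() {
        tags := getTagSlice()
        tags = append(tags, metricTag{"service", "example"})
        releaseTagSlice(tags)
        tags = nil // the use-after-release guard, mirroring Tags = nil above
        fmt.Println(tags == nil)
    }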
+func (r *reporter) DeallocateHistogram(histogram tally.CachedHistogram) { + if histogram == nil { + return + } + + h, ok := histogram.(cachedHistogram) + if !ok { + return + } + + for _, hbucket := range h.cachedDurationBuckets { + r.DeallocateCounter(hbucket.metric) + } + h.cachedDurationBuckets = nil + + for _, hbucket := range h.cachedValueBuckets { + r.DeallocateCounter(hbucket.metric) + } + h.cachedValueBuckets = nil +} + func (r *reporter) valueBucketString(v float64) string { if v == math.MaxFloat64 { return "infinity" diff --git a/reporter.go b/reporter.go index 22e0f266..48a77ff6 100644 --- a/reporter.go +++ b/reporter.go @@ -88,18 +88,27 @@ type CachedStatsReporter interface { tags map[string]string, ) CachedCount + // DeallocateCounter deallocates a counter that is no longer needed. + DeallocateCounter(CachedCount) + // AllocateGauge pre allocates a gauge data structure with name & tags. AllocateGauge( name string, tags map[string]string, ) CachedGauge + // DeallocateGauge deallocates a gauge that is no longer needed. + DeallocateGauge(CachedGauge) + // AllocateTimer pre allocates a timer data structure with name & tags. AllocateTimer( name string, tags map[string]string, ) CachedTimer + // DeallocateTimer deallocates a timer that is no longer needed. + DeallocateTimer(CachedTimer) + // AllocateHistogram pre allocates a histogram data structure with name, tags, // value buckets and duration buckets. AllocateHistogram( @@ -107,6 +116,9 @@ type CachedStatsReporter interface { tags map[string]string, buckets Buckets, ) CachedHistogram + + // DeallocateHistogram deallocates a histogram that is no longer needed. + DeallocateHistogram(CachedHistogram) } // CachedCount interface for reporting an individual counter diff --git a/scope.go b/scope.go index 264b6c35..d1b52762 100644 --- a/scope.go +++ b/scope.go @@ -82,10 +82,11 @@ type scope struct { histograms map[string]*histogram histogramsSlice []*histogram timers map[string]*timer - // nb: deliberately skipping timersSlice as we report timers immediately, - // no buffering is involved. + timersSlice []*timer bucketCache *bucketCache + lastReport time.Time + root bool } type scopeStatus struct { @@ -103,6 +104,18 @@ type ScopeOptions struct { Separator string DefaultBuckets Buckets SanitizeOptions *SanitizeOptions + + // UnusedScopeTTL configures scopes and the metrics they hold to be + // evicted from cache and deallocated if no metrics for a given scope have + // been reported within UnusedScopeTTL. Importantly, any pointers to scopes + // or metrics that are deallocated through this feature are invalidated. + // This setting does not affect root scopes. + UnusedScopeTTL time.Duration + + // UnusedScopeDeepEviction controls whether unused scopes evicted from cache + // after UnusedScopeTTL has elapsed simply have their metrics deallocated + // (shallow) or both the metrics and the scope itself (deep). 
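+	//
+	// For illustration only (a minimal sketch: `r` is assumed to be some
+	// StatsReporter, and the TTL and interval values are arbitrary):
+	//
+	//	scope, closer := tally.NewRootScope(tally.ScopeOptions{
+	//		Reporter:                r,
+	//		UnusedScopeTTL:          10 * time.Minute,
+	//		UnusedScopeDeepEviction: true,
+	//	}, time.Second)
+	//	defer closer.Close()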
+	UnusedScopeDeepEviction bool
 }
 
 // NewRootScope creates a new root Scope with a set of options and
@@ -155,6 +168,7 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 		baseReporter:    baseReporter,
 		defaultBuckets:  opts.DefaultBuckets,
 		sanitizer:       sanitizer,
+		root:            true,
 		status: scopeStatus{
 			closed: false,
 			quit:   make(chan struct{}, 1),
@@ -168,6 +182,7 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 		histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize),
 		timers:          make(map[string]*timer),
 		bucketCache:     newBucketCache(),
+		timersSlice:     make([]*timer, 0, _defaultInitialSliceSize),
 	}
 
 	// NB(r): Take a copy of the tags on creation
@@ -175,7 +190,7 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 	s.tags = s.copyAndSanitizeMap(opts.Tags)
 
 	// Register the root scope
-	s.registry = newScopeRegistry(s)
+	s.registry = newScopeRegistry(s, opts)
 
 	if interval > 0 {
 		go s.reportLoop(interval)
@@ -185,48 +200,78 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 }
 
 // report dumps all aggregated stats into the reporter. Should be called automatically by the root scope periodically.
-func (s *scope) report(r StatsReporter) {
+func (s *scope) report(r StatsReporter) (reported bool) {
 	s.cm.RLock()
 	for name, counter := range s.counters {
-		counter.report(s.fullyQualifiedName(name), s.tags, r)
+		if rep := counter.report(s.fullyQualifiedName(name), s.tags, r); rep {
+			reported = true
+		}
 	}
 	s.cm.RUnlock()
 
 	s.gm.RLock()
 	for name, gauge := range s.gauges {
-		gauge.report(s.fullyQualifiedName(name), s.tags, r)
+		if rep := gauge.report(s.fullyQualifiedName(name), s.tags, r); rep {
+			reported = true
+		}
 	}
 	s.gm.RUnlock()
 
 	// we do nothing for timers here because timers report directly to the StatsReporter without buffering
+	s.tm.RLock()
+	for _, timer := range s.timersSlice {
+		if rep := timer.hasReported(); rep {
+			reported = true
+		}
+	}
+	s.tm.RUnlock()
 
 	s.hm.RLock()
 	for name, histogram := range s.histograms {
-		histogram.report(s.fullyQualifiedName(name), s.tags, r)
+		if rep := histogram.report(s.fullyQualifiedName(name), s.tags, r); rep {
+			reported = true
+		}
 	}
 	s.hm.RUnlock()
+
+	return
 }
 
-func (s *scope) cachedReport() {
+func (s *scope) cachedReport() (reported bool) {
 	s.cm.RLock()
 	for _, counter := range s.countersSlice {
-		counter.cachedReport()
+		if rep := counter.cachedReport(); rep {
+			reported = true
+		}
 	}
 	s.cm.RUnlock()
 
 	s.gm.RLock()
 	for _, gauge := range s.gaugesSlice {
-		gauge.cachedReport()
+		if rep := gauge.cachedReport(); rep {
+			reported = true
+		}
 	}
 	s.gm.RUnlock()
 
 	// we do nothing for timers here because timers report directly to the StatsReporter without buffering
+	s.tm.RLock()
+	for _, timer := range s.timersSlice {
+		if rep := timer.hasReported(); rep {
+			reported = true
+		}
+	}
+	s.tm.RUnlock()
 
 	s.hm.RLock()
 	for _, histogram := range s.histogramsSlice {
-		histogram.cachedReport()
+		if rep := histogram.cachedReport(); rep {
+			reported = true
+		}
 	}
 	s.hm.RUnlock()
+
+	return
 }
 
 // reportLoop is used by the root scope for periodic reporting
@@ -363,6 +408,7 @@ func (s *scope) Timer(name string) Timer {
 		s.fullyQualifiedName(name), s.tags, s.reporter, cachedTimer,
 	)
 	s.timers[name] = t
+	s.timersSlice = append(s.timersSlice, t)
 
 	return t
 }
@@ -399,6 +445,7 @@ func (s *scope) Histogram(name string, b Buckets) Histogram {
 
 	var cachedHistogram CachedHistogram
 	if s.cachedReporter != nil {
+		// TODO: reuse common cached histogram storage
 		cachedHistogram = s.cachedReporter.AllocateHistogram(
 			s.fullyQualifiedName(name),
s.tags, b, ) @@ -552,6 +599,79 @@ func (s *scope) copyAndSanitizeMap(tags map[string]string) map[string]string { return result } +func (s *scope) release() { + if s.root { + return + } + + // Release all internal state. This invalidates any pointers held by users. + // As we're cleaning up, avoid doing work inside map loops so the compiler + // can optimize map clears; instead, do deallocs inside slice loops. This + // is effectively a scope-local STW, so lock everything at once: if we're + // calling this method, there's nothing left for this scope to do anyway. + // + // Since it's possible that there will be dangling pointers, delete all + // storage, not solely pooled storage. + + s.cm.Lock() + defer s.cm.Unlock() + s.gm.Lock() + defer s.gm.Unlock() + s.tm.Lock() + defer s.tm.Unlock() + s.hm.Lock() + defer s.hm.Unlock() + + for k := range s.counters { + delete(s.counters, k) + } + for i := range s.countersSlice { + s.cachedReporter.DeallocateCounter(s.countersSlice[i].cachedCount) + s.countersSlice[i].cachedCount = nil + } + s.countersSlice = s.countersSlice[:0] + + for k := range s.gauges { + delete(s.gauges, k) + } + for i := range s.gaugesSlice { + s.cachedReporter.DeallocateGauge(s.gaugesSlice[i].cachedGauge) + s.gaugesSlice[i].cachedGauge = nil + } + s.gaugesSlice = s.gaugesSlice[:0] + + for k := range s.timers { + delete(s.timers, k) + } + for i := range s.timersSlice { + s.cachedReporter.DeallocateTimer(s.timersSlice[i].cachedTimer) + s.timersSlice[i].cachedTimer = nil + for k := range s.timersSlice[i].tags { + delete(s.timersSlice[i].tags, k) + } + s.timersSlice[i].tags = nil + } + s.timersSlice = s.timersSlice[:0] + + for k := range s.histograms { + delete(s.histograms, k) + } + for i := range s.histogramsSlice { + // n.b. We don't want to deallocate any shared storage, so instead just + // deallocate bucket sample counters, which are unique to each + // bucket within a given histogram instance. 
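+		// (The bucket bounds themselves live in the shared bucketCache and
+		// are reused across scopes, so they must survive this release.)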
+ for j := range s.histogramsSlice[i].samples { + s.cachedReporter.DeallocateCounter(s.histogramsSlice[i].samples[j].cachedCount) + } + s.histogramsSlice[i].samples = s.histogramsSlice[i].samples[:0] + for k := range s.histogramsSlice[i].tags { + delete(s.histogramsSlice[i].tags, k) + } + s.histogramsSlice[i].tags = nil + } + s.histogramsSlice = s.histogramsSlice[:0] +} + // TestScope is a metrics collector that has no reporting, ensuring that // all emitted values have a given prefix or set of tags type TestScope interface { diff --git a/scope_benchmark_test.go b/scope_benchmark_test.go index c7254e9e..7dce9d22 100644 --- a/scope_benchmark_test.go +++ b/scope_benchmark_test.go @@ -239,13 +239,22 @@ func (n noopCachedReporter) AllocateCounter(name string, tags map[string]string) return noopStat{} } +func (n noopCachedReporter) DeallocateCounter(CachedCount) {} + func (n noopCachedReporter) AllocateGauge(name string, tags map[string]string) CachedGauge { return noopStat{} } +func (n noopCachedReporter) DeallocateGauge(CachedGauge) {} + func (n noopCachedReporter) AllocateTimer(name string, tags map[string]string) CachedTimer { return noopStat{} } + +func (n noopCachedReporter) DeallocateTimer(CachedTimer) {} + func (n noopCachedReporter) AllocateHistogram(name string, tags map[string]string, buckets Buckets) CachedHistogram { return noopStat{} } + +func (n noopCachedReporter) DeallocateHistogram(CachedHistogram) {} diff --git a/scope_registry.go b/scope_registry.go index 915a65ee..c5fa8511 100644 --- a/scope_registry.go +++ b/scope_registry.go @@ -20,18 +20,25 @@ package tally -import "sync" +import ( + "sync" + "time" +) var scopeRegistryKey = keyForPrefixedStringMaps type scopeRegistry struct { mu sync.RWMutex subscopes map[string]*scope + ttl time.Duration + deep bool } -func newScopeRegistry(root *scope) *scopeRegistry { +func newScopeRegistry(root *scope, opts ScopeOptions) *scopeRegistry { r := &scopeRegistry{ subscopes: make(map[string]*scope), + ttl: opts.UnusedScopeTTL, + deep: opts.UnusedScopeDeepEviction, } r.subscopes[scopeRegistryKey(root.prefix, root.tags)] = root return r @@ -41,8 +48,23 @@ func (r *scopeRegistry) Report(reporter StatsReporter) { r.mu.RLock() defer r.mu.RUnlock() - for _, s := range r.subscopes { - s.report(reporter) + now := time.Now() + for key, s := range r.subscopes { + if s.report(reporter) { + s.lastReport = now + continue + } + + if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { + s.release() + + if r.deep { + delete(r.subscopes, key) + for k := range s.tags { + delete(s.tags, k) + } + } + } } } @@ -50,8 +72,23 @@ func (r *scopeRegistry) CachedReport() { r.mu.RLock() defer r.mu.RUnlock() - for _, s := range r.subscopes { - s.cachedReport() + now := time.Now() + for key, s := range r.subscopes { + if s.cachedReport() { + s.lastReport = now + continue + } + + if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { + s.release() + + if r.deep { + delete(r.subscopes, key) + for k := range s.tags { + delete(s.tags, k) + } + } + } } } diff --git a/scope_test.go b/scope_test.go index 71e1f77d..f5b285ac 100644 --- a/scope_test.go +++ b/scope_test.go @@ -208,6 +208,8 @@ func (r *testStatsReporter) AllocateCounter( return counter } +func (r *testStatsReporter) DeallocateCounter(CachedCount) {} + func (r *testStatsReporter) ReportCounter(name string, tags map[string]string, value int64) { r.counters[name] = &testIntValue{ val: value, @@ -228,6 +230,8 @@ func (r *testStatsReporter) AllocateGauge( return gauge } +func (r *testStatsReporter) DeallocateGauge(CachedGauge) {} + 
func (r *testStatsReporter) ReportGauge(name string, tags map[string]string, value float64) { r.gauges[name] = &testFloatValue{ val: value, @@ -248,6 +252,8 @@ func (r *testStatsReporter) AllocateTimer( return timer } +func (r *testStatsReporter) DeallocateTimer(CachedTimer) {} + func (r *testStatsReporter) ReportTimer(name string, tags map[string]string, interval time.Duration) { r.timers[name] = &testIntValue{ val: int64(interval), @@ -264,6 +270,8 @@ func (r *testStatsReporter) AllocateHistogram( return testStatsReporterCachedHistogram{r, name, tags, buckets} } +func (r *testStatsReporter) DeallocateHistogram(CachedHistogram) {} + type testStatsReporterCachedHistogram struct { r *testStatsReporter name string diff --git a/stats.go b/stats.go index 64048dbb..a460cadd 100644 --- a/stats.go +++ b/stats.go @@ -83,22 +83,28 @@ func (c *counter) value() int64 { return curr - prev } -func (c *counter) report(name string, tags map[string]string, r StatsReporter) { +func (c *counter) report( + name string, + tags map[string]string, + r StatsReporter, +) bool { delta := c.value() if delta == 0 { - return + return false } r.ReportCounter(name, tags, delta) + return true } -func (c *counter) cachedReport() { +func (c *counter) cachedReport() bool { delta := c.value() if delta == 0 { - return + return false } c.cachedCount.ReportCount(delta) + return true } func (c *counter) snapshot() int64 { @@ -124,16 +130,26 @@ func (g *gauge) value() float64 { return math.Float64frombits(atomic.LoadUint64(&g.curr)) } -func (g *gauge) report(name string, tags map[string]string, r StatsReporter) { - if atomic.SwapUint64(&g.updated, 0) == 1 { - r.ReportGauge(name, tags, g.value()) +func (g *gauge) report( + name string, + tags map[string]string, + r StatsReporter, +) bool { + if atomic.SwapUint64(&g.updated, 0) == 0 { + return false } + + r.ReportGauge(name, tags, g.value()) + return true } -func (g *gauge) cachedReport() { - if atomic.SwapUint64(&g.updated, 0) == 1 { - g.cachedGauge.ReportGauge(g.value()) +func (g *gauge) cachedReport() bool { + if atomic.SwapUint64(&g.updated, 0) == 0 { + return false } + + g.cachedGauge.ReportGauge(g.value()) + return true } func (g *gauge) snapshot() float64 { @@ -149,6 +165,7 @@ type timer struct { reporter StatsReporter cachedTimer CachedTimer unreported timerValues + updated uint32 } type timerValues struct { @@ -180,6 +197,8 @@ func (t *timer) Record(interval time.Duration) { } else { t.reporter.ReportTimer(t.name, t.tags, interval) } + + atomic.StoreUint32(&t.updated, 1) } func (t *timer) Start() Stopwatch { @@ -191,6 +210,10 @@ func (t *timer) RecordStopwatch(stopwatchStart time.Time) { t.Record(d) } +func (t *timer) hasReported() bool { + return atomic.SwapUint32(&t.updated, 0) == 1 +} + func (t *timer) snapshot() []time.Duration { t.unreported.RLock() snap := make([]time.Duration, len(t.unreported.values)) @@ -323,13 +346,19 @@ func newHistogram( return h } -func (h *histogram) report(name string, tags map[string]string, r StatsReporter) { +func (h *histogram) report( + name string, + tags map[string]string, + r StatsReporter, +) (reported bool) { for i := range h.buckets { samples := h.samples[i].counter.value() if samples == 0 { continue } + reported = true + switch h.htype { case valueHistogramType: r.ReportHistogramValueSamples( @@ -351,15 +380,19 @@ func (h *histogram) report(name string, tags map[string]string, r StatsReporter) ) } } + + return } -func (h *histogram) cachedReport() { +func (h *histogram) cachedReport() (reported bool) { for i := range h.buckets { 
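+		// Only buckets that accumulated samples since the last flush are
+		// reported; if none did, `reported` stays false, letting the owning
+		// scope age toward TTL eviction if nothing else reports.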
samples := h.samples[i].counter.value() if samples == 0 { continue } + reported = true + switch h.htype { case valueHistogramType: h.samples[i].cachedBucket.ReportSamples(samples) @@ -367,6 +400,8 @@ func (h *histogram) cachedReport() { h.samples[i].cachedBucket.ReportSamples(samples) } } + + return } func (h *histogram) RecordValue(value float64) { From 5c473641f2cb3d40fb2c603c9933c5cb7d563062 Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 14:56:47 -0500 Subject: [PATCH 21/25] Clear all storage when using deep eviction --- scope.go | 13 ++++++++++++- scope_registry.go | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/scope.go b/scope.go index d1b52762..d28cc671 100644 --- a/scope.go +++ b/scope.go @@ -599,7 +599,7 @@ func (s *scope) copyAndSanitizeMap(tags map[string]string) map[string]string { return result } -func (s *scope) release() { +func (s *scope) release(deep bool) { if s.root { return } @@ -670,6 +670,17 @@ func (s *scope) release() { s.histogramsSlice[i].tags = nil } s.histogramsSlice = s.histogramsSlice[:0] + + if deep { + s.counters = nil + s.countersSlice = nil + s.gauges = nil + s.gaugesSlice = nil + s.timers = nil + s.timersSlice = nil + s.histograms = nil + s.histogramsSlice = nil + } } // TestScope is a metrics collector that has no reporting, ensuring that diff --git a/scope_registry.go b/scope_registry.go index c5fa8511..08421cc1 100644 --- a/scope_registry.go +++ b/scope_registry.go @@ -56,7 +56,7 @@ func (r *scopeRegistry) Report(reporter StatsReporter) { } if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { - s.release() + s.release(r.deep) if r.deep { delete(r.subscopes, key) @@ -80,7 +80,7 @@ func (r *scopeRegistry) CachedReport() { } if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { - s.release() + s.release(r.deep) if r.deep { delete(r.subscopes, key) From 8af529f6b42db3583a215419ea7361f0c58c17ae Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 15:26:59 -0500 Subject: [PATCH 22/25] Add nop methods for Prometheus --- prometheus/reporter.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/prometheus/reporter.go b/prometheus/reporter.go index 4ea9f622..df5ee3b9 100644 --- a/prometheus/reporter.go +++ b/prometheus/reporter.go @@ -369,6 +369,10 @@ func (r *reporter) AllocateCounter(name string, tags map[string]string) tally.Ca return &cachedMetric{counter: counterVec.With(tags)} } +// DeallocateCounter implements tally.CachedStatsReporter. +// This is currently a nop method. +func (r *reporter) DeallocateCounter(tally.CachedCount) {} + func (r *reporter) RegisterGauge( name string, tagKeys []string, @@ -418,6 +422,10 @@ func (r *reporter) AllocateGauge(name string, tags map[string]string) tally.Cach return &cachedMetric{gauge: gaugeVec.With(tags)} } +// DeallocateGauge implements tally.CachedStatsReporter. +// This is currently a nop method. +func (r *reporter) DeallocateGauge(tally.CachedGauge) {} + func (r *reporter) RegisterTimer( name string, tagKeys []string, @@ -557,6 +565,10 @@ func (r *reporter) AllocateTimer(name string, tags map[string]string) tally.Cach return timer } +// DeallocateTimer implements tally.CachedStatsReporter. +// This is currently a nop method. +func (r *reporter) DeallocateTimer(tally.CachedTimer) {} + func (r *reporter) AllocateHistogram( name string, tags map[string]string, @@ -571,6 +583,10 @@ func (r *reporter) AllocateHistogram( return &cachedMetric{histogram: histogramVec.With(tags)} } +// DeallocateHistogram implements tally.CachedStatsReporter. 
+// This is currently a nop method. +func (r *reporter) DeallocateHistogram(tally.CachedHistogram) {} + func (r *reporter) Capabilities() tally.Capabilities { return r } From 4e8b9389107c46bb518bb210ba5e49aca96d6f5c Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 23 Feb 2021 16:06:59 -0500 Subject: [PATCH 23/25] Fix multi reporter, ensure idempotency --- m3/resource_pool.go | 8 ++++++++ multi/reporter.go | 24 ++++++++++++++++++++++++ multi/reporter_test.go | 8 ++++++++ 3 files changed, 40 insertions(+) diff --git a/m3/resource_pool.go b/m3/resource_pool.go index fbf7a127..5d06a768 100644 --- a/m3/resource_pool.go +++ b/m3/resource_pool.go @@ -83,6 +83,10 @@ func (r *resourcePool) releaseProto(proto thrift.TProtocol) { } func (r *resourcePool) releaseMetricSlice(metrics []m3thrift.Metric) { + if metrics == nil { + return + } + for i := 0; i < len(metrics); i++ { metrics[i].Tags = nil } @@ -92,5 +96,9 @@ func (r *resourcePool) releaseMetricSlice(metrics []m3thrift.Metric) { //nolint:unused func (r *resourcePool) releaseMetricTagSlice(tags []m3thrift.MetricTag) { + if tags == nil { + return + } + r.metricSlicePool.Put(tags[:0]) } diff --git a/multi/reporter.go b/multi/reporter.go index 9f73d3d7..d9347db4 100644 --- a/multi/reporter.go +++ b/multi/reporter.go @@ -141,6 +141,12 @@ func (r *multiCached) AllocateCounter( return multiMetric{counters: metrics} } +func (r *multiCached) DeallocateCounter(counter tally.CachedCount) { + for _, rep := range r.reporters { + rep.DeallocateCounter(counter) + } +} + func (r *multiCached) AllocateGauge( name string, tags map[string]string, @@ -152,6 +158,12 @@ func (r *multiCached) AllocateGauge( return multiMetric{gauges: metrics} } +func (r *multiCached) DeallocateGauge(gauge tally.CachedGauge) { + for _, rep := range r.reporters { + rep.DeallocateGauge(gauge) + } +} + func (r *multiCached) AllocateTimer( name string, tags map[string]string, @@ -163,6 +175,12 @@ func (r *multiCached) AllocateTimer( return multiMetric{timers: metrics} } +func (r *multiCached) DeallocateTimer(timer tally.CachedTimer) { + for _, rep := range r.reporters { + rep.DeallocateTimer(timer) + } +} + func (r *multiCached) AllocateHistogram( name string, tags map[string]string, @@ -175,6 +193,12 @@ func (r *multiCached) AllocateHistogram( return multiMetric{histograms: metrics} } +func (r *multiCached) DeallocateHistogram(histogram tally.CachedHistogram) { + for _, rep := range r.reporters { + rep.DeallocateHistogram(histogram) + } +} + func (r *multiCached) Capabilities() tally.Capabilities { return r.multiBaseReporters.Capabilities() } diff --git a/multi/reporter_test.go b/multi/reporter_test.go index 9adb2105..239c393a 100644 --- a/multi/reporter_test.go +++ b/multi/reporter_test.go @@ -281,6 +281,8 @@ func (r *capturingStatsReporter) AllocateCounter( }} } +func (r *capturingStatsReporter) DeallocateCounter(tally.CachedCount) {} + func (r *capturingStatsReporter) AllocateGauge( name string, tags map[string]string, @@ -290,6 +292,8 @@ func (r *capturingStatsReporter) AllocateGauge( }} } +func (r *capturingStatsReporter) DeallocateGauge(tally.CachedGauge) {} + func (r *capturingStatsReporter) AllocateTimer( name string, tags map[string]string, @@ -299,6 +303,8 @@ func (r *capturingStatsReporter) AllocateTimer( }} } +func (r *capturingStatsReporter) DeallocateTimer(tally.CachedTimer) {} + func (r *capturingStatsReporter) AllocateHistogram( name string, tags map[string]string, @@ -320,6 +326,8 @@ func (r *capturingStatsReporter) AllocateHistogram( } } +func (r 
*capturingStatsReporter) DeallocateHistogram(tally.CachedHistogram) {}
+
 func (r *capturingStatsReporter) Capabilities() tally.Capabilities {
 	r.capabilities++
 	return r

From 1c6e75d4e39f48c61f27190ce033d0293a07097a Mon Sep 17 00:00:00 2001
From: Matt Way
Date: Tue, 23 Feb 2021 16:54:25 -0500
Subject: [PATCH 24/25] Feedback

---
 scope.go          | 37 +++++++++++--------------------------
 scope_registry.go |  9 +++++----
 2 files changed, 16 insertions(+), 30 deletions(-)

diff --git a/scope.go b/scope.go
index d28cc671..1f219833 100644
--- a/scope.go
+++ b/scope.go
@@ -85,7 +85,7 @@ type scope struct {
 	timersSlice []*timer
 	bucketCache *bucketCache
 
-	lastReport time.Time
+	lastReport int64
 	root       bool
 }
 
@@ -181,8 +181,9 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 		histograms:      make(map[string]*histogram),
 		histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize),
 		timers:          make(map[string]*timer),
-		bucketCache:     newBucketCache(),
 		timersSlice:     make([]*timer, 0, _defaultInitialSliceSize),
+		bucketCache:     newBucketCache(),
+		lastReport:      time.Now().UnixNano(),
 	}
 
 	// NB(r): Take a copy of the tags on creation
@@ -203,34 +204,26 @@ func newRootScope(opts ScopeOptions, interval time.Duration) *scope {
 func (s *scope) report(r StatsReporter) (reported bool) {
 	s.cm.RLock()
 	for name, counter := range s.counters {
-		if rep := counter.report(s.fullyQualifiedName(name), s.tags, r); rep {
-			reported = true
-		}
+		reported = counter.report(s.fullyQualifiedName(name), s.tags, r) || reported
 	}
 	s.cm.RUnlock()
 
 	s.gm.RLock()
 	for name, gauge := range s.gauges {
-		if rep := gauge.report(s.fullyQualifiedName(name), s.tags, r); rep {
-			reported = true
-		}
+		reported = gauge.report(s.fullyQualifiedName(name), s.tags, r) || reported
 	}
 	s.gm.RUnlock()
 
 	// we do nothing for timers here because timers report directly to the StatsReporter without buffering
 	s.tm.RLock()
 	for _, timer := range s.timersSlice {
-		if rep := timer.hasReported(); rep {
-			reported = true
-		}
+		reported = timer.hasReported() || reported
 	}
 	s.tm.RUnlock()
 
 	s.hm.RLock()
 	for name, histogram := range s.histograms {
-		if rep := histogram.report(s.fullyQualifiedName(name), s.tags, r); rep {
-			reported = true
-		}
+		reported = histogram.report(s.fullyQualifiedName(name), s.tags, r) || reported
 	}
 	s.hm.RUnlock()
 
@@ -240,34 +233,26 @@ func (s *scope) cachedReport() (reported bool) {
 	s.cm.RLock()
 	for _, counter := range s.countersSlice {
-		if rep := counter.cachedReport(); rep {
-			reported = true
-		}
+		reported = counter.cachedReport() || reported
 	}
 	s.cm.RUnlock()
 
 	s.gm.RLock()
 	for _, gauge := range s.gaugesSlice {
-		if rep := gauge.cachedReport(); rep {
-			reported = true
-		}
+		reported = gauge.cachedReport() || reported
 	}
 	s.gm.RUnlock()
 
 	// we do nothing for timers here because timers report directly to the StatsReporter without buffering
 	s.tm.RLock()
 	for _, timer := range s.timersSlice {
-		if rep := timer.hasReported(); rep {
-			reported = true
-		}
+		reported = timer.hasReported() || reported
 	}
 	s.tm.RUnlock()
 
 	s.hm.RLock()
 	for _, histogram := range s.histogramsSlice {
-		if rep := histogram.cachedReport(); rep {
-			reported = true
-		}
+		reported = histogram.cachedReport() || reported
 	}
 	s.hm.RUnlock()
 
diff --git a/scope_registry.go b/scope_registry.go
index 08421cc1..7bd01d23 100644
--- a/scope_registry.go
+++ b/scope_registry.go
@@ -48,14 +48,14 @@ func (r *scopeRegistry) Report(reporter StatsReporter) {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
 
-	now := time.Now()
+	now := time.Now().UnixNano()
 	for key,
s := range r.subscopes { if s.report(reporter) { s.lastReport = now continue } - if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { + if r.ttl > 0 && time.Duration(now-s.lastReport) > r.ttl { s.release(r.deep) if r.deep { @@ -72,14 +72,14 @@ func (r *scopeRegistry) CachedReport() { r.mu.RLock() defer r.mu.RUnlock() - now := time.Now() + now := time.Now().UnixNano() for key, s := range r.subscopes { if s.cachedReport() { s.lastReport = now continue } - if r.ttl > 0 && now.Sub(s.lastReport) > r.ttl { + if r.ttl > 0 && time.Duration(now-s.lastReport) > r.ttl { s.release(r.deep) if r.deep { @@ -140,6 +140,7 @@ func (r *scopeRegistry) Subscope(parent *scope, prefix string, tags map[string]s histogramsSlice: make([]*histogram, 0, _defaultInitialSliceSize), timers: make(map[string]*timer), bucketCache: parent.bucketCache, + lastReport: time.Now().UnixNano(), } r.subscopes[key] = subscope return subscope From 7e58f12ae60abe04446ea53587659bd79778999b Mon Sep 17 00:00:00 2001 From: Matt Way Date: Tue, 16 Mar 2021 19:12:30 -0400 Subject: [PATCH 25/25] Fix build --- scope.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scope.go b/scope.go index 1f219833..752e82e5 100644 --- a/scope.go +++ b/scope.go @@ -646,7 +646,7 @@ func (s *scope) release(deep bool) { // deallocate bucket sample counters, which are unique to each // bucket within a given histogram instance. for j := range s.histogramsSlice[i].samples { - s.cachedReporter.DeallocateCounter(s.histogramsSlice[i].samples[j].cachedCount) + s.cachedReporter.DeallocateCounter(s.histogramsSlice[i].samples[j].counter.cachedCount) } s.histogramsSlice[i].samples = s.histogramsSlice[i].samples[:0] for k := range s.histogramsSlice[i].tags {