This repository has been archived by the owner on Aug 23, 2023. It is now read-only.

Commit: More build fixes
ywwg committed Oct 7, 2022
1 parent 2404746 commit 834ab7b
Showing 13 changed files with 45 additions and 52 deletions.
8 changes: 4 additions & 4 deletions .circleci/config.yml
@@ -25,7 +25,7 @@ jobs:
 - image: circleci/golang:1.17.3
 steps:
 - checkout
-- run: GO111MODULE=off go test -v -race --short ./...
+- run: go test -v -race --short ./...

 qa:
 working_directory: /go/src/github.com/grafana/metrictank
@@ -53,8 +53,8 @@ jobs:
 - run: go version
 - run: scripts/qa/docs.sh
 - run: docker load -i build_docker/metrictank.tar
-- run: GO111MODULE=off go test -v ./stacktest/tests/end2end_carbon
-- run: GO111MODULE=off go test -v ./stacktest/tests/end2end_carbon_bigtable
+- run: go test -v ./stacktest/tests/end2end_carbon
+- run: go test -v ./stacktest/tests/end2end_carbon_bigtable

 qa-chaos:
 working_directory: /home/circleci/.go_workspace/src/github.com/grafana/metrictank
@@ -75,7 +75,7 @@ jobs:
 - run: docker pull jaegertracing/all-in-one
 # kafka broker advertises itself as 'kafka' but that doesn't resolve. we do have a docker proxy on localhost
 - run: echo "127.0.0.1 kafka" | sudo tee -a /etc/hosts
-- run: GO111MODULE=off go test -v ./stacktest/tests/chaos_cluster
+- run: go test -v ./stacktest/tests/chaos_cluster

 deploy:
 docker:
12 changes: 6 additions & 6 deletions Makefile
@@ -2,19 +2,19 @@
 default:
 $(MAKE) all
 test:
-GO111MODULE=off CGO_ENABLED=1 go test -race -short ./...
+CGO_ENABLED=1 go test -race -short ./...
 test-all:
-GO111MODULE=off CGO_ENABLED=1 go test -race ./...
+CGO_ENABLED=1 go test -race ./...
 benchmark:
-GO111MODULE=off CGO_ENABLED=0 go test -count=10 -run='^$$' -bench=. -benchtime=100ms ./... | tee benchmark.txt
+CGO_ENABLED=0 go test -count=10 -run='^$$' -bench=. -benchtime=100ms ./... | tee benchmark.txt

 stacktest:
 # count=1 forces uncached runs
 # not using stacktest/... here because Go would run them all in parallel,
 # or at least the TestMain's, and the stacks would conflict with each other
-GO111MODULE=off go test -count=1 -v ./stacktest/tests/chaos_cluster
-GO111MODULE=off go test -count=1 -v ./stacktest/tests/end2end_carbon
-GO111MODULE=off go test -count=1 -v ./stacktest/tests/end2end_carbon_bigtable
+go test -count=1 -v ./stacktest/tests/chaos_cluster
+go test -count=1 -v ./stacktest/tests/end2end_carbon
+go test -count=1 -v ./stacktest/tests/end2end_carbon_bigtable

 check:
 $(MAKE) test
6 changes: 3 additions & 3 deletions cmd/mt-gateway/api_test.go
@@ -6,8 +6,8 @@ import (
 "testing"
 )

-//Set up a mock http.ServeMux that returns the name of the service routed to.
-//We then verify that we're routing to the expected service
+// Set up a mock http.ServeMux that returns the name of the service routed to.
+// We then verify that we're routing to the expected service
 func TestApi(t *testing.T) {
 mux := Api{
 ingestHandler: stubHandler("ingest"),
@@ -66,7 +66,7 @@ func TestApi(t *testing.T) {

 }

-//creates a new http.Handler that always responds with the name of the service
+// creates a new http.Handler that always responds with the name of the service
 func stubHandler(svc string) http.Handler {
 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 w.Write([]byte(svc))
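The pattern these comments describe (stub handlers that echo a service name, exercised through the mux) can be illustrated with net/http/httptest. The snippet below is a standalone, hypothetical sketch, not code from the actual test file:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        // hypothetical stub in the spirit of stubHandler("ingest") above
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ingest"))
        })
        // drive it the way a routing test would: record the response and inspect the body
        rec := httptest.NewRecorder()
        h.ServeHTTP(rec, httptest.NewRequest("POST", "/metrics", nil))
        fmt.Println(rec.Body.String()) // prints: ingest
    }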
1 change: 0 additions & 1 deletion pkg/api/dataprocessor_test.go
@@ -643,7 +643,6 @@ func generateChunks(span uint32, start uint32, end uint32) []chunk.Chunk {
 //
 // query: |--------|
 // result: |-----|-----|
-//
 func TestGetSeriesCachedStore(t *testing.T) {
 // reduce overhead of creating accounter over and over
 old := accnt.EventQSize
8 changes: 4 additions & 4 deletions pkg/api/query_engine_test.go
@@ -318,10 +318,10 @@ func TestPlanRequests_Singles_DifferentTimeRanges(t *testing.T) {

 // TestPlanRequestsMaxPointsPerReqSoft tests how maxPointsPerReqSoft gets applied.
 // we validate that:
-// * requests are coarsened, PNGroup by PNGroup (we cannot predict PNGroup map iteration order, so we only test with 1 PNGroup),
-// and singles in groups by retention (in schemaID order)
-// * PNGroups obviously will need a common interval, which gets interesting when using multiple schemas
-// * coarsening continues until all data is fetched at its coarsest. At that point we may breach soft, but never hard
+// - requests are coarsened, PNGroup by PNGroup (we cannot predict PNGroup map iteration order, so we only test with 1 PNGroup),
+// and singles in groups by retention (in schemaID order)
+// - PNGroups obviously will need a common interval, which gets interesting when using multiple schemas
+// - coarsening continues until all data is fetched at its coarsest. At that point we may breach soft, but never hard
 func TestPlanRequestsMaxPointsPerReqSoft(t *testing.T) {
 in, out := generate(0, 1000, []reqProp{
 // 4 singles from 2 different retentions
2 changes: 1 addition & 1 deletion pkg/expr/func_aggregate_test.go
@@ -136,7 +136,7 @@ func TestAggregateMultipleDiffQuery(t *testing.T) {
 )
 }

-//mimic target=Aggregate(foo.*,foo.*,a,a)
+// mimic target=Aggregate(foo.*,foo.*,a,a)
 func TestAggregateMultipleTimesSameInput(t *testing.T) {
 input := [][]models.Series{
 {
16 changes: 8 additions & 8 deletions pkg/idx/memory/time_limit.go
@@ -50,15 +50,15 @@ func (l *TimeLimiter) add(now time.Time, d time.Duration) {
 }

 // Wait returns when we are not rate limited
-// * if we passed the window, we reset everything (this is only safe for callers
+// - if we passed the window, we reset everything (this is only safe for callers
 // that behave correctly, i.e. that wait the instructed time after each add)
-// * if limit is not reached, no sleep is needed
-// * if limit has been exceeded, sleep until next period + extra multiple to compensate
-// this is perhaps best explained with an example:
-// if window is 1s and limit 100ms, but we spent 250ms, then we spent effectively 2.5 seconds worth of work.
-// let's say we are 800ms into the 1s window, that means we should sleep 2500-800 = 1.7s
-// in order to maximize work while honoring the imposed limit.
-// * if limit has been met exactly, sleep until next period (this is a special case of the above)
+// - if limit is not reached, no sleep is needed
+// - if limit has been exceeded, sleep until next period + extra multiple to compensate
+// this is perhaps best explained with an example:
+// if window is 1s and limit 100ms, but we spent 250ms, then we spent effectively 2.5 seconds worth of work.
+// let's say we are 800ms into the 1s window, that means we should sleep 2500-800 = 1.7s
+// in order to maximize work while honoring the imposed limit.
+// - if limit has been met exactly, sleep until next period (this is a special case of the above)
 func (l *TimeLimiter) Wait() {
 time.Sleep(l.wait(time.Now()))
 }
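The arithmetic in this comment can be captured in a tiny standalone sketch. The function below is hypothetical (window, limit, time spent and elapsed time are passed in as plain arguments) and is not the actual TimeLimiter implementation; it only restates the rule the comment describes:

    package main

    import (
        "fmt"
        "time"
    )

    // waitSketch: how long to sleep, per the rules in the Wait comment above.
    func waitSketch(elapsed, window, limit, spent time.Duration) time.Duration {
        if elapsed >= window {
            return 0 // past the window: everything resets, no sleep needed
        }
        if spent < limit {
            return 0 // limit not reached: no sleep needed
        }
        // limit met or exceeded: scale the work done into window-equivalents and
        // sleep for whatever part of that we have not already sat through
        owed := time.Duration(float64(spent) / float64(limit) * float64(window))
        return owed - elapsed
    }

    func main() {
        // the example from the comment: window 1s, limit 100ms, spent 250ms, 800ms into the window
        fmt.Println(waitSketch(800*time.Millisecond, time.Second, 100*time.Millisecond, 250*time.Millisecond)) // 1.7s
    }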
9 changes: 5 additions & 4 deletions pkg/input/kafkamdm/lag_monitor.go
@@ -144,10 +144,11 @@ func NewLagMonitor(size int, partitions []int32) *LagMonitor {
 // (minimum lag seen in last N measurements) / input rate.
 // example:
 // lag (in messages/metrics) input rate ---> score (seconds behind)
-// 10k 1k/second 10
-// 200 1k/second 0 (less than 1s behind)
-// 0 * 0 (perfectly in sync)
-// anything 0 (after startup) same as lag
+//
+// 10k 1k/second 10
+// 200 1k/second 0 (less than 1s behind)
+// 0 * 0 (perfectly in sync)
+// anything 0 (after startup) same as lag
 //
 // The returned total score for the node is the max of the scores of individual partitions.
 // Note that one or more StoreOffset() (rate) calls may have been made but no StoreLag().
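The scoring rule spelled out above (minimum observed lag divided by input rate, falling back to the raw lag when no rate has been measured yet) can be sketched as follows; the function and its inputs are hypothetical and are not the actual LagMonitor code:

    package main

    import "fmt"

    // scoreSketch: "seconds behind" for one partition, per the table in the comment.
    func scoreSketch(minLag, rate int) int {
        if rate == 0 {
            return minLag // no rate observed yet (e.g. right after startup): score is the lag itself
        }
        return minLag / rate // integer division, so anything under one second scores 0
    }

    func main() {
        fmt.Println(scoreSketch(10000, 1000)) // 10
        fmt.Println(scoreSketch(200, 1000))   // 0: less than 1s behind
        fmt.Println(scoreSketch(0, 5000))     // 0: perfectly in sync
    }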
8 changes: 4 additions & 4 deletions pkg/mdata/chunk/tsz/tszlong.go
@@ -11,10 +11,10 @@ import (
 )

 // SeriesLong similar to Series4h, except:
-// * it doesn't write t0 to the stream (for callers that track t0 corresponding to a chunk separately)
-// * it doesn't store an initial delta. instead, it assumes a starting delta of 60 and uses delta-of-delta
-// encoding from the get-go.
-// * it uses a more compact way to mark end-of-stream
+// - it doesn't write t0 to the stream (for callers that track t0 corresponding to a chunk separately)
+// - it doesn't store an initial delta. instead, it assumes a starting delta of 60 and uses delta-of-delta
+// encoding from the get-go.
+// - it uses a more compact way to mark end-of-stream
 type SeriesLong struct {
 // TODO(dgryski): timestamps in the paper are uint64
 T0 uint32 // exposed for caller convenience. do NOT set directly. set via constructor
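A short worked example of the assumed starting delta of 60: for regularly spaced one-minute data the first delta-of-delta comes out to zero, which is the cheapest case in Gorilla-style timestamp encoding. The values below are hypothetical and this is not the codec itself:

    package main

    import "fmt"

    func main() {
        t0 := uint32(1000)     // chunk start, tracked by the caller for SeriesLong
        t1 := uint32(1060)     // first point, one minute later
        prevDelta := int64(60) // the assumed initial delta
        delta := int64(t1 - t0)
        dod := delta - prevDelta
        fmt.Println(dod) // 0: a zero delta-of-delta costs a single bit in tsz-style schemes
    }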
6 changes: 3 additions & 3 deletions pkg/schema/partition.go
@@ -108,9 +108,9 @@ func (m *MetricDefinition) PartitionID(method PartitionByMethod, partitions int3
 partition = -partition
 }
 case PartitionBySeriesWithTags:
-h := xxhash.New()
-h.WriteString(m.NameWithTags())
-partition = jump.Hash(h.Sum64(), int(partitions))
+// h := xxhash.New()
+// h.WriteString(m.NameWithTags())
+// partition = jump.Hash(h.Sum64(), int(partitions))
 case PartitionBySeriesWithTagsFnv:
 h := util.NewFnv32aStringWriter()
 if len(m.nameWithTags) > 0 {
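For reference, the branch commented out above pairs a 64-bit hash of the name-plus-tags string with Jump consistent hashing to choose a partition. The sketch below re-implements the jump function (Lamping & Veach) and substitutes hash/fnv for xxhash so it stands alone; it illustrates the technique and is not the project's code:

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    // jumpHash maps a 64-bit key onto one of numBuckets buckets (Jump consistent hashing).
    func jumpHash(key uint64, numBuckets int) int32 {
        var b, j int64 = -1, 0
        for j < int64(numBuckets) {
            b = j
            key = key*2862933555777941757 + 1
            j = int64(float64(b+1) * (float64(int64(1)<<31) / float64((key>>33)+1)))
        }
        return int32(b)
    }

    func main() {
        // hash/fnv stands in for xxhash purely to keep the sketch dependency-free
        h := fnv.New64a()
        h.Write([]byte("some.metric.name;dc=us-east;host=web01"))
        fmt.Println(jumpHash(h.Sum64(), 32)) // a partition in [0, 32)
    }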
17 changes: 5 additions & 12 deletions scripts/build.sh
@@ -2,8 +2,6 @@

 set -e

-export GO111MODULE=off
-
 # Find the directory we exist within
 DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 cd ${DIR}/..
@@ -34,23 +32,18 @@ function fail () {
 }

 # Build binary
-cd cmd
-for bin in ${target}; do
-cd $bin
+for bin in $(ls -1 cmd/); do
 echo Building $bin
 if [ "$1" == "-race" ]
 then
 set -x
 # -race requires CGO
-CGO_ENABLED=1 go build -race -ldflags "-X main.version=$version" -o $BUILDDIR/$bin || fail
+CGO_ENABLED=1 go build -race -ldflags "-X main.version=$version" -o $BUILDDIR/$bin "github.com/grafana/metrictank/cmd/${bin}" || fail
 elif [ "$1" == "-debug" ]
 then
 set -x
 # -debug flags
-CGO_ENABLED=0 go build -gcflags "all=-N -l" -ldflags "-X main.version=${version}-debug" -o $BUILDDIR/$bin || fail
+CGO_ENABLED=0 go build -gcflags "all=-N -l" -ldflags "-X main.version=${version}-debug" -o $BUILDDIR/$bin "github.com/grafana/metrictank/cmd/${bin}" || fail
 else
 set -x
-go build -ldflags "-X main.version=$version" -o $BUILDDIR/$bin || fail
+go build -ldflags "-X main.version=$version" -o $BUILDDIR/$bin "github.com/grafana/metrictank/cmd/${bin}" || fail
 fi
 set +x
-cd ..
 done
2 changes: 1 addition & 1 deletion scripts/qa-subjective/vet.sh
@@ -7,4 +7,4 @@ DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 # and cd into root project dir
 cd ${DIR}/../..

-GO111MODULE=off go vet ./...
+go vet ./...
2 changes: 1 addition & 1 deletion scripts/qa/misspell.sh
@@ -7,4 +7,4 @@ DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
 # and cd into root project dir
 cd ${DIR}/../..
 go install github.com/client9/misspell/cmd/misspell@latest
-misspell -error $(find . -type f | grep -v vendor | grep -v '.git' | grep -v Gopkg.lock)
+misspell -error $(find . -type f | grep -v vendor | grep -v "go\." | grep -v '.git' | grep -v Gopkg.lock)
