diff --git a/README.md b/README.md
index 8a48a41..ed22b03 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,8 @@ Required authentication scopes:
 
 To authenticate this way, only set `CF_API_TOKEN` (omit `CF_API_EMAIL` and `CF_API_KEY`)
 
+[Shortcut to create the API token](https://dash.cloudflare.com/profile/api-tokens?permissionGroupKeys=%5B%7B%22key%22%3A%22account_analytics%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22account_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22analytics%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22firewall_services%22%2C%22type%22%3A%22read%22%7D%5D&name=Cloudflare+Exporter&accountId=*&zoneId=all)
+
 ### User email + API key
 
 To authenticate with user email + API key, use the `Global API Key` from the Cloudflare dashboard. Beware that this key authenticates with write access to every Cloudflare resource.
@@ -104,6 +106,9 @@ Note: `ZONE_` configuration is not supported as flag.
 # HELP cloudflare_zone_pool_requests_total Requests per pool
 # HELP cloudflare_logpush_failed_jobs_account_count Number of failed logpush jobs on the account level
 # HELP cloudflare_logpush_failed_jobs_zone_count Number of failed logpush jobs on the zone level
+# HELP cloudflare_r2_operation_count Number of operations performed by R2
+# HELP cloudflare_r2_storage_bytes Storage used by R2
+# HELP cloudflare_r2_storage_total_bytes Total storage used by R2
 ```
 
 ## Helm chart repository
diff --git a/cloudflare.go b/cloudflare.go
index 4b96d94..3f054f6 100644
--- a/cloudflare.go
+++ b/cloudflare.go
@@ -5,13 +5,13 @@ import (
 	"strings"
 	"time"
 
-	cloudflare "github.com/cloudflare/cloudflare-go"
+	"github.com/cloudflare/cloudflare-go"
 	"github.com/machinebox/graphql"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/viper"
 )
 
-var (
+const (
 	cfGraphQLEndpoint = "https://api.cloudflare.com/client/v4/graphql/"
 )
 
@@ -45,6 +45,35 @@ type cloudflareResponseLogpushAccount struct {
 	} `json:"viewer"`
 }
 
+type r2AccountResp struct {
+	R2StorageGroups []struct {
+		Dimensions struct {
+			BucketName string `json:"bucketName"`
+		} `json:"dimensions"`
+		Max struct {
+			MetadataSize uint64 `json:"metadataSize"`
+			PayloadSize  uint64 `json:"payloadSize"`
+			ObjectCount  uint64 `json:"objectCount"`
+		} `json:"max"`
+	} `json:"r2StorageAdaptiveGroups"`
+
+	R2StorageOperations []struct {
+		Dimensions struct {
+			Action     string `json:"actionType"`
+			BucketName string `json:"bucketName"`
+		} `json:"dimensions"`
+		Sum struct {
+			Requests uint64 `json:"requests"`
+		} `json:"sum"`
+	} `json:"r2OperationsAdaptiveGroups"`
+}
+
+type cloudflareResponseR2Account struct {
+	Viewer struct {
+		Accounts []r2AccountResp `json:"accounts"`
+	}
+}
+
 type cloudflareResponseLogpushZone struct {
 	Viewer struct {
 		Zones []logpushResponse `json:"zones"`
@@ -251,44 +280,22 @@ type lbResp struct {
 }
 
 func fetchZones() []cloudflare.Zone {
-	var api *cloudflare.API
-	var err error
-	if len(viper.GetString("cf_api_token")) > 0 {
-		api, err = cloudflare.NewWithAPIToken(viper.GetString("cf_api_token"))
-	} else {
-		api, err = cloudflare.New(viper.GetString("cf_api_key"), viper.GetString("cf_api_email"))
-	}
-	if err != nil {
-		log.Fatal(err)
-	}
-
 	ctx := context.Background()
-	z, err := api.ListZones(ctx)
+	z, err := cloudflareAPI.ListZones(ctx)
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("Error fetching zones: %s", err)
 	}
 
 	return z
 }
 
 func fetchFirewallRules(zoneID string) map[string]string {
-	var api *cloudflare.API
-	var err error
-	if len(viper.GetString("cf_api_token")) > 0 {
-		api, err = cloudflare.NewWithAPIToken(viper.GetString("cf_api_token"))
-	} else {
-		api, err = cloudflare.New(viper.GetString("cf_api_key"), viper.GetString("cf_api_email"))
-	}
-	if err != nil {
-		log.Fatal(err)
-	}
-
 	ctx := context.Background()
-	listOfRules, _, err := api.FirewallRules(ctx,
+	listOfRules, _, err := cloudflareAPI.FirewallRules(ctx,
 		cloudflare.ZoneIdentifier(zoneID),
 		cloudflare.FirewallRuleListParams{})
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("Error fetching firewall rules: %s", err)
 	}
 
 	firewallRulesMap := make(map[string]string)
@@ -296,15 +303,15 @@ func fetchFirewallRules(zoneID string) map[string]string {
 		firewallRulesMap[rule.ID] = rule.Description
 	}
 
-	listOfRulesets, err := api.ListRulesets(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.ListRulesetsParams{})
+	listOfRulesets, err := cloudflareAPI.ListRulesets(ctx, cloudflare.ZoneIdentifier(zoneID), cloudflare.ListRulesetsParams{})
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("Error listing rulesets: %s", err)
 	}
 	for _, rulesetDesc := range listOfRulesets {
 		if rulesetDesc.Phase == "http_request_firewall_managed" {
-			ruleset, err := api.GetRuleset(ctx, cloudflare.ZoneIdentifier(zoneID), rulesetDesc.ID)
+			ruleset, err := cloudflareAPI.GetRuleset(ctx, cloudflare.ZoneIdentifier(zoneID), rulesetDesc.ID)
 			if err != nil {
-				log.Fatal(err)
+				log.Fatalf("Error fetching ruleset: %s", err)
 			}
 			for _, rule := range ruleset.Rules {
 				firewallRulesMap[rule.ID] = rule.Description
@@ -316,21 +323,10 @@ func fetchFirewallRules(zoneID string) map[string]string {
 }
 
 func fetchAccounts() []cloudflare.Account {
-	var api *cloudflare.API
-	var err error
-	if len(viper.GetString("cf_api_token")) > 0 {
-		api, err = cloudflare.NewWithAPIToken(viper.GetString("cf_api_token"))
-	} else {
-		api, err = cloudflare.New(viper.GetString("cf_api_key"), viper.GetString("cf_api_email"))
-	}
-	if err != nil {
-		log.Fatal(err)
-	}
-
 	ctx := context.Background()
-	a, _, err := api.Accounts(ctx, cloudflare.AccountsListParams{PaginationOptions: cloudflare.PaginationOptions{PerPage: 100}})
+	a, _, err := cloudflareAPI.Accounts(ctx, cloudflare.AccountsListParams{PaginationOptions: cloudflare.PaginationOptions{PerPage: 100}})
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("Error fetching accounts: %s", err)
 	}
 
 	return a
@@ -570,7 +566,7 @@ func fetchWorkerTotals(accountID string) (*cloudflareResponseAccts, error) {
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseAccts
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("Error fetching worker totals: %s", err)
 		return nil, err
 	}
 
@@ -647,7 +643,7 @@ func fetchLoadBalancerTotals(zoneIDs []string) (*cloudflareResponseLb, error) {
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseLb
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("Error fetching load balancer totals: %s", err)
 		return nil, err
 	}
 	return &resp, nil
@@ -699,7 +695,7 @@ func fetchLogpushAccount(accountID string) (*cloudflareResponseLogpushAccount, e
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseLogpushAccount
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("Error fetching logpush account totals: %s", err)
 		return nil, err
 	}
 	return &resp, nil
@@ -751,13 +747,71 @@ func fetchLogpushZone(zoneIDs []string) (*cloudflareResponseLogpushZone, error)
 	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
 	var resp cloudflareResponseLogpushZone
 	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
-		log.Error(err)
+		log.Errorf("Error fetching logpush zone totals: %s", err)
 		return nil, err
 	}
 
 	return &resp, nil
 }
 
+func fetchR2Account(accountID string) (*cloudflareResponseR2Account, error) {
+	now := time.Now().Add(-time.Duration(viper.GetInt("scrape_delay")) * time.Second).UTC()
+	s := 60 * time.Second
+	now = now.Truncate(s)
+
+	request := graphql.NewRequest(`query($accountID: String!, $limit: Int!, $date: String!) {
+	viewer {
+		accounts(filter: {accountTag : $accountID }) {
+			r2StorageAdaptiveGroups(
+				filter: {
+					date: $date
+				},
+				limit: $limit
+			) {
+				dimensions {
+					bucketName
+				}
+				max {
+					metadataSize
+					payloadSize
+					objectCount
+				}
+			}
+			r2OperationsAdaptiveGroups(filter: { date: $date }, limit: $limit) {
+				dimensions {
+					actionType
+					bucketName
+				}
+				sum {
+					requests
+				}
+			}
+		}
+	}
+}`)
+
+	if len(viper.GetString("cf_api_token")) > 0 {
+		request.Header.Set("Authorization", "Bearer "+viper.GetString("cf_api_token"))
+	} else {
+		request.Header.Set("X-AUTH-EMAIL", viper.GetString("cf_api_email"))
+		request.Header.Set("X-AUTH-KEY", viper.GetString("cf_api_key"))
+	}
+
+	request.Var("accountID", accountID)
+	request.Var("limit", 9999)
+	request.Var("date", now.Format("2006-01-02"))
+
+	ctx := context.Background()
+	graphqlClient := graphql.NewClient(cfGraphQLEndpoint)
+	graphqlClient.Log = func(s string) { log.Debug(s) }
+	var resp cloudflareResponseR2Account
+	if err := graphqlClient.Run(ctx, request, &resp); err != nil {
+		log.Errorf("Error fetching R2 account: %s", err)
+		return nil, err
+	}
+	return &resp, nil
+}
+
 func findZoneAccountName(zones []cloudflare.Zone, ID string) (string, string) {
 	for _, z := range zones {
 		if z.ID == ID {
diff --git a/main.go b/main.go
index c31f522..8e994b4 100644
--- a/main.go
+++ b/main.go
@@ -1,8 +1,8 @@
 package main
 
 import (
+	"context"
 	"net/http"
-	"os"
 	"strings"
 	"sync"
 	"time"
@@ -12,7 +12,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	cloudflare "github.com/cloudflare/cloudflare-go"
+	"github.com/cloudflare/cloudflare-go"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -30,19 +30,15 @@ import (
 // 	cfgMetricsDenylist = ""
 // )
 
+var (
+	cloudflareAPI *cloudflare.API
+)
+
 func getTargetZones() []string {
 	var zoneIDs []string
 
 	if len(viper.GetString("cf_zones")) > 0 {
 		zoneIDs = strings.Split(viper.GetString("cf_zones"), ",")
-	} else {
-		// deprecated
-		for _, e := range os.Environ() {
-			if strings.HasPrefix(e, "ZONE_") {
-				split := strings.SplitN(e, "=", 2)
-				zoneIDs = append(zoneIDs, split[1])
-			}
-		}
 	}
 
 	return zoneIDs
@@ -111,6 +107,7 @@ func fetchMetrics() {
 	for _, a := range accounts {
 		go fetchWorkerAnalytics(a, &wg)
 		go fetchLogpushAnalyticsForAccount(a, &wg)
+		go fetchR2StorageForAccount(a, &wg)
 	}
 
 	// Make requests in groups of cfgBatchSize to avoid rate limit
@@ -133,16 +130,7 @@ func fetchMetrics() {
 	wg.Wait()
 }
 
-func runExpoter() {
-	// fmt.Println("	:", viper.GetString("cf_api_email"))
-	// fmt.Println("	:", viper.GetString("cf_api_key"))
-
-	// fmt.Println("	:", viper.GetString("metrics_path"))
-
-	// fmt.Println(":ASD	:", viper.GetString("listen"))
-
-	// fmt.Println("	:", cfgListen)
-
+func runExporter() {
 	cfgMetricsPath := viper.GetString("metrics_path")
 
 	if !(len(viper.GetString("cf_api_token")) > 0 ||
@@ -155,16 +143,40 @@ func runExpoter() {
 	customFormatter.TimestampFormat = "2006-01-02 15:04:05"
 	log.SetFormatter(customFormatter)
 	customFormatter.FullTimestamp = true
+	logLevel, err := log.ParseLevel(viper.GetString("log_level"))
+	if err != nil {
+		log.Fatalf("Invalid log level: %s", viper.GetString("log_level"))
+	}
+	log.SetLevel(logLevel)
 
-	metricsDenylist := []string{}
+	if len(viper.GetString("cf_api_token")) > 0 {
+		cloudflareAPI, err = cloudflare.NewWithAPIToken(viper.GetString("cf_api_token"))
+
+	} else {
+		cloudflareAPI, err = cloudflare.New(viper.GetString("cf_api_key"), viper.GetString("cf_api_email"))
+	}
+	if err != nil {
+		log.Fatalf("Error creating Cloudflare API client: %s", err)
+	}
+
+	if len(viper.GetString("cf_api_token")) > 0 {
+		status, err := cloudflareAPI.VerifyAPIToken(context.Background())
+		if err != nil {
+			log.Fatalf("Error creating Cloudflare API client: %s", err)
+		}
+		log.Debugf("API Token status: %s", status.Status)
+	}
+
+	var metricsDenylist []string
 	if len(viper.GetString("metrics_denylist")) > 0 {
 		metricsDenylist = strings.Split(viper.GetString("metrics_denylist"), ",")
 	}
-	deniedMetricsSet, err := buildDeniedMetricsSet(metricsDenylist)
+	metricsSet, err := buildFilteredMetricsSet(metricsDenylist)
 	if err != nil {
-		log.Fatal(err)
+		log.Fatalf("Error building metrics set: %s", err)
 	}
-	mustRegisterMetrics(deniedMetricsSet)
+	log.Debugf("Metrics set: %v", metricsSet)
+	mustRegisterMetrics(metricsSet)
 
 	go func() {
 		for ; true; <-time.NewTicker(60 * time.Second).C {
@@ -194,30 +206,29 @@ func runExpoter() {
 
 func main() {
 	var cmd = &cobra.Command{
-		Use:   "viper-test",
-		Short: "testing viper",
+		Use:   "cloudflare_exporter",
+		Short: "Export Cloudflare metrics to Prometheus",
 		Run: func(_ *cobra.Command, _ []string) {
-			runExpoter()
+			runExporter()
 		},
 	}
 
-	//vip := viper.New()
 	viper.AutomaticEnv()
 
 	flags := cmd.Flags()
 
-	flags.String("listen", ":8080", "listen on addr:port ( default :8080), omit addr to listen on all interfaces")
+	flags.String("listen", ":8080", "listen on addr:port, omit addr to listen on all interfaces")
 	viper.BindEnv("listen")
 	viper.SetDefault("listen", ":8080")
 
-	flags.String("metrics_path", "/metrics", "path for metrics, default /metrics")
+	flags.String("metrics_path", "/metrics", "path for metrics")
 	viper.BindEnv("metrics_path")
 	viper.SetDefault("metrics_path", "/metrics")
 
-	flags.String("cf_api_key", "", "cloudflare api key, works with api_email flag")
+	flags.String("cf_api_key", "", "cloudflare api key, required with api_email flag")
 	viper.BindEnv("cf_api_key")
 
-	flags.String("cf_api_email", "", "cloudflare api email, works with api_key flag")
+	flags.String("cf_api_email", "", "cloudflare api email, required with api_key flag")
 	viper.BindEnv("cf_api_email")
 
 	flags.String("cf_api_token", "", "cloudflare api token (preferred)")
@@ -231,11 +242,11 @@ func main() {
 	viper.BindEnv("cf_exclude_zones")
 	viper.SetDefault("cf_exclude_zones", "")
 
-	flags.Int("scrape_delay", 300, "scrape delay in seconds, defaults to 300")
+	flags.Int("scrape_delay", 300, "scrape delay in seconds")
 	viper.BindEnv("scrape_delay")
 	viper.SetDefault("scrape_delay", 300)
 
-	flags.Int("cf_batch_size", 10, "cloudflare zones batch size (1-10), defaults to 10")
+	flags.Int("cf_batch_size", 10, "cloudflare zones batch size (1-10)")
 	viper.BindEnv("cf_batch_size")
 	viper.SetDefault("cf_batch_size", 10)
 
@@ -247,6 +258,10 @@ func main() {
 	viper.BindEnv("metrics_denylist")
 	viper.SetDefault("metrics_denylist", "")
 
+	flags.String("log_level", "info", "log level")
+	viper.BindEnv("log_level")
+	viper.SetDefault("log_level", "info")
+
 	viper.BindPFlags(flags)
 	cmd.Execute()
 }
diff --git a/prometheus.go b/prometheus.go
index 77b05af..5d5f100 100644
--- a/prometheus.go
+++ b/prometheus.go
@@ -51,6 +51,9 @@ const (
 	poolRequestsTotalMetricName        MetricName = "cloudflare_zone_pool_requests_total"
 	logpushFailedJobsAccountMetricName MetricName = "cloudflare_logpush_failed_jobs_account_count"
 	logpushFailedJobsZoneMetricName    MetricName = "cloudflare_logpush_failed_jobs_zone_count"
+	r2StorageTotalMetricName           MetricName = "cloudflare_r2_storage_total_bytes"
+	r2StorageMetricName                MetricName = "cloudflare_r2_storage_bytes"
+	r2OperationMetricName              MetricName = "cloudflare_r2_operation_count"
 )
 
 type MetricsSet map[MetricName]struct{}
@@ -262,6 +265,21 @@ var (
 		},
 		[]string{"destination", "job_id", "final"},
 	)
+
+	r2StorageTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: r2StorageTotalMetricName.String(),
+		Help: "Total storage used by R2",
+	}, []string{"account"})
+
+	r2Storage = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: r2StorageMetricName.String(),
+		Help: "Storage used by R2",
+	}, []string{"account", "bucket"})
+
+	r2Operation = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: r2OperationMetricName.String(),
+		Help: "Number of operations performed by R2",
+	}, []string{"account", "bucket", "operation"})
 )
 
 func buildAllMetricsSet() MetricsSet {
@@ -298,12 +316,16 @@ func buildAllMetricsSet() MetricsSet {
 	allMetricsSet.Add(poolRequestsTotalMetricName)
 	allMetricsSet.Add(logpushFailedJobsAccountMetricName)
 	allMetricsSet.Add(logpushFailedJobsZoneMetricName)
+	allMetricsSet.Add(r2StorageTotalMetricName)
+	allMetricsSet.Add(r2StorageMetricName)
+	allMetricsSet.Add(r2OperationMetricName)
 	return allMetricsSet
 }
 
-func buildDeniedMetricsSet(metricsDenylist []string) (MetricsSet, error) {
+func buildFilteredMetricsSet(metricsDenylist []string) (MetricsSet, error) {
 	deniedMetricsSet := MetricsSet{}
 	allMetricsSet := buildAllMetricsSet()
+
 	for _, metric := range metricsDenylist {
 		if !allMetricsSet.Has(MetricName(metric)) {
 			return nil, fmt.Errorf("metric %s doesn't exists", metric)
@@ -410,6 +432,16 @@ func mustRegisterMetrics(deniedMetrics MetricsSet) {
 	if !deniedMetrics.Has(logpushFailedJobsZoneMetricName) {
 		prometheus.MustRegister(logpushFailedJobsZone)
 	}
+	if !deniedMetrics.Has(r2StorageTotalMetricName) {
+		prometheus.MustRegister(r2StorageTotal)
+	}
+	if !deniedMetrics.Has(r2StorageMetricName) {
+		prometheus.MustRegister(r2Storage)
+	}
+	if !deniedMetrics.Has(r2OperationMetricName) {
+		prometheus.MustRegister(r2Operation)
+	}
+
 }
 
 func fetchWorkerAnalytics(account cloudflare.Account, wg *sync.WaitGroup) {
@@ -464,6 +496,28 @@ func fetchLogpushAnalyticsForAccount(account cloudflare.Account, wg *sync.WaitGr
 	}
 }
 
+func fetchR2StorageForAccount(account cloudflare.Account, wg *sync.WaitGroup) {
+	wg.Add(1)
+	defer wg.Done()
+
+	r, err := fetchR2Account(account.ID)
+
+	if err != nil {
+		return
+	}
+	for _, acc := range r.Viewer.Accounts {
+		var totalStorage uint64
+		for _, bucket := range acc.R2StorageGroups {
+			totalStorage += bucket.Max.PayloadSize
+			r2Storage.With(prometheus.Labels{"account": account.Name, "bucket": bucket.Dimensions.BucketName}).Set(float64(bucket.Max.PayloadSize))
+		}
+		for _, operation := range acc.R2StorageOperations {
+			r2Operation.With(prometheus.Labels{"account": account.Name, "bucket": operation.Dimensions.BucketName, "operation": operation.Dimensions.Action}).Set(float64(operation.Sum.Requests))
+		}
+		r2StorageTotal.With(prometheus.Labels{"account": account.Name}).Set(float64(totalStorage))
+	}
+}
+
 func fetchLogpushAnalyticsForZone(zones []cloudflare.Zone, wg *sync.WaitGroup) {
 	wg.Add(1)
 	defer wg.Done()