diff --git a/admin/database/database.go b/admin/database/database.go index 1e2a3a11560..fcc27fc5161 100644 --- a/admin/database/database.go +++ b/admin/database/database.go @@ -891,7 +891,7 @@ type InsertVirtualFileOptions struct { type Asset struct { ID string - OrganizationID string `db:"org_id"` + OrganizationID *string `db:"org_id"` Path string `db:"path"` OwnerID string `db:"owner_id"` CreatedOn time.Time `db:"created_on"` diff --git a/admin/database/postgres/migrations/0034.sql b/admin/database/postgres/migrations/0034.sql index 181f6c9d7f1..f22d6f667dd 100644 --- a/admin/database/postgres/migrations/0034.sql +++ b/admin/database/postgres/migrations/0034.sql @@ -1,8 +1 @@ -CREATE TABLE project_access_requests ( - id UUID NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY, - user_id UUID REFERENCES users (id) ON DELETE CASCADE, - project_id UUID REFERENCES projects (id) ON DELETE CASCADE, - created_on TIMESTAMPTZ NOT NULL DEFAULT now() -); - -CREATE UNIQUE INDEX project_access_requests_user_id_project_idx ON project_access_requests (user_id, project_id); \ No newline at end of file +ALTER TABLE assets ALTER COLUMN org_id DROP NOT NULL; \ No newline at end of file diff --git a/admin/database/postgres/migrations/0035.sql b/admin/database/postgres/migrations/0035.sql new file mode 100644 index 00000000000..181f6c9d7f1 --- /dev/null +++ b/admin/database/postgres/migrations/0035.sql @@ -0,0 +1,8 @@ +CREATE TABLE project_access_requests ( + id UUID NOT NULL DEFAULT uuid_generate_v4() PRIMARY KEY, + user_id UUID REFERENCES users (id) ON DELETE CASCADE, + project_id UUID REFERENCES projects (id) ON DELETE CASCADE, + created_on TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE UNIQUE INDEX project_access_requests_user_id_project_idx ON project_access_requests (user_id, project_id); \ No newline at end of file diff --git a/admin/database/postgres/postgres.go b/admin/database/postgres/postgres.go index 4eede4d419a..cd61574e317 100644 --- a/admin/database/postgres/postgres.go 
+++ b/admin/database/postgres/postgres.go @@ -725,7 +725,7 @@ func (c *connection) InsertUsergroup(ctx context.Context, opts *database.InsertU func (c *connection) UpdateUsergroupName(ctx context.Context, name, groupID string) (*database.Usergroup, error) { res := &database.Usergroup{} - err := c.getDB(ctx).QueryRowxContext(ctx, "UPDATE usergroups SET name=$1 WHERE id=$2 RETURNING *", name, groupID).StructScan(res) + err := c.getDB(ctx).QueryRowxContext(ctx, "UPDATE usergroups SET name=$1, updated_on=now() WHERE id=$2 RETURNING *", name, groupID).StructScan(res) if err != nil { return nil, parseErr("usergroup", err) } @@ -734,7 +734,7 @@ func (c *connection) UpdateUsergroupName(ctx context.Context, name, groupID stri func (c *connection) UpdateUsergroupDescription(ctx context.Context, description, groupID string) (*database.Usergroup, error) { res := &database.Usergroup{} - err := c.getDB(ctx).QueryRowxContext(ctx, "UPDATE usergroups SET description=$1 WHERE id=$2 RETURNING *", description, groupID).StructScan(res) + err := c.getDB(ctx).QueryRowxContext(ctx, "UPDATE usergroups SET description=$1, updated_on=now() WHERE id=$2 RETURNING *", description, groupID).StructScan(res) if err != nil { return nil, parseErr("usergroup", err) } diff --git a/admin/deployments.go b/admin/deployments.go index 25262c0facd..41fe822bdd7 100644 --- a/admin/deployments.go +++ b/admin/deployments.go @@ -315,7 +315,7 @@ func (s *Service) HibernateDeployments(ctx context.Context) error { s.Logger.Info("hibernate: deleting deployment", zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID)) - err = s.teardownDeployment(ctx, depl) + err = s.TeardownDeployment(ctx, depl) if err != nil { s.Logger.Error("hibernate: teardown deployment error", zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.Error(err), observability.ZapCtx(ctx)) continue @@ -366,7 +366,7 @@ func (s *Service) OpenRuntimeClient(host, audience string) (*client.Client, erro return rt, 
nil } -func (s *Service) teardownDeployment(ctx context.Context, depl *database.Deployment) error { +func (s *Service) TeardownDeployment(ctx context.Context, depl *database.Deployment) error { // Delete the deployment err := s.DB.DeleteDeployment(ctx, depl.ID) if err != nil { diff --git a/admin/projects.go b/admin/projects.go index 8a9f0bc1165..7339960eeb7 100644 --- a/admin/projects.go +++ b/admin/projects.go @@ -100,7 +100,7 @@ func (s *Service) CreateProject(ctx context.Context, org *database.Organization, Annotations: proj.Annotations, }) if err != nil { - err2 := s.teardownDeployment(ctx, depl) + err2 := s.TeardownDeployment(ctx, depl) err3 := s.DB.DeleteProject(ctx, proj.ID) return nil, multierr.Combine(err, err2, err3) } @@ -119,7 +119,7 @@ func (s *Service) TeardownProject(ctx context.Context, p *database.Project) erro } for _, d := range ds { - err := s.teardownDeployment(ctx, d) + err := s.TeardownDeployment(ctx, d) if err != nil { return err } @@ -285,13 +285,13 @@ func (s *Service) TriggerRedeploy(ctx context.Context, proj *database.Project, p Annotations: proj.Annotations, }) if err != nil { - err2 := s.teardownDeployment(ctx, newDepl) + err2 := s.TeardownDeployment(ctx, newDepl) return nil, multierr.Combine(err, err2) } // Delete old prod deployment if exists if prevDepl != nil { - err = s.teardownDeployment(ctx, prevDepl) + err = s.TeardownDeployment(ctx, prevDepl) if err != nil { s.Logger.Error("trigger redeploy: could not teardown old deployment", zap.String("deployment_id", prevDepl.ID), zap.Error(err), observability.ZapCtx(ctx)) } diff --git a/admin/provisioner/kubernetes.go b/admin/provisioner/kubernetes.go index c49922ffabc..018974f4932 100644 --- a/admin/provisioner/kubernetes.go +++ b/admin/provisioner/kubernetes.go @@ -3,9 +3,12 @@ package provisioner import ( "bytes" "context" + "crypto/sha256" + "encoding/hex" "encoding/json" "fmt" "net/url" + "os" "path/filepath" "strings" "text/template" @@ -44,9 +47,10 @@ type KubernetesTemplatePaths 
struct { } type KubernetesProvisioner struct { - Spec *KubernetesSpec - clientset *kubernetes.Clientset - templates *template.Template + Spec *KubernetesSpec + clientset *kubernetes.Clientset + templates *template.Template + templatesChecksum string } type TemplateData struct { @@ -94,18 +98,33 @@ func NewKubernetes(spec json.RawMessage) (*KubernetesProvisioner, error) { delete(funcMap, "env") delete(funcMap, "expandenv") - // Parse the template definitions - templates := template.Must(template.New("").Funcs(funcMap).ParseFiles( + // Define template files + templateFiles := []string{ ksp.TemplatePaths.HTTPIngress, ksp.TemplatePaths.GRPCIngress, ksp.TemplatePaths.Service, ksp.TemplatePaths.StatefulSet, - )) + } + + // Parse the template definitions + templates := template.Must(template.New("").Funcs(funcMap).ParseFiles(templateFiles...)) + + // Calculate the combined sha256 sum of all the template files + h := sha256.New() + for _, f := range templateFiles { + d, err := os.ReadFile(f) + if err != nil { + return nil, err + } + h.Write(d) + } + templatesChecksum := hex.EncodeToString(h.Sum(nil)) return &KubernetesProvisioner{ - Spec: ksp, - clientset: clientset, - templates: templates, + Spec: ksp, + clientset: clientset, + templates: templates, + templatesChecksum: templatesChecksum, }, nil } @@ -165,6 +184,7 @@ func (p *KubernetesProvisioner) Provision(ctx context.Context, opts *ProvisionOp // Create statefulset sts.ObjectMeta.Name = names.StatefulSet + sts.ObjectMeta.Annotations["checksum/templates"] = p.templatesChecksum p.addCommonLabels(opts.ProvisionID, sts.ObjectMeta.Labels) _, err = p.clientset.AppsV1().StatefulSets(p.Spec.Namespace).Create(ctx, sts, metav1.CreateOptions{}) if err != nil { @@ -308,6 +328,28 @@ func (p *KubernetesProvisioner) CheckCapacity(ctx context.Context) error { return nil } +func (p *KubernetesProvisioner) ValidateConfig(ctx context.Context, provisionID string) (bool, error) { + // Get Kubernetes resource names + names := 
p.getResourceNames(provisionID) + + // Get the statefulset + sts, err := p.clientset.AppsV1().StatefulSets(p.Spec.Namespace).Get(ctx, names.StatefulSet, metav1.GetOptions{}) + if err != nil { + return false, err + } + + // Compare the provisioned templates checksum with the current one + if sts.ObjectMeta.Annotations["checksum/templates"] != p.templatesChecksum { + return false, nil + } + + return true, nil +} + +func (p *KubernetesProvisioner) Type() string { + return "kubernetes" +} + func (p *KubernetesProvisioner) getResourceNames(provisionID string) ResourceNames { return ResourceNames{ StatefulSet: fmt.Sprintf("runtime-%s", provisionID), diff --git a/admin/provisioner/provisioner.go b/admin/provisioner/provisioner.go index 846cff85081..8c032e0a509 100644 --- a/admin/provisioner/provisioner.go +++ b/admin/provisioner/provisioner.go @@ -15,6 +15,8 @@ type Provisioner interface { AwaitReady(ctx context.Context, provisionID string) error Update(ctx context.Context, provisionID string, newVersion string) error CheckCapacity(ctx context.Context) error + ValidateConfig(ctx context.Context, provisionID string) (bool, error) + Type() string } type ProvisionOptions struct { diff --git a/admin/provisioner/static.go b/admin/provisioner/static.go index 393988c6d91..07a8608e2e5 100644 --- a/admin/provisioner/static.go +++ b/admin/provisioner/static.go @@ -129,3 +129,12 @@ func (p *StaticProvisioner) Update(ctx context.Context, provisionID, newVersion // No-op return nil } + +func (p *StaticProvisioner) ValidateConfig(ctx context.Context, provisionID string) (bool, error) { + // No-op + return true, nil +} + +func (p *StaticProvisioner) Type() string { + return "static" +} diff --git a/admin/server/projects.go b/admin/server/projects.go index f7fd1b4d82d..46af9ace95a 100644 --- a/admin/server/projects.go +++ b/admin/server/projects.go @@ -168,10 +168,10 @@ func (s *Server) GetProject(ctx context.Context, req *adminv1.GetProjectRequest) Rule: &runtimev1.SecurityRule_Access{ 
Access: &runtimev1.SecurityRuleAccess{ Condition: fmt.Sprintf( - "NOT ('{{.self.kind}}'='%s' OR '{{.self.kind}}'='%s' AND '{{ .self.name }}'=%s)", + "NOT ('{{.self.kind}}'='%s' OR '{{.self.kind}}'='%s' AND '{{ lower .self.name }}'=%s)", runtime.ResourceKindTheme, runtime.ResourceKindMetricsView, - duckdbsql.EscapeStringValue(mdl.MetricsView), + duckdbsql.EscapeStringValue(strings.ToLower(mdl.MetricsView)), ), Allow: false, }, @@ -331,13 +331,6 @@ func (s *Server) CreateProject(ctx context.Context, req *adminv1.CreateProjectRe attribute.String("args.archive_asset_id", req.ArchiveAssetId), ) - // Check the request is made by a user - claims := auth.GetClaims(ctx) - if claims.OwnerType() != auth.OwnerTypeUser { - return nil, status.Error(codes.Unauthenticated, "not authenticated as a user") - } - userID := claims.OwnerID() - // Find parent org org, err := s.admin.DB.FindOrganizationByName(ctx, req.OrganizationName) if err != nil { @@ -345,6 +338,7 @@ func (s *Server) CreateProject(ctx context.Context, req *adminv1.CreateProjectRe } // Check permissions + claims := auth.GetClaims(ctx) if !claims.OrganizationPermissions(ctx, org.ID).CreateProjects { return nil, status.Error(codes.PermissionDenied, "does not have permission to create projects") } @@ -387,12 +381,19 @@ func (s *Server) CreateProject(ctx context.Context, req *adminv1.CreateProjectRe req.ProdVersion = "latest" } + // Capture creating user (can be nil if created with a service token) + var userID *string + if claims.OwnerType() == auth.OwnerTypeUser { + tmp := claims.OwnerID() + userID = &tmp + } + opts := &database.InsertProjectOptions{ OrganizationID: org.ID, Name: req.Name, Description: req.Description, Public: req.Public, - CreatedByUserID: &userID, + CreatedByUserID: userID, Provisioner: req.Provisioner, ProdVersion: req.ProdVersion, ProdOLAPDriver: req.ProdOlapDriver, @@ -403,8 +404,13 @@ func (s *Server) CreateProject(ctx context.Context, req *adminv1.CreateProjectRe } if req.GithubUrl != "" { + // 
Github projects must be configured by a user so we can ensure that they're allowed to access the repo. + if userID == nil { + return nil, status.Error(codes.Unauthenticated, "not authenticated as a user") + } + // Check Github app is installed and caller has access on the repo - installationID, err := s.getAndCheckGithubInstallationID(ctx, req.GithubUrl, userID) + installationID, err := s.getAndCheckGithubInstallationID(ctx, req.GithubUrl, *userID) if err != nil { return nil, err } @@ -495,18 +501,13 @@ func (s *Server) UpdateProject(ctx context.Context, req *adminv1.UpdateProjectRe observability.AddRequestAttributes(ctx, attribute.String("args.new_name", *req.NewName)) } - // Check the request is made by a user - claims := auth.GetClaims(ctx) - if claims.OwnerType() != auth.OwnerTypeUser { - return nil, status.Error(codes.Unauthenticated, "not authenticated") - } - // Find project proj, err := s.admin.DB.FindProjectByName(ctx, req.OrganizationName, req.Name) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } + claims := auth.GetClaims(ctx) if !claims.ProjectPermissions(ctx, proj.OrganizationID, proj.ID).ManageProject { return nil, status.Error(codes.PermissionDenied, "does not have permission to delete project") } @@ -519,6 +520,11 @@ func (s *Server) UpdateProject(ctx context.Context, req *adminv1.UpdateProjectRe if req.GithubUrl != nil { // If changing the Github URL, check github app is installed and caller has access on the repo if safeStr(proj.GithubURL) != *req.GithubUrl { + // Github projects must be configured by a user so we can ensure that they're allowed to access the repo. 
+ if claims.OwnerType() != auth.OwnerTypeUser { + return nil, status.Error(codes.Unauthenticated, "not authenticated as a user") + } + _, err = s.getAndCheckGithubInstallationID(ctx, *req.GithubUrl, claims.OwnerID()) if err != nil { return nil, err @@ -598,12 +604,7 @@ func (s *Server) UpdateProjectVariables(ctx context.Context, req *adminv1.Update return nil, status.Error(codes.InvalidArgument, err.Error()) } - // Check the request is made by a user claims := auth.GetClaims(ctx) - if claims.OwnerType() != auth.OwnerTypeUser { - return nil, status.Error(codes.Unauthenticated, "not authenticated") - } - if !claims.ProjectPermissions(ctx, proj.OrganizationID, proj.ID).ManageProject { return nil, status.Error(codes.PermissionDenied, "does not have permission to update project variables") } @@ -1476,7 +1477,7 @@ func (s *Server) hasAssetUsagePermission(ctx context.Context, id, orgID, ownerID if err != nil { return false } - return asset.OrganizationID == orgID && asset.OwnerID == ownerID + return asset.OrganizationID != nil && *asset.OrganizationID == orgID && asset.OwnerID == ownerID } func deploymentToDTO(d *database.Deployment) *adminv1.Deployment { diff --git a/admin/worker/run_autoscaler.go b/admin/worker/run_autoscaler.go index a478cec81a4..89cc82c111b 100644 --- a/admin/worker/run_autoscaler.go +++ b/admin/worker/run_autoscaler.go @@ -129,8 +129,21 @@ func (w *Worker) allRecommendations(ctx context.Context) ([]metrics.AutoscalerSl return recs, true, nil } +// shouldScale determines whether scaling operations should be initiated based on the comparison of +// the current number of slots (originSlots) and the recommended number of slots (recommendSlots). 
func shouldScale(originSlots, recommendSlots int) bool { + // Temporarily disable scale DOWN + if recommendSlots <= originSlots { + return false + } + lowerBound := float64(originSlots) * (1 - scaleThreshold) upperBound := float64(originSlots) * (1 + scaleThreshold) - return float64(recommendSlots) < lowerBound || float64(recommendSlots) > upperBound + if float64(recommendSlots) >= lowerBound && float64(recommendSlots) <= upperBound { + return false + } + + // TODO: Skip scaling for manually assigned slots + + return true } diff --git a/admin/worker/upgrade_latest_version_projects.go b/admin/worker/upgrade_latest_version_projects.go deleted file mode 100644 index 815252da635..00000000000 --- a/admin/worker/upgrade_latest_version_projects.go +++ /dev/null @@ -1,94 +0,0 @@ -package worker - -import ( - "context" - "time" - - "github.com/rilldata/rill/admin" - "github.com/rilldata/rill/admin/database" - "github.com/rilldata/rill/runtime/pkg/observability" - "go.uber.org/zap" -) - -const upgradeLatestVersionForProjectTimeout = 5 * time.Minute - -func (w *Worker) upgradeLatestVersionProjects(ctx context.Context) error { - // Resolve 'latest' version - latestVersion := w.admin.ResolveLatestRuntimeVersion() - - // Verify version is valid - err := w.admin.ValidateRuntimeVersion(latestVersion) - if err != nil { - return err - } - - // Iterate over batches of projects with 'latest' version - limit := 100 - afterName := "" - stop := false - for !stop { - // Get batch and update iterator variables - projs, err := w.admin.DB.FindProjectsByVersion(ctx, "latest", afterName, limit) - if err != nil { - return err - } - if len(projs) < limit { - stop = true - } - if len(projs) != 0 { - afterName = projs[len(projs)-1].Name - } - - // Process batch - for _, proj := range projs { - err := w.upgradeAllDeploymentsForProject(ctx, proj, latestVersion) - if err != nil { - // We log the error, but continues to the next deployment - w.logger.Error("upgrade latest version projects: failed 
to upgrade project deployments", zap.String("project_id", proj.ID), zap.String("version", latestVersion), observability.ZapCtx(ctx), zap.Error(err)) - } - } - } - - return nil -} - -func (w *Worker) upgradeAllDeploymentsForProject(ctx context.Context, proj *database.Project, latestVersion string) error { - // Apply timeout - ctx, cancel := context.WithTimeout(ctx, upgradeLatestVersionForProjectTimeout) - defer cancel() - - // Get all project deployments - depls, err := w.admin.DB.FindDeploymentsForProject(ctx, proj.ID) - if err != nil { - return err - } - - // Get project organization, we need this to create the deployment annotations - org, err := w.admin.DB.FindOrganization(ctx, proj.OrganizationID) - if err != nil { - return err - } - - for _, depl := range depls { - if depl.RuntimeVersion != latestVersion { - w.logger.Info("upgrade latest version projects: upgrading deployment", zap.String("deployment_id", depl.ID), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), zap.String("version", latestVersion), observability.ZapCtx(ctx)) - - // Update deployment to latest version - err = w.admin.UpdateDeployment(ctx, depl, &admin.UpdateDeploymentOptions{ - Version: latestVersion, - Branch: depl.Branch, - Variables: proj.ProdVariables, - Annotations: w.admin.NewDeploymentAnnotations(org, proj), - EvictCachedRepo: false, - }) - if err != nil { - w.logger.Error("upgrade latest version projects: failed to upgrade deployment", zap.String("deployment_id", depl.ID), zap.String("version", latestVersion), observability.ZapCtx(ctx), zap.Error(err)) - return err - } - - w.logger.Info("upgrade latest version projects: upgraded deployment", zap.String("deployment_id", depl.ID), zap.String("version", latestVersion), observability.ZapCtx(ctx)) - } - } - - return nil -} diff --git a/admin/worker/validate_deployments.go b/admin/worker/validate_deployments.go new file mode 100644 index 00000000000..d3e5fd8aced --- /dev/null +++ 
b/admin/worker/validate_deployments.go @@ -0,0 +1,133 @@ +package worker + +import ( + "context" + "fmt" + "time" + + "github.com/rilldata/rill/admin" + "github.com/rilldata/rill/admin/database" + "github.com/rilldata/rill/runtime/pkg/observability" + "go.uber.org/zap" +) + +const validateAllDeploymentsForProjectTimeout = 5 * time.Minute + +func (w *Worker) validateDeployments(ctx context.Context) error { + // Resolve 'latest' version + latestVersion := w.admin.ResolveLatestRuntimeVersion() + + // Verify version is valid + err := w.admin.ValidateRuntimeVersion(latestVersion) + if err != nil { + return err + } + + // Iterate over batches of projects + limit := 100 + afterName := "" + stop := false + for !stop { + // Get batch and update iterator variables + projs, err := w.admin.DB.FindProjects(ctx, afterName, limit) + if err != nil { + return err + } + if len(projs) < limit { + stop = true + } + if len(projs) != 0 { + afterName = projs[len(projs)-1].Name + } + + // Process batch + for _, proj := range projs { + err := w.reconcileAllDeploymentsForProject(ctx, proj, latestVersion) + if err != nil { + // We log the error, but continue to the next project + w.logger.Error("validate deployments: failed to reconcile project deployments", zap.String("project_id", proj.ID), zap.String("version", latestVersion), observability.ZapCtx(ctx), zap.Error(err)) + } + } + } + + return nil +} + +func (w *Worker) reconcileAllDeploymentsForProject(ctx context.Context, proj *database.Project, latestVersion string) error { + // Apply timeout + ctx, cancel := context.WithTimeout(ctx, validateAllDeploymentsForProjectTimeout) + defer cancel() + + // Get all project deployments + depls, err := w.admin.DB.FindDeploymentsForProject(ctx, proj.ID) + if err != nil { + return err + } + + // Get project organization, we need this to create the deployment annotations + org, err := w.admin.DB.FindOrganization(ctx, proj.OrganizationID) + if err != nil { + return err + } + + var prodDeplID string + 
if proj.ProdDeploymentID != nil { + prodDeplID = *proj.ProdDeploymentID + } + + for _, depl := range depls { + if depl.ID == prodDeplID { + // Get deployment provisioner + p, ok := w.admin.ProvisionerSet[depl.Provisioner] + if !ok { + return fmt.Errorf("validate deployments: %q is not in the provisioner set", depl.Provisioner) + } + + v, err := p.ValidateConfig(ctx, depl.ProvisionID) + if err != nil { + w.logger.Warn("validate deployments: error validating provisioner config", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.Error(err), observability.ZapCtx(ctx)) + return err + } + + // Trigger a redeploy if config is no longer valid + if !v { + w.logger.Info("validate deployments: config no longer valid, triggering redeploy", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), observability.ZapCtx(ctx)) + _, err = w.admin.TriggerRedeploy(ctx, proj, depl) + if err != nil { + return err + } + w.logger.Info("validate deployments: redeployed", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), observability.ZapCtx(ctx)) + continue + } + + // If project is running 'latest' version then update if needed, skip if 'static' provisioner type + if p.Type() != "static" && proj.ProdVersion == "latest" && depl.RuntimeVersion != latestVersion { + w.logger.Info("validate deployments: upgrading deployment", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), zap.String("version", latestVersion), observability.ZapCtx(ctx)) + + // Update deployment to latest version + err = w.admin.UpdateDeployment(ctx, depl, &admin.UpdateDeploymentOptions{ + 
Version: latestVersion, + Branch: depl.Branch, + Variables: proj.ProdVariables, + Annotations: w.admin.NewDeploymentAnnotations(org, proj), + EvictCachedRepo: false, + }) + if err != nil { + w.logger.Error("validate deployments: failed to upgrade deployment", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), zap.String("version", latestVersion), observability.ZapCtx(ctx), zap.Error(err)) + return err + } + w.logger.Info("validate deployments: upgraded deployment", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), zap.String("version", latestVersion), observability.ZapCtx(ctx)) + } + } else if depl.UpdatedOn.Add(3 * time.Hour).Before(time.Now()) { + // Teardown old orphan non-prod deployment if more than 3 hours since last update + w.logger.Info("validate deployments: teardown deployment", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), observability.ZapCtx(ctx)) + err = w.admin.TeardownDeployment(ctx, depl) + if err != nil { + w.logger.Error("validate deployments: teardown deployment error", zap.String("organization_id", org.ID), zap.String("project_id", proj.ID), zap.String("deployment_id", depl.ID), zap.String("provisioner", depl.Provisioner), zap.String("provision_id", depl.ProvisionID), zap.String("instance_id", depl.RuntimeInstanceID), observability.ZapCtx(ctx), zap.Error(err)) + continue + } + } + } + + return nil +} diff --git a/admin/worker/worker.go 
b/admin/worker/worker.go index 50b7d984631..63db9367e82 100644 --- a/admin/worker/worker.go +++ b/admin/worker/worker.go @@ -59,7 +59,7 @@ func (w *Worker) Run(ctx context.Context) error { return w.schedule(ctx, "hibernate_expired_deployments", w.hibernateExpiredDeployments, 15*time.Minute) }) group.Go(func() error { - return w.schedule(ctx, "upgrade_latest_version_projects", w.upgradeLatestVersionProjects, 6*time.Hour) + return w.schedule(ctx, "validate_deployments", w.validateDeployments, 6*time.Hour) }) group.Go(func() error { return w.scheduleCron(ctx, "run_autoscaler", w.runAutoscaler, w.admin.AutoscalerCron) @@ -90,8 +90,8 @@ func (w *Worker) RunJob(ctx context.Context, name string) error { return w.runJob(ctx, name, w.checkProvisionerCapacity) case "reset_all_deployments": return w.runJob(ctx, name, w.resetAllDeployments) - case "upgrade_latest_version_projects": - return w.runJob(ctx, name, w.upgradeLatestVersionProjects) + case "validate_deployments": + return w.runJob(ctx, name, w.validateDeployments) // NOTE: Add new ad-hoc jobs here default: return fmt.Errorf("unknown job: %s", name) diff --git a/cli/cmd/deploy/deploy.go b/cli/cmd/deploy/deploy.go index 4c7daf9d6cc..09040352a19 100644 --- a/cli/cmd/deploy/deploy.go +++ b/cli/cmd/deploy/deploy.go @@ -315,9 +315,11 @@ func DeployFlow(ctx context.Context, ch *cmdutil.Helper, opts *Options) error { // Open browser if res.Project.FrontendUrl != "" { ch.PrintfSuccess("Your project can be accessed at: %s\n", res.Project.FrontendUrl) - ch.PrintfSuccess("Opening project in browser...\n") - time.Sleep(3 * time.Second) - _ = browser.Open(res.Project.FrontendUrl) + if ch.Interactive { + ch.PrintfSuccess("Opening project in browser...\n") + time.Sleep(3 * time.Second) + _ = browser.Open(res.Project.FrontendUrl) + } } ch.Telemetry(ctx).RecordBehavioralLegacy(activity.BehavioralEventDeploySuccess) @@ -332,8 +334,10 @@ func deployWithUploadFlow(ctx context.Context, ch *cmdutil.Helper, opts *Options return err } } - 
localProjectPath := opts.GitPath - + _, localProjectPath, err := validateLocalProject(ctx, ch, opts) + if err != nil { + return err + } // If no project name was provided, default to dir name if opts.Name == "" { opts.Name = filepath.Base(localProjectPath) @@ -452,9 +456,11 @@ func deployWithUploadFlow(ctx context.Context, ch *cmdutil.Helper, opts *Options // Open browser if res.Project.FrontendUrl != "" { ch.PrintfSuccess("Your project can be accessed at: %s\n", res.Project.FrontendUrl) - ch.PrintfSuccess("Opening project in browser...\n") - time.Sleep(3 * time.Second) - _ = browser.Open(res.Project.FrontendUrl) + if ch.Interactive { + ch.PrintfSuccess("Opening project in browser...\n") + time.Sleep(3 * time.Second) + _ = browser.Open(res.Project.FrontendUrl) + } } ch.Telemetry(ctx).RecordBehavioralLegacy(activity.BehavioralEventDeploySuccess) return nil @@ -723,11 +729,12 @@ func createGithubRepository(ctx context.Context, ch *cmdutil.Helper, pollRes *ad repoOwner = "" } repoName := filepath.Base(localGitPath) + private := true var githubRepo *github.Repository var err error for i := 1; i <= 10; i++ { - githubRepo, _, err = githubClient.Repositories.Create(ctx, repoOwner, &github.Repository{Name: &repoName, DefaultBranch: &defaultBranch}) + githubRepo, _, err = githubClient.Repositories.Create(ctx, repoOwner, &github.Repository{Name: &repoName, DefaultBranch: &defaultBranch, Private: &private}) if err == nil { break } diff --git a/cli/cmd/shareurl/create.go b/cli/cmd/shareurl/create.go index e32b0779365..1fa54b99082 100644 --- a/cli/cmd/shareurl/create.go +++ b/cli/cmd/shareurl/create.go @@ -1,11 +1,15 @@ package shareurl import ( + "context" "fmt" + "strings" "github.com/rilldata/rill/cli/pkg/cmdutil" adminv1 "github.com/rilldata/rill/proto/gen/rill/admin/v1" runtimev1 "github.com/rilldata/rill/proto/gen/rill/runtime/v1" + "github.com/rilldata/rill/runtime" + runtimeclient "github.com/rilldata/rill/runtime/client" "github.com/spf13/cobra" 
"google.golang.org/protobuf/encoding/protojson" ) @@ -48,6 +52,11 @@ func CreateCmd(ch *cmdutil.Helper) *cobra.Command { } } + err = validateMetricsView(cmd.Context(), ch, project, metricsView, fields) + if err != nil { + return err + } + res, err := client.IssueMagicAuthToken(cmd.Context(), &adminv1.IssueMagicAuthTokenRequest{ Organization: ch.Org, Project: project, @@ -74,3 +83,73 @@ func CreateCmd(ch *cmdutil.Helper) *cobra.Command { return createCmd } + +func validateMetricsView(ctx context.Context, ch *cmdutil.Helper, project, metricsView string, fields []string) error { + client, err := ch.Client() + if err != nil { + return err + } + + proj, err := client.GetProject(ctx, &adminv1.GetProjectRequest{ + OrganizationName: ch.Org, + Name: project, + }) + if err != nil { + return err + } + + if proj.ProdDeployment == nil { + ch.PrintfWarn("Could not validate metrics view: project has no production deployment") + return nil + } + depl := proj.ProdDeployment + + rt, err := runtimeclient.New(depl.RuntimeHost, proj.Jwt) + if err != nil { + return fmt.Errorf("failed to connect to runtime: %w", err) + } + + mv, err := rt.GetResource(ctx, &runtimev1.GetResourceRequest{ + InstanceId: depl.RuntimeInstanceId, + Name: &runtimev1.ResourceName{ + Kind: runtime.ResourceKindMetricsView, + Name: metricsView, + }, + }) + if err != nil { + return fmt.Errorf("failed to get metrics view %q: %w", metricsView, err) + } + + spec := mv.Resource.GetMetricsView().Spec + + for _, f := range fields { + if strings.EqualFold(f, spec.TimeDimension) { + continue + } + + found := false + for _, dim := range spec.Dimensions { + if strings.EqualFold(f, dim.Name) { + found = true + break + } + } + if found { + continue + } + + for _, m := range spec.Measures { + if strings.EqualFold(f, m.Name) { + found = true + break + } + } + if found { + continue + } + + return fmt.Errorf("field %q not found in metrics view %q", f, metricsView) + } + + return nil +} diff --git 
a/docs/docs/reference/olap-engines/multiple-olap.md b/docs/docs/reference/olap-engines/multiple-olap.md index 7f353f19f60..35692ea0a3a 100644 --- a/docs/docs/reference/olap-engines/multiple-olap.md +++ b/docs/docs/reference/olap-engines/multiple-olap.md @@ -2,7 +2,7 @@ title: Using Multiple OLAP Engines description: Using multiple OLAP Engines to power dashboards in the same project sidebar_label: Using Multiple OLAP Engines -sidebar_position: 4 +sidebar_position: 5 --- ## Overview diff --git a/docs/docs/reference/olap-engines/olap-engines.md b/docs/docs/reference/olap-engines/olap-engines.md index 232712f0628..8f7d9b1d677 100644 --- a/docs/docs/reference/olap-engines/olap-engines.md +++ b/docs/docs/reference/olap-engines/olap-engines.md @@ -19,6 +19,7 @@ If there's an OLAP engine that you'd like to use with Rill but don't see it belo - [DuckDB (default)](duckdb.md) - [Druid](druid.md) - [ClickHouse](clickhouse.md) +- [Pinot](pinot.md) ## Reference diff --git a/docs/docs/reference/project-files/connectors.md b/docs/docs/reference/project-files/connectors.md new file mode 100644 index 00000000000..04889fd754c --- /dev/null +++ b/docs/docs/reference/project-files/connectors.md @@ -0,0 +1,69 @@ +--- +title: Connector YAML +sidebar_label: Connector YAML +sidebar_position: 70 +hide_table_of_contents: true +--- + + +When you add olap_connector to your rill.yaml file, you will need to set up a `.yaml` file in the 'connectors' directory. This file requires the following parameters,`type` and `driver` (see below for more parameter options). Rill will automatically test the connectivity to the OLAP engine upon saving the file. This can be viewed in the connectors tab in the UI. + +:::tip Did you know? + +Starting from Rill 0.46, you can directly create OLAP engines from the UI! 
+Select + Add -> Data -> Connect an OLAP engine + +::: + + +## Properties + +**`type`** - refers to the resource type and must be 'connector' + +**`driver`** - refers to the [OLAP engine](../olap-engines/multiple-olap.md) +- _`duckdb`_ link to [DuckDB documentation](https://duckdb.org/docs/guides/overview.html) +- _`clickhouse`_ link to [ClickHouse documentation](https://clickhouse.com/docs/en/intro) +- _`druid`_ link to [Druid documentation](https://druid.apache.org/docs/latest/design/) +- _`pinot`_ link to [Pinot documentation](https://docs.pinot.apache.org/) + +:::tip A note on OLAP engines + +You can have multiple OLAP engines in a single project and have each dashboard run on a different engine using the `olap_connector` parameter directly on a dashboard. + +::: + +**`host`** - refers to the hostname + +**`port`** - refers to the port + +**`username`** - the username, in plaintext + +**`password`** - the password, in plaintext + +**`ssl`** - depending on the engine, this parameter may be required (_pinot_) + + +You can also connect using a `dsn` parameter. You cannot use the above parameters along with the **`dsn`** parameter. + +**`dsn`** - connection string containing all the details above, in a single string. Note that each engine's syntax is slightly different. Please refer to [our documentation](https://docs.rilldata.com/reference/olap-engines/) for further details. 
+ +--- + +_Example #1: Connecting to a local running Clickhouse server (no security enabled)_ +```yaml +type: connector +driver: clickhouse + +host: "localhost" +port: "9000" +``` + +_Example #2: Connecting to a ClickHouse Cloud _ +```yaml +type: connector +driver: clickhouse + + +dsn: "https://:?username=&password=&secure=true&skip_verify=true" + +``` diff --git a/runtime/compilers/rillv1/parser.go b/runtime/compilers/rillv1/parser.go index 22823ee7cf0..1cf88ac2288 100644 --- a/runtime/compilers/rillv1/parser.go +++ b/runtime/compilers/rillv1/parser.go @@ -261,7 +261,7 @@ func (p *Parser) Reparse(ctx context.Context, paths []string) (*Diff, error) { // IsSkippable returns true if the path will be skipped by Reparse. // It's useful for callers to avoid triggering a reparse when they know the path is not relevant. func (p *Parser) IsSkippable(path string) bool { - return !pathIsYAML(path) && !pathIsSQL(path) && !pathIsDotEnv(path) + return pathIsIgnored(path) || !pathIsYAML(path) && !pathIsSQL(path) && !pathIsDotEnv(path) } // TrackedPathsInDir returns the paths under the given directory that the parser currently has cached results for. @@ -361,6 +361,11 @@ func (p *Parser) reparseExceptRillYAML(ctx context.Context, paths []string) (*Di } seenPaths[path] = true + // Skip ignored paths + if pathIsIgnored(path) { + continue + } + isSQL := pathIsSQL(path) isYAML := pathIsYAML(path) isDotEnv := pathIsDotEnv(path) @@ -990,6 +995,13 @@ func pathIsDotEnv(path string) bool { return path == "/.env" } +// pathIsIgnored returns true if the path should be ignored by the parser. +// Note: Generally these defaults should be applied at the repo level (in the defaults for ignore_paths in rill.yaml). +// This is only for files we DO want to list and show in the UI, but don't want to parse. +func pathIsIgnored(p string) bool { + return strings.HasPrefix(p, "/.rillcloud/") +} + // normalizePath normalizes a user-provided path to the format returned from ListRecursive. 
// TODO: Change this once ListRecursive returns paths without leading slash. func normalizePath(path string) string { diff --git a/runtime/metricsview/astexpr.go b/runtime/metricsview/astexpr.go index a78eee284b0..02c3c7ac49e 100644 --- a/runtime/metricsview/astexpr.go +++ b/runtime/metricsview/astexpr.go @@ -339,9 +339,9 @@ func (b *sqlExprBuilder) writeILikeCondition(left, right *Expression, leftOverri b.writeString(")") if not { - b.writeString(" NOT ILIKE ") + b.writeString(" NOT LIKE ") } else { - b.writeString(" ILIKE ") + b.writeString(" LIKE ") } b.writeString("LOWER(") diff --git a/runtime/queries/column_desc_stats.go b/runtime/queries/column_desc_stats.go index 373700dbc7b..e1ac4a6daa2 100644 --- a/runtime/queries/column_desc_stats.go +++ b/runtime/queries/column_desc_stats.go @@ -71,21 +71,15 @@ func (q *ColumnDescriptiveStatistics) Resolve(ctx context.Context, rt *runtime.R sanitizedColumnName, olap.Dialect().EscapeTable(q.Database, q.DatabaseSchema, q.TableName)) case drivers.DialectClickHouse: - descriptiveStatisticsSQL = fmt.Sprintf("SELECT "+ - "min(%s)::DOUBLE as min, "+ - "quantileTDigest(0.25)(%s)::DOUBLE as q25, "+ - "quantileTDigest(0.5)(%s)::DOUBLE as q50, "+ - "quantileTDigest(0.75)(%s)::DOUBLE as q75, "+ - "max(%s)::DOUBLE as max, "+ - "avg(%s)::DOUBLE as mean, "+ - "stddevSamp(%s)::DOUBLE as sd "+ - "FROM %s", - sanitizedColumnName, - sanitizedColumnName, - sanitizedColumnName, - sanitizedColumnName, - sanitizedColumnName, - sanitizedColumnName, + descriptiveStatisticsSQL = fmt.Sprintf(`SELECT + min(%[1]s)::DOUBLE as min, + quantileTDigest(0.25)(%[1]s)::DOUBLE as q25, + quantileTDigest(0.5)(%[1]s)::DOUBLE as q50, + quantileTDigest(0.75)(%[1]s)::DOUBLE as q75, + max(%[1]s)::DOUBLE as max, + avg(%[1]s)::DOUBLE as mean, + stddevSamp(%[1]s)::DOUBLE as sd + FROM %[2]s WHERE `+isNonNullFinite(olap.Dialect(), sanitizedColumnName)+``, sanitizedColumnName, olap.Dialect().EscapeTable(q.Database, q.DatabaseSchema, q.TableName)) default: diff --git 
a/runtime/queries/column_numeric_histogram.go b/runtime/queries/column_numeric_histogram.go index 82457cb0b00..ace5090bbb9 100644 --- a/runtime/queries/column_numeric_histogram.go +++ b/runtime/queries/column_numeric_histogram.go @@ -67,7 +67,7 @@ func (q *ColumnNumericHistogram) Resolve(ctx context.Context, rt *runtime.Runtim return err } } else { - return fmt.Errorf("Unknown histogram method %v", q.Method) + return fmt.Errorf("unknown histogram method %q", q.Method) } return nil @@ -146,7 +146,7 @@ func (q *ColumnNumericHistogram) calculateFDMethod(ctx context.Context, rt *runt defer release() if olap.Dialect() != drivers.DialectDuckDB && olap.Dialect() != drivers.DialectClickHouse { - return fmt.Errorf("not available for dialect '%s'", olap.Dialect()) + return fmt.Errorf("not available for dialect %q", olap.Dialect()) } min, max, rng, err := getMinMaxRange(ctx, olap, q.ColumnName, q.Database, q.DatabaseSchema, q.TableName, priority) @@ -173,10 +173,10 @@ func (q *ColumnNumericHistogram) calculateFDMethod(ctx context.Context, rt *runt WITH data_table AS ( SELECT %[1]s as %[2]s FROM %[3]s - WHERE %[2]s IS NOT NULL AND NOT isinf(%[2]s) + WHERE `+isNonNullFinite(olap.Dialect(), sanitizedColumnName)+` ), values AS ( SELECT %[2]s as value from data_table - WHERE %[2]s IS NOT NULL AND NOT isinf(%[2]s) + WHERE `+isNonNullFinite(olap.Dialect(), sanitizedColumnName)+` ), buckets AS ( SELECT `+rangeNumbersCol(olap.Dialect())+`::DOUBLE as bucket, @@ -293,7 +293,7 @@ func (q *ColumnNumericHistogram) calculateDiagnosticMethod(ctx context.Context, WITH data_table AS ( SELECT %[1]s as %[2]s FROM %[3]s - WHERE %[2]s IS NOT NULL AND NOT isinf(%[2]s) + WHERE `+isNonNullFinite(olap.Dialect(), sanitizedColumnName)+` ), S AS ( SELECT min(%[2]s) as minVal, @@ -398,7 +398,7 @@ func getMinMaxRange(ctx context.Context, olap drivers.OLAPStore, columnName, dat max(%[2]s) AS max, max(%[2]s) - min(%[2]s) AS range FROM %[1]s - WHERE %[2]s IS NOT NULL AND NOT isinf(%[2]s) + WHERE 
`+isNonNullFinite(olap.Dialect(), sanitizedColumnName)+` `, olap.Dialect().EscapeTable(database, databaseSchema, tableName), selectColumn, @@ -428,3 +428,14 @@ func getMinMaxRange(ctx context.Context, olap drivers.OLAPStore, columnName, dat return min, max, rng, nil } + +func isNonNullFinite(d drivers.Dialect, floatCol string) string { + switch d { + case drivers.DialectClickHouse: + return fmt.Sprintf("%s IS NOT NULL AND isFinite(%s)", floatCol, floatCol) + case drivers.DialectDuckDB: + return fmt.Sprintf("%s IS NOT NULL AND NOT isinf(%s)", floatCol, floatCol) + default: + return "1=1" + } +} diff --git a/runtime/reconcilers/model.go b/runtime/reconcilers/model.go index 4116d91513d..d87eca48e79 100644 --- a/runtime/reconcilers/model.go +++ b/runtime/reconcilers/model.go @@ -566,6 +566,7 @@ func (r *ModelReconciler) resolveIncrementalState(ctx context.Context, mdl *runt InstanceID: r.C.InstanceID, Resolver: mdl.Spec.IncrementalStateResolver, ResolverProperties: mdl.Spec.IncrementalStateResolverProperties.AsMap(), + Claims: &runtime.SecurityClaims{SkipChecks: true}, }) if err != nil { return nil, nil, err diff --git a/runtime/resolver.go b/runtime/resolver.go index 24da44e17af..bf6a7e4a96a 100644 --- a/runtime/resolver.go +++ b/runtime/resolver.go @@ -156,6 +156,12 @@ type ResolveResult struct { // Resolve resolves a query using the given options. func (r *Runtime) Resolve(ctx context.Context, opts *ResolveOptions) (ResolveResult, error) { + // Since claims don't really make sense for some resolver use cases, it's easy to forget to set them. + // Adding an early panic to catch this. 
+ if opts.Claims == nil { + panic("received nil claims") + } + // Initialize the resolver initializer, ok := ResolverInitializers[opts.Resolver] if !ok { diff --git a/runtime/server/queries_metrics_comparison_toplist_test.go b/runtime/server/queries_metrics_comparison_toplist_test.go index 0e6510593a1..c093d261c60 100644 --- a/runtime/server/queries_metrics_comparison_toplist_test.go +++ b/runtime/server/queries_metrics_comparison_toplist_test.go @@ -168,6 +168,9 @@ func TestServer_MetricsViewComparison_inline_measures(t *testing.T) { } func TestServer_MetricsViewComparison_nulls(t *testing.T) { + // NOTE: Unstable due to sleep. Commenting until we support configuring settings at instance create time. + t.Skip() + t.Parallel() server, instanceId := getMetricsTestServer(t, "ad_bids_2rows") diff --git a/web-admin/src/features/bookmarks/Bookmarks.svelte b/web-admin/src/features/bookmarks/Bookmarks.svelte index 8a528cfff3d..cd22245b592 100644 --- a/web-admin/src/features/bookmarks/Bookmarks.svelte +++ b/web-admin/src/features/bookmarks/Bookmarks.svelte @@ -24,7 +24,7 @@ import { BookmarkIcon } from "lucide-svelte"; import { eventBus } from "@rilldata/web-common/lib/event-bus/event-bus"; - $: metricsViewName = $page.params.dashboard; + export let metricsViewName: string; let createBookmark = false; let editBookmark = false; @@ -101,6 +101,7 @@ bookmark = detail; }} on:select={({ detail }) => selectBookmark(detail)} + {metricsViewName} /> diff --git a/web-admin/src/features/bookmarks/BookmarksDropdownMenuContent.svelte b/web-admin/src/features/bookmarks/BookmarksDropdownMenuContent.svelte index 8478535b67b..1959e67d9c3 100644 --- a/web-admin/src/features/bookmarks/BookmarksDropdownMenuContent.svelte +++ b/web-admin/src/features/bookmarks/BookmarksDropdownMenuContent.svelte @@ -20,6 +20,8 @@ import { createEventDispatcher } from "svelte"; import HomeBookmarkPlus from "@rilldata/web-common/components/icons/HomeBookmarkPlus.svelte"; + export let metricsViewName: string; + 
const dispatch = createEventDispatcher(); const queryClient = useQueryClient(); @@ -33,7 +35,7 @@ $runtime?.instanceId, organization, project, - $page.params.dashboard, + metricsViewName, ); $: filteredBookmarks = searchBookmarks($bookmarks.data, searchText); diff --git a/web-admin/src/features/navigation/TopNavigationBar.svelte b/web-admin/src/features/navigation/TopNavigationBar.svelte index a2a163e97fa..6b3217e2486 100644 --- a/web-admin/src/features/navigation/TopNavigationBar.svelte +++ b/web-admin/src/features/navigation/TopNavigationBar.svelte @@ -176,7 +176,7 @@ {#if $user.isSuccess && $user.data.user && !onMagicLinkPage} - + {/if} diff --git a/web-admin/src/features/shareable-urls/CreateShareableURLForm.svelte b/web-admin/src/features/shareable-urls/CreateShareableURLForm.svelte index e0e2acab42b..cc7a2cd6b20 100644 --- a/web-admin/src/features/shareable-urls/CreateShareableURLForm.svelte +++ b/web-admin/src/features/shareable-urls/CreateShareableURLForm.svelte @@ -12,7 +12,7 @@ import { yup } from "sveltekit-superforms/adapters"; import { object, string } from "yup"; - $: ({ organization, project, dashboard } = $page.params); + $: ({ organization, project } = $page.params); let token: string; let setExpiration = false; @@ -20,6 +20,8 @@ const { dashboardStore, + + metricsViewName, selectors: { measures: { visibleMeasures }, dimensions: { visibleDimensions }, @@ -51,7 +53,7 @@ organization, project, data: { - metricsView: dashboard, + metricsView: $metricsViewName, metricsViewFilter: hasDashboardWhereFilter() ? $dashboardStore.whereFilter : undefined, @@ -108,7 +110,7 @@ {#if hasDashboardWhereFilter()}
- {:else} - {#key dashboardName} - + {:else if metricViewName} + {#key metricViewName} + {#if $user.isSuccess && $user.data.user} - - + + - + {:else} - - + + - + diff --git a/web-common/src/components/icons/ExternalLink.svelte b/web-common/src/components/icons/ExternalLink.svelte new file mode 100644 index 00000000000..ae3fdd34356 --- /dev/null +++ b/web-common/src/components/icons/ExternalLink.svelte @@ -0,0 +1,19 @@ + + + + + + diff --git a/web-common/src/features/charts/render/VegaLiteRenderer.svelte b/web-common/src/features/charts/render/VegaLiteRenderer.svelte index ad84c42fada..10ef92737f4 100644 --- a/web-common/src/features/charts/render/VegaLiteRenderer.svelte +++ b/web-common/src/features/charts/render/VegaLiteRenderer.svelte @@ -1,6 +1,8 @@ -
- - + + + {#if renderedBarValue > 1.001} @@ -181,8 +217,17 @@ /> - diff --git a/web-common/src/features/dashboards/state-managers/StateManagersProvider.svelte b/web-common/src/features/dashboards/state-managers/StateManagersProvider.svelte index 41932ced4f8..54254d7eb06 100644 --- a/web-common/src/features/dashboards/state-managers/StateManagersProvider.svelte +++ b/web-common/src/features/dashboards/state-managers/StateManagersProvider.svelte @@ -17,11 +17,6 @@ orgName && projectName ? `__${orgName}__${projectName}` : "", }); setContext(DEFAULT_STORE_KEY, stateManagers); - - $: { - // update metrics view name - stateManagers.setMetricsViewName(metricsViewName); - } diff --git a/web-common/src/features/dashboards/state-managers/state-managers.ts b/web-common/src/features/dashboards/state-managers/state-managers.ts index 35bc7b5f73b..926eddcbb1c 100644 --- a/web-common/src/features/dashboards/state-managers/state-managers.ts +++ b/web-common/src/features/dashboards/state-managers/state-managers.ts @@ -44,7 +44,6 @@ export type StateManagers = { QueryObserverResult >; queryClient: QueryClient; - setMetricsViewName: (s: string) => void; updateDashboard: DashboardCallbackExecutor; /** * A collection of Readables that can be used to select data from the dashboard. 
@@ -95,9 +94,19 @@ export function createStateManagers({ r.instanceId, metricViewName, ResourceKind.MetricsView, - (data) => data.metricsView?.state?.validSpec, + undefined, queryClient, - ).subscribe(set); + ).subscribe((result) => { + // In case the store was created with a name that has incorrect casing + if (result.data?.meta?.name?.name) { + metricsViewNameStore.set(result.data.meta.name.name); + } + + return set({ + ...result, + data: result.data?.metricsView?.state?.validSpec, + }); + }); }); const timeRangeSummaryStore: Readable< @@ -141,9 +150,7 @@ export function createStateManagers({ timeRangeSummaryStore, queryClient, dashboardStore, - setMetricsViewName: (name) => { - metricsViewNameStore.set(name); - }, + updateDashboard, /** * A collection of Readables that can be used to select data from the dashboard. diff --git a/web-common/src/features/dashboards/stores/dashboard-stores-test-data.ts b/web-common/src/features/dashboards/stores/dashboard-stores-test-data.ts index b9955e2dc71..719853707bc 100644 --- a/web-common/src/features/dashboards/stores/dashboard-stores-test-data.ts +++ b/web-common/src/features/dashboards/stores/dashboard-stores-test-data.ts @@ -399,10 +399,11 @@ export function getOffsetByHour(time: Date) { export function initStateManagers( dashboardFetchMocks?: DashboardFetchMocks, resp?: V1MetricsViewSpec, + name?: string, ) { initAdBidsInStore(); if (dashboardFetchMocks && resp) - dashboardFetchMocks.mockMetricsView(AD_BIDS_NAME, resp); + dashboardFetchMocks.mockMetricsView(name ?? AD_BIDS_NAME, resp); const queryClient = new QueryClient({ defaultOptions: { @@ -417,7 +418,7 @@ export function initStateManagers( }); const stateManagers = createStateManagers({ queryClient, - metricsViewName: AD_BIDS_NAME, + metricsViewName: name ?? 
AD_BIDS_NAME, }); return { stateManagers, queryClient }; diff --git a/web-common/src/features/dashboards/stores/dashboard-stores.spec.ts b/web-common/src/features/dashboards/stores/dashboard-stores.spec.ts index b35d657a1b3..4b0a8e66e15 100644 --- a/web-common/src/features/dashboards/stores/dashboard-stores.spec.ts +++ b/web-common/src/features/dashboards/stores/dashboard-stores.spec.ts @@ -183,13 +183,17 @@ describe("dashboard-stores", () => { it("Should work when time range is not available", () => { const AD_BIDS_NO_TIMESTAMP_NAME = "AdBids_no_timestamp"; - const { stateManagers } = initStateManagers(); + const { stateManagers } = initStateManagers( + undefined, + undefined, + AD_BIDS_NO_TIMESTAMP_NAME, + ); const { actions: { dimensionsFilter: { toggleDimensionValueSelection }, }, } = stateManagers; - stateManagers.setMetricsViewName(AD_BIDS_NO_TIMESTAMP_NAME); + metricsExplorerStore.init( AD_BIDS_NO_TIMESTAMP_NAME, AD_BIDS_INIT, diff --git a/web-common/src/features/models/workspace/inspector/References.svelte b/web-common/src/features/models/workspace/inspector/References.svelte index cd75513a3cd..278669e2ac2 100644 --- a/web-common/src/features/models/workspace/inspector/References.svelte +++ b/web-common/src/features/models/workspace/inspector/References.svelte @@ -45,7 +45,7 @@ (referencedThings) => referencedThings, ); - $: references = $referencedWithMetadata; + $: references = $referencedWithMetadata.filter((ref) => ref.reference); function blur() { eventBus.emit("highlightSelection", []); diff --git a/web-local/src/routes/(viz)/+layout.svelte b/web-local/src/routes/(viz)/+layout.svelte index 0b11c8a7a90..78cfd350cfc 100644 --- a/web-local/src/routes/(viz)/+layout.svelte +++ b/web-local/src/routes/(viz)/+layout.svelte @@ -45,7 +45,13 @@ dashboardOptions, ]; - $: currentPath = [projectTitle, dashboardName]; + $: currentPath = [projectTitle, dashboardName.toLowerCase()]; + + $: currentDashboard = dashboards.find( + (d) => d.meta?.name?.name?.toLowerCase() 
=== dashboardName.toLowerCase(), + ); + + $: metricsViewName = currentDashboard?.meta?.name?.name;
@@ -60,9 +66,9 @@ PREVIEW - {#if route.id?.includes("dashboard")} - - + {#if route.id?.includes("dashboard") && metricsViewName} + + {/if}