diff --git a/internal/provider/cloud_providers_data_source.go b/internal/provider/cloud_providers_data_source.go new file mode 100644 index 0000000..40aa743 --- /dev/null +++ b/internal/provider/cloud_providers_data_source.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ datasource.DataSource = &CloudProvidersDataSource{} + +func NewCloudProvidersDataSource() datasource.DataSource { + return &CloudProvidersDataSource{} +} + +// CloudProvidersDataSource defines the data source implementation. +type CloudProvidersDataSource struct { + client *zilliz.Client +} + +// CloudProviderDataSourceModel describes the data source data model. +type CloudProviderModel struct { + Description types.String `tfsdk:"description"` + CloudId types.String `tfsdk:"cloud_id"` +} + +func (p CloudProviderModel) AttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "cloud_id": types.StringType, + "description": types.StringType, + } +} + +// CloudProvidersDataSourceModel describes the data source data model. 
+type CloudProvidersDataSourceModel struct { + CloudProviders types.List `tfsdk:"cloud_providers"` + Id types.String `tfsdk:"id"` +} + +func (d *CloudProvidersDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cloud_providers" +} + +func (d *CloudProvidersDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "Cloud Providers data source", + + Attributes: map[string]schema.Attribute{ + "cloud_providers": schema.ListNestedAttribute{ + MarkdownDescription: "List of Cloud Providers", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cloud_id": schema.StringAttribute{ + MarkdownDescription: "Cloud Provider Identifier", + Computed: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "Cloud Provider Description", + Computed: true, + }, + }, + }, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "Cluster identifier", + Computed: true, + }, + }, + } +} + +func (d *CloudProvidersDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *CloudProvidersDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data CloudProvidersDataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + cloudProviders, err := d.client.ListCloudProviders() + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to ListCloudProviders, got error: %s", err)) + return + } + + // Save data into Terraform state + data.Id = types.StringValue(strconv.FormatInt(time.Now().Unix(), 10)) + + var cps []CloudProviderModel + for _, cp := range cloudProviders { + cps = append(cps, CloudProviderModel{CloudId: types.StringValue(cp.CloudId), Description: types.StringValue(cp.Description)}) + } + var diag diag.Diagnostics + data.CloudProviders, diag = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: CloudProviderModel{}.AttrTypes()}, cps) + resp.Diagnostics.Append(diag...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/cloud_regions_data_source.go b/internal/provider/cloud_regions_data_source.go new file mode 100644 index 0000000..edfeebf --- /dev/null +++ b/internal/provider/cloud_regions_data_source.go @@ -0,0 +1,145 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure Region defined types fully satisfy framework interfaces. +var _ datasource.DataSource = &CloudRegionsDataSource{} + +func NewCloudRegionsDataSource() datasource.DataSource { + return &CloudRegionsDataSource{} +} + +// CloudRegionsDataSource defines the data source implementation. +type CloudRegionsDataSource struct { + client *zilliz.Client +} + +// CloudRegionDataSourceModel describes the data source data model. +type CloudRegionModel struct { + ApiBaseUrl types.String `tfsdk:"api_base_url"` + CloudId types.String `tfsdk:"cloud_id"` + RegionId types.String `tfsdk:"region_id"` +} + +func (p CloudRegionModel) AttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "api_base_url": types.StringType, + "cloud_id": types.StringType, + "region_id": types.StringType, + } +} + +// CloudRegionsDataSourceModel describes the data source data model. +type CloudRegionsDataSourceModel struct { + CloudRegions types.List `tfsdk:"cloud_regions"` + CloudId types.String `tfsdk:"cloud_id"` + Id types.String `tfsdk:"id"` +} + +func (d *CloudRegionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cloud_regions" +} + +func (d *CloudRegionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Cloud Regions data source", + + Attributes: map[string]schema.Attribute{ + "cloud_regions": schema.ListNestedAttribute{ + MarkdownDescription: "List of Cloud Regions", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "api_base_url": schema.StringAttribute{ + MarkdownDescription: "Cloud Region API Base URL", + Computed: true, + }, + "cloud_id": schema.StringAttribute{ + MarkdownDescription: "Cloud Region Identifier", + Computed: true, + }, + "region_id": schema.StringAttribute{ + MarkdownDescription: "Cloud Region Id", + Computed: true, + }, + }, + }, + }, + "cloud_id": schema.StringAttribute{ + MarkdownDescription: "Cloud ID", + Required: true, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "Cloud Regions identifier", + Computed: true, + }, + }, + } +} + +func (d *CloudRegionsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *CloudRegionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data CloudRegionsDataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + cloudRegions, err := d.client.ListCloudRegions(data.CloudId.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to ListCloudRegions, got error: %s", err)) + return + } + + // Save data into Terraform state + data.Id = data.CloudId + + var crs []CloudRegionModel + for _, cr := range cloudRegions { + crs = append(crs, CloudRegionModel{ + ApiBaseUrl: types.StringValue(cr.ApiBaseUrl), + CloudId: types.StringValue(cr.CloudId), + RegionId: types.StringValue(cr.RegionId)}) + } + var diag diag.Diagnostics + data.CloudRegions, diag = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: CloudRegionModel{}.AttrTypes()}, crs) + resp.Diagnostics.Append(diag...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/cluster_data_source.go b/internal/provider/cluster_data_source.go new file mode 100644 index 0000000..852a8f6 --- /dev/null +++ b/internal/provider/cluster_data_source.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ datasource.DataSource = &ClusterDataSource{} + +func NewClusterDataSource() datasource.DataSource { + return &ClusterDataSource{} +} + +// ClusterDataSource defines the data source implementation. +type ClusterDataSource struct { + client *zilliz.Client +} + +// ClusterModel describes the cluster data model. 
+type ClusterModel struct { + ClusterId types.String `tfsdk:"id"` + ClusterName types.String `tfsdk:"cluster_name"` + Description types.String `tfsdk:"description"` + RegionId types.String `tfsdk:"region_id"` + ClusterType types.String `tfsdk:"cluster_type"` + CuSize types.Int64 `tfsdk:"cu_size"` + Status types.String `tfsdk:"status"` + ConnectAddress types.String `tfsdk:"connect_address"` + PrivateLinkAddress types.String `tfsdk:"private_link_address"` + CreateTime types.String `tfsdk:"create_time"` +} + +func (p ClusterModel) AttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "id": types.StringType, + "cluster_name": types.StringType, + "description": types.StringType, + "region_id": types.StringType, + "cluster_type": types.StringType, + "cu_size": types.Int64Type, + "status": types.StringType, + "connect_address": types.StringType, + "private_link_address": types.StringType, + "create_time": types.StringType, + } +} + +func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cluster" +} + +func (d *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Cluster data source", + + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "The ID of the cluster.", + Required: true, + }, + "cluster_name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster.", + Computed: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "An optional description about the cluster.", + Computed: true, + }, + "region_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the region where the cluster exists.", + Computed: true, + }, + "cluster_type": schema.StringAttribute{ + MarkdownDescription: "The type of CU associated with the cluster. Possible values are Performance-optimized and Capacity-optimized.", + Computed: true, + }, + "cu_size": schema.Int64Attribute{ + MarkdownDescription: "The size of the CU associated with the cluster.", + Computed: true, + }, + "status": schema.StringAttribute{ + MarkdownDescription: "The current status of the cluster. Possible values are INITIALIZING, RUNNING, SUSPENDING, and RESUMING.", + Computed: true, + }, + "connect_address": schema.StringAttribute{ + MarkdownDescription: "The public endpoint of the cluster. You can connect to the cluster using this endpoint from the public network.", + Computed: true, + }, + "private_link_address": schema.StringAttribute{ + MarkdownDescription: "The private endpoint of the cluster. You can set up a private link to allow your VPS in the same cloud region to access your cluster.", + Computed: true, + }, + "create_time": schema.StringAttribute{ + MarkdownDescription: "The time at which the cluster has been created.", + Computed: true, + }, + }, + } +} + +func (d *ClusterDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data ClusterModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + c, err := d.client.DescribeCluster(data.ClusterId.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to DescribeCluster, got error: %s", err)) + return + } + + // Save data into Terraform state + data.ClusterId = types.StringValue(c.ClusterId) + data.ClusterName = types.StringValue(c.ClusterName) + data.Description = types.StringValue(c.Description) + data.RegionId = types.StringValue(c.RegionId) + data.ClusterType = types.StringValue(c.ClusterType) + data.CuSize = types.Int64Value(c.CuSize) + data.Status = types.StringValue(c.Status) + data.ConnectAddress = types.StringValue(c.ConnectAddress) + data.PrivateLinkAddress = types.StringValue(c.PrivateLinkAddress) + data.CreateTime = types.StringValue(c.CreateTime) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go new file mode 100644 index 0000000..32d42ce --- /dev/null +++ b/internal/provider/cluster_resource.go @@ -0,0 +1,376 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" +) + +const ( + defaultClusterCreateTimeout time.Duration = 5 * time.Minute + defaultClusterUpdateTimeout time.Duration = 5 * time.Minute +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ resource.Resource = &ClusterResource{} + +// var _ resource.ResourceWithImportState = &ClusterResource{} + +func NewClusterResource() resource.Resource { + return &ClusterResource{} +} + +// ClusterResource defines the resource implementation. +type ClusterResource struct { + client *zilliz.Client +} + +func (r *ClusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cluster" +} + +func (r *ClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: "Cluster resource. 
If 'plan', 'cu_size' and 'cu-type' are not specified, then a serverless cluster is created.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "Cluster identifier", + Computed: true, + }, + "cluster_name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster to be created. It is a string of no more than 32 characters.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "project_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the project where the cluster is to be created.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "plan": schema.StringAttribute{ + MarkdownDescription: "The plan tier of the Zilliz Cloud service. Available options are Standard and Enterprise.", + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.AlsoRequires( + path.MatchRelative().AtParent().AtName("cu_size"), + path.MatchRelative().AtParent().AtName("cu_type"), + ), + }, + }, + "cu_size": schema.Int64Attribute{ + MarkdownDescription: "The size of the CU to be used for the created cluster. It is an integer from 1 to 256.", + Optional: true, + Computed: true, + Validators: []validator.Int64{ + int64validator.AlsoRequires( + path.MatchRelative().AtParent().AtName("plan"), + path.MatchRelative().AtParent().AtName("cu_type"), + ), + }, + }, + "cu_type": schema.StringAttribute{ + MarkdownDescription: "The type of the CU used for the Zilliz Cloud cluster to be created. Available options are Performance-optimized, Capacity-optimized, and Cost-optimized. This parameter defaults to Performance-optimized. 
The value defaults to Performance-optimized.", + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.AlsoRequires( + path.MatchRelative().AtParent().AtName("cu_size"), + path.MatchRelative().AtParent().AtName("plan"), + ), + }, + }, + "username": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster user generated by default.", + Computed: true, + }, + "password": schema.StringAttribute{ + MarkdownDescription: "The password of the cluster user generated by default. It will not be displayed again, so note it down and securely store it.", + Computed: true, + Sensitive: true, + }, + "prompt": schema.StringAttribute{ + MarkdownDescription: "The statement indicating that this operation succeeds.", + Computed: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "An optional description about the cluster.", + Computed: true, + }, + "region_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the region where the cluster exists.", + Computed: true, + }, + "cluster_type": schema.StringAttribute{ + MarkdownDescription: "The type of CU associated with the cluster. Possible values are Performance-optimized and Capacity-optimized.", + Computed: true, + }, + "status": schema.StringAttribute{ + MarkdownDescription: "The current status of the cluster. Possible values are INITIALIZING, RUNNING, SUSPENDING, and RESUMING.", + Computed: true, + }, + "connect_address": schema.StringAttribute{ + MarkdownDescription: "The public endpoint of the cluster. You can connect to the cluster using this endpoint from the public network.", + Computed: true, + }, + "private_link_address": schema.StringAttribute{ + MarkdownDescription: "The private endpoint of the cluster. 
You can set up a private link to allow your VPS in the same cloud region to access your cluster.", + Computed: true, + }, + "create_time": schema.StringAttribute{ + MarkdownDescription: "The time at which the cluster has been created.", + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, + timeouts.Opts{ + Create: true, + CreateDescription: `Timeout defaults to 5 mins. Accepts a string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) ` + + `consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are ` + + `"s" (seconds), "m" (minutes), "h" (hours).`, + Update: true, + UpdateDescription: `Timeout defaults to 5 mins. Accepts a string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) ` + + `consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are ` + + `"s" (seconds), "m" (minutes), "h" (hours).`, + }, + ), + }, + } +} + +func (r *ClusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +func (r *ClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data ClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + var response *zilliz.CreateClusterResponse + var err error + + if data.Plan.IsNull() && data.CuSize.IsUnknown() && data.CuType.IsNull() { + response, err = r.client.CreateServerlessCluster(zilliz.CreateServerlessClusterParams{ + ClusterName: data.ClusterName.ValueString(), + ProjectId: data.ProjectId.ValueString(), + }) + } else { + response, err = r.client.CreateCluster(zilliz.CreateClusterParams{ + Plan: data.Plan.ValueString(), + ClusterName: data.ClusterName.ValueString(), + CUSize: int(data.CuSize.ValueInt64()), + CUType: data.CuType.ValueString(), + ProjectId: data.ProjectId.ValueString(), + }) + } + if err != nil { + resp.Diagnostics.AddError("Failed to create cluster", err.Error()) + return + } + + data.ClusterId = types.StringValue(response.ClusterId) + data.Username = types.StringValue(response.Username) + data.Password = types.StringValue(response.Password) + data.Prompt = types.StringValue(response.Prompt) + + // Wait for cluster to be RUNNING + // Create() is passed a default timeout to use if no value + // has been supplied in the Terraform configuration. + createTimeout, diags := data.Timeouts.Create(ctx, defaultClusterCreateTimeout) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(data.waitForStatus(ctx, createTimeout, r.client, "RUNNING")...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(data.refresh(r.client)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *ClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data ClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(data.refresh(r.client)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func (r *ClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan ClusterResourceModel + var state ClusterResourceModel + + // Read Terraform plan data into the model + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + // Read Terraform state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // Only support changes of cuSize - all other attributes are set to ForceNew + _, err := r.client.ModifyCluster(state.ClusterId.ValueString(), &zilliz.ModifyClusterParams{ + CuSize: int(plan.CuSize.ValueInt64()), + }) + if err != nil { + resp.Diagnostics.AddError("Failed to modify cluster", err.Error()) + return + } + + // Wait for cluster to be RUNNING + // Update() is passed a default timeout to use if no value + // has been supplied in the Terraform configuration. + updateTimeout, diags := plan.Timeouts.Update(ctx, defaultClusterUpdateTimeout) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(state.waitForStatus(ctx, updateTimeout, r.client, "RUNNING")...) + if resp.Diagnostics.HasError() { + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(state.refresh(r.client)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *ClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data ClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + _, err := r.client.DropCluster(data.ClusterId.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to drop cluster", err.Error()) + return + } +} + +// Cannot support import due to the username/password attributes only available on creation +// func (r *ClusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +// resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +// } + +// ClusterResourceModel describes the resource data model. +type ClusterResourceModel struct { + ClusterId types.String `tfsdk:"id"` + Plan types.String `tfsdk:"plan"` + ClusterName types.String `tfsdk:"cluster_name"` + CuSize types.Int64 `tfsdk:"cu_size"` + CuType types.String `tfsdk:"cu_type"` + ProjectId types.String `tfsdk:"project_id"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Prompt types.String `tfsdk:"prompt"` + Description types.String `tfsdk:"description"` + RegionId types.String `tfsdk:"region_id"` + ClusterType types.String `tfsdk:"cluster_type"` + Status types.String `tfsdk:"status"` + ConnectAddress types.String `tfsdk:"connect_address"` + PrivateLinkAddress types.String `tfsdk:"private_link_address"` + CreateTime types.String `tfsdk:"create_time"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +func (data *ClusterResourceModel) refresh(client *zilliz.Client) diag.Diagnostics { + var diags diag.Diagnostics + + c, err := client.DescribeCluster(data.ClusterId.ValueString()) + if err != nil { + diags.AddError("Client Error", fmt.Sprintf("Unable to DescribeCluster, got error: %s", err)) + return diags + } + + // Save data into Terraform state + data.ClusterId = types.StringValue(c.ClusterId) + data.ClusterName = types.StringValue(c.ClusterName) + data.CuSize = types.Int64Value(c.CuSize) + + data.Description = types.StringValue(c.Description) + data.RegionId = types.StringValue(c.RegionId) + 
data.ClusterType = types.StringValue(c.ClusterType) + data.Status = types.StringValue(c.Status) + data.ConnectAddress = types.StringValue(c.ConnectAddress) + data.PrivateLinkAddress = types.StringValue(c.PrivateLinkAddress) + data.CreateTime = types.StringValue(c.CreateTime) + + return diags +} + +func (data *ClusterResourceModel) waitForStatus(ctx context.Context, timeout time.Duration, client *zilliz.Client, status string) diag.Diagnostics { + var diags diag.Diagnostics + + err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { + cluster, err := client.DescribeCluster(data.ClusterId.ValueString()) + if err != nil { + return retry.NonRetryableError(err) + } + if cluster.Status != status { + return retry.RetryableError(fmt.Errorf("cluster not yet in the %s state. Current state: %s", status, cluster.Status)) + } + return nil + }) + if err != nil { + diags.AddError("Failed to wait for cluster to enter the RUNNING state.", err.Error()) + } + + return diags +} diff --git a/internal/provider/clusters_data_source.go b/internal/provider/clusters_data_source.go new file mode 100644 index 0000000..4f395a2 --- /dev/null +++ b/internal/provider/clusters_data_source.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ datasource.DataSource = &ClustersDataSource{} + +func NewClustersDataSource() datasource.DataSource { + return &ClustersDataSource{} +} + +// ClusterDataSource defines the data source implementation. 
+type ClustersDataSource struct { + client *zilliz.Client +} + +// ClustersModel describes the clusters data model. +type ClustersModel struct { + Clusters types.List `tfsdk:"clusters"` + Id types.String `tfsdk:"id"` +} + +func (d *ClustersDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_clusters" +} + +func (d *ClustersDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "Cluster data source", + + Attributes: map[string]schema.Attribute{ + "clusters": schema.ListNestedAttribute{ + MarkdownDescription: "List of Clusters", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "The ID of the cluster.", + Computed: true, + }, + "cluster_name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster.", + Computed: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "An optional description about the cluster.", + Computed: true, + }, + "region_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the region where the cluster exists.", + Computed: true, + }, + "cluster_type": schema.StringAttribute{ + MarkdownDescription: "The type of CU associated with the cluster. Possible values are Performance-optimized and Capacity-optimized.", + Computed: true, + }, + "cu_size": schema.Int64Attribute{ + MarkdownDescription: "The size of the CU associated with the cluster.", + Computed: true, + }, + "status": schema.StringAttribute{ + MarkdownDescription: "The current status of the cluster. 
Possible values are INITIALIZING, RUNNING, SUSPENDING, and RESUMING.", + Computed: true, + }, + "connect_address": schema.StringAttribute{ + MarkdownDescription: "The public endpoint of the cluster. You can connect to the cluster using this endpoint from the public network.", + Computed: true, + }, + "private_link_address": schema.StringAttribute{ + MarkdownDescription: "The private endpoint of the cluster. You can set up a private link to allow your VPS in the same cloud region to access your cluster.", + Computed: true, + }, + "create_time": schema.StringAttribute{ + MarkdownDescription: "The time at which the cluster has been created.", + Computed: true, + }, + }, + }, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "Clusters identifier", + Computed: true, + }, + }, + } +} + +func (d *ClustersDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *ClustersDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data ClustersModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + clusters, err := d.client.ListClusters() + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to ListClusters, got error: %s", err)) + return + } + + // Save data into Terraform state + data.Id = types.StringValue(strconv.FormatInt(time.Now().Unix(), 10)) + + var cs []ClusterModel + for _, c := range clusters.Clusters { + cs = append(cs, ClusterModel{ + ClusterId: types.StringValue(c.ClusterId), + ClusterName: types.StringValue(c.ClusterName), + Description: types.StringValue(c.Description), + RegionId: types.StringValue(c.RegionId), + ClusterType: types.StringValue(c.ClusterType), + CuSize: types.Int64Value(c.CuSize), + Status: types.StringValue(c.Status), + ConnectAddress: types.StringValue(c.ConnectAddress), + PrivateLinkAddress: types.StringValue(c.PrivateLinkAddress), + CreateTime: types.StringValue(c.CreateTime)}) + } + var diag diag.Diagnostics + data.Clusters, diag = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: ClusterModel{}.AttrTypes()}, cs) + resp.Diagnostics.Append(diag...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/projects_data_source.go b/internal/provider/projects_data_source.go new file mode 100644 index 0000000..bd4630c --- /dev/null +++ b/internal/provider/projects_data_source.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure provider defined types fully satisfy framework interfaces. 
+var _ datasource.DataSource = &ProjectsDataSource{} + +func NewProjectsDataSource() datasource.DataSource { + return &ProjectsDataSource{} +} + +// ProjectsDataSource defines the data source implementation. +type ProjectsDataSource struct { + client *zilliz.Client +} + +// ProjectDataSourceModel describes the data source data model. +type ProjectModel struct { + ProjectId types.String `tfsdk:"project_id"` + ProjectName types.String `tfsdk:"project_name"` +} + +func (p ProjectModel) AttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "project_id": types.StringType, + "project_name": types.StringType, + } +} + +// ProjectsDataSourceModel describes the data source data model. +type ProjectsDataSourceModel struct { + Projects types.List `tfsdk:"projects"` + Id types.String `tfsdk:"id"` +} + +func (d *ProjectsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_projects" +} + +func (d *ProjectsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Projects data source", + + Attributes: map[string]schema.Attribute{ + "projects": schema.ListNestedAttribute{ + MarkdownDescription: "List of Projects", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "project_id": schema.StringAttribute{ + MarkdownDescription: "Project Identifier", + Computed: true, + }, + "project_name": schema.StringAttribute{ + MarkdownDescription: "Project Name", + Computed: true, + }, + }, + }, + }, + "id": schema.StringAttribute{ + MarkdownDescription: "Projects identifier", + Computed: true, + }, + }, + } +} + +func (d *ProjectsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*zilliz.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *zilliz.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *ProjectsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data ProjectsDataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + projects, err := d.client.ListProjects() + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to ListProjects, got error: %s", err)) + return + } + + // Save data into Terraform state + data.Id = types.StringValue(strconv.FormatInt(time.Now().Unix(), 10)) + + var ps []ProjectModel + for _, p := range projects { + ps = append(ps, ProjectModel{ProjectId: types.StringValue(p.ProjectId), ProjectName: types.StringValue(p.ProjectName)}) + } + var diag diag.Diagnostics + data.Projects, diag = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: ProjectModel{}.AttrTypes()}, ps) + resp.Diagnostics.Append(diag...) + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 2471df6..c8b452c 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -5,51 +5,56 @@ package provider import ( "context" - "net/http" + "os" "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/function" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" + zilliz "github.com/zilliztech/terraform-provider-zillizcloud/client" ) -// Ensure ScaffoldingProvider satisfies various provider interfaces. -var _ provider.Provider = &ScaffoldingProvider{} -var _ provider.ProviderWithFunctions = &ScaffoldingProvider{} +// Ensure ZillizProvider satisfies various provider interfaces. +var _ provider.Provider = &ZillizProvider{} -// ScaffoldingProvider defines the provider implementation. -type ScaffoldingProvider struct { +// ZillizProvider defines the provider implementation. 
+type ZillizProvider struct { // version is set to the provider version on release, "dev" when the // provider is built and ran locally, and "test" when running acceptance // testing. version string } -// ScaffoldingProviderModel describes the provider data model. -type ScaffoldingProviderModel struct { - Endpoint types.String `tfsdk:"endpoint"` +// zillizProviderModel describes the provider data model. +type zillizProviderModel struct { + ApiKey types.String `tfsdk:"api_key"` + CloudRegionId types.String `tfsdk:"cloud_region_id"` } -func (p *ScaffoldingProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { - resp.TypeName = "scaffolding" +func (p *ZillizProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "zilliz" resp.Version = p.version } -func (p *ScaffoldingProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { +func (p *ZillizProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "endpoint": schema.StringAttribute{ - MarkdownDescription: "Example provider attribute", + "api_key": schema.StringAttribute{ + MarkdownDescription: "Zilliz API Key", Optional: true, + Sensitive: true, + }, + "cloud_region_id": schema.StringAttribute{ + MarkdownDescription: "Zilliz Cloud Region Id", + Required: true, }, }, } } -func (p *ScaffoldingProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { - var data ScaffoldingProviderModel +func (p *ZillizProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + var data zillizProviderModel resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
@@ -57,36 +62,38 @@ func (p *ScaffoldingProvider) Configure(ctx context.Context, req provider.Config return } - // Configuration values are now available. - // if data.Endpoint.IsNull() { /* ... */ } + // Default to environment variables, but override + // with Terraform configuration value if set. + apiKey := os.Getenv("ZILLIZ_API_KEY") + if !data.ApiKey.IsNull() { + apiKey = data.ApiKey.ValueString() + } + client := zilliz.NewClient(apiKey, data.CloudRegionId.ValueString()) - // Example client configuration for data sources and resources - client := http.DefaultClient + // Zilliz client for data sources and resources resp.DataSourceData = client resp.ResourceData = client } -func (p *ScaffoldingProvider) Resources(ctx context.Context) []func() resource.Resource { +func (p *ZillizProvider) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ - NewExampleResource, + NewClusterResource, } } -func (p *ScaffoldingProvider) DataSources(ctx context.Context) []func() datasource.DataSource { +func (p *ZillizProvider) DataSources(ctx context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ - NewExampleDataSource, - } -} - -func (p *ScaffoldingProvider) Functions(ctx context.Context) []func() function.Function { - return []func() function.Function{ - NewExampleFunction, + NewCloudProvidersDataSource, + NewCloudRegionsDataSource, + NewProjectsDataSource, + NewClustersDataSource, + NewClusterDataSource, } } func New(version string) func() provider.Provider { return func() provider.Provider { - return &ScaffoldingProvider{ + return &ZillizProvider{ version: version, } } diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go deleted file mode 100644 index ef6599b..0000000 --- a/internal/provider/provider_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package provider - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" -) - -// testAccProtoV6ProviderFactories are used to instantiate a provider during -// acceptance testing. The factory function will be invoked for every Terraform -// CLI command executed to create a provider server to which the CLI can -// reattach. -var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ - "scaffolding": providerserver.NewProtocol6WithError(New("test")()), -} - -func testAccPreCheck(t *testing.T) { - // You can add code here to run prior to any test case execution, for example assertions - // about the appropriate environment variables being set are common to see in a pre-check - // function. -}